This is page 6 of 45. Use http://codebase.md/czlonkowski/n8n-mcp?page={x} to view the full context. # Directory Structure ``` ├── _config.yml ├── .claude │ └── agents │ ├── code-reviewer.md │ ├── context-manager.md │ ├── debugger.md │ ├── deployment-engineer.md │ ├── mcp-backend-engineer.md │ ├── n8n-mcp-tester.md │ ├── technical-researcher.md │ └── test-automator.md ├── .dockerignore ├── .env.docker ├── .env.example ├── .env.n8n.example ├── .env.test ├── .env.test.example ├── .github │ ├── ABOUT.md │ ├── BENCHMARK_THRESHOLDS.md │ ├── FUNDING.yml │ ├── gh-pages.yml │ ├── secret_scanning.yml │ └── workflows │ ├── benchmark-pr.yml │ ├── benchmark.yml │ ├── docker-build-fast.yml │ ├── docker-build-n8n.yml │ ├── docker-build.yml │ ├── release.yml │ ├── test.yml │ └── update-n8n-deps.yml ├── .gitignore ├── .npmignore ├── ATTRIBUTION.md ├── CHANGELOG.md ├── CLAUDE.md ├── codecov.yml ├── coverage.json ├── data │ ├── .gitkeep │ ├── nodes.db │ ├── nodes.db-shm │ ├── nodes.db-wal │ └── templates.db ├── deploy │ └── quick-deploy-n8n.sh ├── docker │ ├── docker-entrypoint.sh │ ├── n8n-mcp │ ├── parse-config.js │ └── README.md ├── docker-compose.buildkit.yml ├── docker-compose.extract.yml ├── docker-compose.n8n.yml ├── docker-compose.override.yml.example ├── docker-compose.test-n8n.yml ├── docker-compose.yml ├── Dockerfile ├── Dockerfile.railway ├── Dockerfile.test ├── docs │ ├── AUTOMATED_RELEASES.md │ ├── BENCHMARKS.md │ ├── CHANGELOG.md │ ├── CLAUDE_CODE_SETUP.md │ ├── CLAUDE_INTERVIEW.md │ ├── CODECOV_SETUP.md │ ├── CODEX_SETUP.md │ ├── CURSOR_SETUP.md │ ├── DEPENDENCY_UPDATES.md │ ├── DOCKER_README.md │ ├── DOCKER_TROUBLESHOOTING.md │ ├── FINAL_AI_VALIDATION_SPEC.md │ ├── FLEXIBLE_INSTANCE_CONFIGURATION.md │ ├── HTTP_DEPLOYMENT.md │ ├── img │ │ ├── cc_command.png │ │ ├── cc_connected.png │ │ ├── codex_connected.png │ │ ├── cursor_tut.png │ │ ├── Railway_api.png │ │ ├── Railway_server_address.png │ │ ├── vsc_ghcp_chat_agent_mode.png │ │ ├── vsc_ghcp_chat_instruction_files.png │ │ ├── vsc_ghcp_chat_thinking_tool.png │ │ └── windsurf_tut.png │ ├── INSTALLATION.md │ ├── LIBRARY_USAGE.md │ ├── local │ │ ├── DEEP_DIVE_ANALYSIS_2025-10-02.md │ │ ├── DEEP_DIVE_ANALYSIS_README.md │ │ ├── Deep_dive_p1_p2.md │ │ ├── integration-testing-plan.md │ │ ├── integration-tests-phase1-summary.md │ │ ├── N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md │ │ ├── P0_IMPLEMENTATION_PLAN.md │ │ └── TEMPLATE_MINING_ANALYSIS.md │ ├── MCP_ESSENTIALS_README.md │ ├── MCP_QUICK_START_GUIDE.md │ ├── N8N_DEPLOYMENT.md │ ├── RAILWAY_DEPLOYMENT.md │ ├── README_CLAUDE_SETUP.md │ ├── README.md │ ├── tools-documentation-usage.md │ ├── VS_CODE_PROJECT_SETUP.md │ ├── WINDSURF_SETUP.md │ └── workflow-diff-examples.md ├── examples │ └── enhanced-documentation-demo.js ├── fetch_log.txt ├── LICENSE ├── MEMORY_N8N_UPDATE.md ├── MEMORY_TEMPLATE_UPDATE.md ├── monitor_fetch.sh ├── N8N_HTTP_STREAMABLE_SETUP.md ├── n8n-nodes.db ├── P0-R3-TEST-PLAN.md ├── package-lock.json ├── package.json ├── package.runtime.json ├── PRIVACY.md ├── railway.json ├── README.md ├── renovate.json ├── scripts │ ├── analyze-optimization.sh │ ├── audit-schema-coverage.ts │ ├── build-optimized.sh │ ├── compare-benchmarks.js │ ├── demo-optimization.sh │ ├── deploy-http.sh │ ├── deploy-to-vm.sh │ ├── export-webhook-workflows.ts │ ├── extract-changelog.js │ ├── extract-from-docker.js │ ├── extract-nodes-docker.sh │ ├── extract-nodes-simple.sh │ ├── format-benchmark-results.js │ ├── generate-benchmark-stub.js │ ├── generate-detailed-reports.js │ ├── generate-test-summary.js │ ├── http-bridge.js │ ├── mcp-http-client.js │ ├── migrate-nodes-fts.ts │ ├── migrate-tool-docs.ts │ ├── n8n-docs-mcp.service │ ├── nginx-n8n-mcp.conf │ ├── prebuild-fts5.ts │ ├── prepare-release.js │ ├── publish-npm-quick.sh │ ├── publish-npm.sh │ ├── quick-test.ts │ ├── run-benchmarks-ci.js │ ├── sync-runtime-version.js │ ├── test-ai-validation-debug.ts │ ├── test-code-node-enhancements.ts │ ├── test-code-node-fixes.ts │ ├── test-docker-config.sh │ ├── test-docker-fingerprint.ts │ ├── test-docker-optimization.sh │ ├── test-docker.sh │ ├── test-empty-connection-validation.ts │ ├── test-error-message-tracking.ts │ ├── test-error-output-validation.ts │ ├── test-error-validation.js │ ├── test-essentials.ts │ ├── test-expression-code-validation.ts │ ├── test-expression-format-validation.js │ ├── test-fts5-search.ts │ ├── test-fuzzy-fix.ts │ ├── test-fuzzy-simple.ts │ ├── test-helpers-validation.ts │ ├── test-http-search.ts │ ├── test-http.sh │ ├── test-jmespath-validation.ts │ ├── test-multi-tenant-simple.ts │ ├── test-multi-tenant.ts │ ├── test-n8n-integration.sh │ ├── test-node-info.js │ ├── test-node-type-validation.ts │ ├── test-nodes-base-prefix.ts │ ├── test-operation-validation.ts │ ├── test-optimized-docker.sh │ ├── test-release-automation.js │ ├── test-search-improvements.ts │ ├── test-security.ts │ ├── test-single-session.sh │ ├── test-sqljs-triggers.ts │ ├── test-telemetry-debug.ts │ ├── test-telemetry-direct.ts │ ├── test-telemetry-env.ts │ ├── test-telemetry-integration.ts │ ├── test-telemetry-no-select.ts │ ├── test-telemetry-security.ts │ ├── test-telemetry-simple.ts │ ├── test-typeversion-validation.ts │ ├── test-url-configuration.ts │ ├── test-user-id-persistence.ts │ ├── test-webhook-validation.ts │ ├── test-workflow-insert.ts │ ├── test-workflow-sanitizer.ts │ ├── test-workflow-tracking-debug.ts │ ├── update-and-publish-prep.sh │ ├── update-n8n-deps.js │ ├── update-readme-version.js │ ├── vitest-benchmark-json-reporter.js │ └── vitest-benchmark-reporter.ts ├── SECURITY.md ├── src │ ├── config │ │ └── n8n-api.ts │ ├── data │ │ └── canonical-ai-tool-examples.json │ ├── database │ │ ├── database-adapter.ts │ │ ├── migrations │ │ │ └── add-template-node-configs.sql │ │ ├── node-repository.ts │ │ ├── nodes.db │ │ ├── schema-optimized.sql │ │ └── schema.sql │ ├── errors │ │ └── validation-service-error.ts │ ├── http-server-single-session.ts │ ├── http-server.ts │ ├── index.ts │ ├── loaders │ │ └── node-loader.ts │ ├── mappers │ │ └── docs-mapper.ts │ ├── mcp │ │ ├── handlers-n8n-manager.ts │ │ ├── handlers-workflow-diff.ts │ │ ├── index.ts │ │ ├── server.ts │ │ ├── stdio-wrapper.ts │ │ ├── tool-docs │ │ │ ├── configuration │ │ │ │ ├── get-node-as-tool-info.ts │ │ │ │ ├── get-node-documentation.ts │ │ │ │ ├── get-node-essentials.ts │ │ │ │ ├── get-node-info.ts │ │ │ │ ├── get-property-dependencies.ts │ │ │ │ ├── index.ts │ │ │ │ └── search-node-properties.ts │ │ │ ├── discovery │ │ │ │ ├── get-database-statistics.ts │ │ │ │ ├── index.ts │ │ │ │ ├── list-ai-tools.ts │ │ │ │ ├── list-nodes.ts │ │ │ │ └── search-nodes.ts │ │ │ ├── guides │ │ │ │ ├── ai-agents-guide.ts │ │ │ │ └── index.ts │ │ │ ├── index.ts │ │ │ ├── system │ │ │ │ ├── index.ts │ │ │ │ ├── n8n-diagnostic.ts │ │ │ │ ├── n8n-health-check.ts │ │ │ │ ├── n8n-list-available-tools.ts │ │ │ │ └── tools-documentation.ts │ │ │ ├── templates │ │ │ │ ├── get-template.ts │ │ │ │ ├── get-templates-for-task.ts │ │ │ │ ├── index.ts │ │ │ │ ├── list-node-templates.ts │ │ │ │ ├── list-tasks.ts │ │ │ │ ├── search-templates-by-metadata.ts │ │ │ │ └── search-templates.ts │ │ │ ├── types.ts │ │ │ ├── validation │ │ │ │ ├── index.ts │ │ │ │ ├── validate-node-minimal.ts │ │ │ │ ├── validate-node-operation.ts │ │ │ │ ├── validate-workflow-connections.ts │ │ │ │ ├── validate-workflow-expressions.ts │ │ │ │ └── validate-workflow.ts │ │ │ └── workflow_management │ │ │ ├── index.ts │ │ │ ├── n8n-autofix-workflow.ts │ │ │ ├── n8n-create-workflow.ts │ │ │ ├── n8n-delete-execution.ts │ │ │ ├── n8n-delete-workflow.ts │ │ │ ├── n8n-get-execution.ts │ │ │ ├── n8n-get-workflow-details.ts │ │ │ ├── n8n-get-workflow-minimal.ts │ │ │ ├── n8n-get-workflow-structure.ts │ │ │ ├── n8n-get-workflow.ts │ │ │ ├── n8n-list-executions.ts │ │ │ ├── n8n-list-workflows.ts │ │ │ ├── n8n-trigger-webhook-workflow.ts │ │ │ ├── n8n-update-full-workflow.ts │ │ │ ├── n8n-update-partial-workflow.ts │ │ │ └── n8n-validate-workflow.ts │ │ ├── tools-documentation.ts │ │ ├── tools-n8n-friendly.ts │ │ ├── tools-n8n-manager.ts │ │ ├── tools.ts │ │ └── workflow-examples.ts │ ├── mcp-engine.ts │ ├── mcp-tools-engine.ts │ ├── n8n │ │ ├── MCPApi.credentials.ts │ │ └── MCPNode.node.ts │ ├── parsers │ │ ├── node-parser.ts │ │ ├── property-extractor.ts │ │ └── simple-parser.ts │ ├── scripts │ │ ├── debug-http-search.ts │ │ ├── extract-from-docker.ts │ │ ├── fetch-templates-robust.ts │ │ ├── fetch-templates.ts │ │ ├── rebuild-database.ts │ │ ├── rebuild-optimized.ts │ │ ├── rebuild.ts │ │ ├── sanitize-templates.ts │ │ ├── seed-canonical-ai-examples.ts │ │ ├── test-autofix-documentation.ts │ │ ├── test-autofix-workflow.ts │ │ ├── test-execution-filtering.ts │ │ ├── test-node-suggestions.ts │ │ ├── test-protocol-negotiation.ts │ │ ├── test-summary.ts │ │ ├── test-webhook-autofix.ts │ │ ├── validate.ts │ │ └── validation-summary.ts │ ├── services │ │ ├── ai-node-validator.ts │ │ ├── ai-tool-validators.ts │ │ ├── confidence-scorer.ts │ │ ├── config-validator.ts │ │ ├── enhanced-config-validator.ts │ │ ├── example-generator.ts │ │ ├── execution-processor.ts │ │ ├── expression-format-validator.ts │ │ ├── expression-validator.ts │ │ ├── n8n-api-client.ts │ │ ├── n8n-validation.ts │ │ ├── node-documentation-service.ts │ │ ├── node-similarity-service.ts │ │ ├── node-specific-validators.ts │ │ ├── operation-similarity-service.ts │ │ ├── property-dependencies.ts │ │ ├── property-filter.ts │ │ ├── resource-similarity-service.ts │ │ ├── sqlite-storage-service.ts │ │ ├── task-templates.ts │ │ ├── universal-expression-validator.ts │ │ ├── workflow-auto-fixer.ts │ │ ├── workflow-diff-engine.ts │ │ └── workflow-validator.ts │ ├── telemetry │ │ ├── batch-processor.ts │ │ ├── config-manager.ts │ │ ├── early-error-logger.ts │ │ ├── error-sanitization-utils.ts │ │ ├── error-sanitizer.ts │ │ ├── event-tracker.ts │ │ ├── event-validator.ts │ │ ├── index.ts │ │ ├── performance-monitor.ts │ │ ├── rate-limiter.ts │ │ ├── startup-checkpoints.ts │ │ ├── telemetry-error.ts │ │ ├── telemetry-manager.ts │ │ ├── telemetry-types.ts │ │ └── workflow-sanitizer.ts │ ├── templates │ │ ├── batch-processor.ts │ │ ├── metadata-generator.ts │ │ ├── README.md │ │ ├── template-fetcher.ts │ │ ├── template-repository.ts │ │ └── template-service.ts │ ├── types │ │ ├── index.ts │ │ ├── instance-context.ts │ │ ├── n8n-api.ts │ │ ├── node-types.ts │ │ └── workflow-diff.ts │ └── utils │ ├── auth.ts │ ├── bridge.ts │ ├── cache-utils.ts │ ├── console-manager.ts │ ├── documentation-fetcher.ts │ ├── enhanced-documentation-fetcher.ts │ ├── error-handler.ts │ ├── example-generator.ts │ ├── fixed-collection-validator.ts │ ├── logger.ts │ ├── mcp-client.ts │ ├── n8n-errors.ts │ ├── node-source-extractor.ts │ ├── node-type-normalizer.ts │ ├── node-type-utils.ts │ ├── node-utils.ts │ ├── npm-version-checker.ts │ ├── protocol-version.ts │ ├── simple-cache.ts │ ├── ssrf-protection.ts │ ├── template-node-resolver.ts │ ├── template-sanitizer.ts │ ├── url-detector.ts │ ├── validation-schemas.ts │ └── version.ts ├── test-output.txt ├── test-reinit-fix.sh ├── tests │ ├── __snapshots__ │ │ └── .gitkeep │ ├── auth.test.ts │ ├── benchmarks │ │ ├── database-queries.bench.ts │ │ ├── index.ts │ │ ├── mcp-tools.bench.ts │ │ ├── mcp-tools.bench.ts.disabled │ │ ├── mcp-tools.bench.ts.skip │ │ ├── node-loading.bench.ts.disabled │ │ ├── README.md │ │ ├── search-operations.bench.ts.disabled │ │ └── validation-performance.bench.ts.disabled │ ├── bridge.test.ts │ ├── comprehensive-extraction-test.js │ ├── data │ │ └── .gitkeep │ ├── debug-slack-doc.js │ ├── demo-enhanced-documentation.js │ ├── docker-tests-README.md │ ├── error-handler.test.ts │ ├── examples │ │ └── using-database-utils.test.ts │ ├── extracted-nodes-db │ │ ├── database-import.json │ │ ├── extraction-report.json │ │ ├── insert-nodes.sql │ │ ├── n8n-nodes-base__Airtable.json │ │ ├── n8n-nodes-base__Discord.json │ │ ├── n8n-nodes-base__Function.json │ │ ├── n8n-nodes-base__HttpRequest.json │ │ ├── n8n-nodes-base__If.json │ │ ├── n8n-nodes-base__Slack.json │ │ ├── n8n-nodes-base__SplitInBatches.json │ │ └── n8n-nodes-base__Webhook.json │ ├── factories │ │ ├── node-factory.ts │ │ └── property-definition-factory.ts │ ├── fixtures │ │ ├── .gitkeep │ │ ├── database │ │ │ └── test-nodes.json │ │ ├── factories │ │ │ ├── node.factory.ts │ │ │ └── parser-node.factory.ts │ │ └── template-configs.ts │ ├── helpers │ │ └── env-helpers.ts │ ├── http-server-auth.test.ts │ ├── integration │ │ ├── ai-validation │ │ │ ├── ai-agent-validation.test.ts │ │ │ ├── ai-tool-validation.test.ts │ │ │ ├── chat-trigger-validation.test.ts │ │ │ ├── e2e-validation.test.ts │ │ │ ├── helpers.ts │ │ │ ├── llm-chain-validation.test.ts │ │ │ ├── README.md │ │ │ └── TEST_REPORT.md │ │ ├── ci │ │ │ └── database-population.test.ts │ │ ├── database │ │ │ ├── connection-management.test.ts │ │ │ ├── empty-database.test.ts │ │ │ ├── fts5-search.test.ts │ │ │ ├── node-fts5-search.test.ts │ │ │ ├── node-repository.test.ts │ │ │ ├── performance.test.ts │ │ │ ├── template-node-configs.test.ts │ │ │ ├── template-repository.test.ts │ │ │ ├── test-utils.ts │ │ │ └── transactions.test.ts │ │ ├── database-integration.test.ts │ │ ├── docker │ │ │ ├── docker-config.test.ts │ │ │ ├── docker-entrypoint.test.ts │ │ │ └── test-helpers.ts │ │ ├── flexible-instance-config.test.ts │ │ ├── mcp │ │ │ └── template-examples-e2e.test.ts │ │ ├── mcp-protocol │ │ │ ├── basic-connection.test.ts │ │ │ ├── error-handling.test.ts │ │ │ ├── performance.test.ts │ │ │ ├── protocol-compliance.test.ts │ │ │ ├── README.md │ │ │ ├── session-management.test.ts │ │ │ ├── test-helpers.ts │ │ │ ├── tool-invocation.test.ts │ │ │ └── workflow-error-validation.test.ts │ │ ├── msw-setup.test.ts │ │ ├── n8n-api │ │ │ ├── executions │ │ │ │ ├── delete-execution.test.ts │ │ │ │ ├── get-execution.test.ts │ │ │ │ ├── list-executions.test.ts │ │ │ │ └── trigger-webhook.test.ts │ │ │ ├── scripts │ │ │ │ └── cleanup-orphans.ts │ │ │ ├── system │ │ │ │ ├── diagnostic.test.ts │ │ │ │ ├── health-check.test.ts │ │ │ │ └── list-tools.test.ts │ │ │ ├── test-connection.ts │ │ │ ├── types │ │ │ │ └── mcp-responses.ts │ │ │ ├── utils │ │ │ │ ├── cleanup-helpers.ts │ │ │ │ ├── credentials.ts │ │ │ │ ├── factories.ts │ │ │ │ ├── fixtures.ts │ │ │ │ ├── mcp-context.ts │ │ │ │ ├── n8n-client.ts │ │ │ │ ├── node-repository.ts │ │ │ │ ├── response-types.ts │ │ │ │ ├── test-context.ts │ │ │ │ └── webhook-workflows.ts │ │ │ └── workflows │ │ │ ├── autofix-workflow.test.ts │ │ │ ├── create-workflow.test.ts │ │ │ ├── delete-workflow.test.ts │ │ │ ├── get-workflow-details.test.ts │ │ │ ├── get-workflow-minimal.test.ts │ │ │ ├── get-workflow-structure.test.ts │ │ │ ├── get-workflow.test.ts │ │ │ ├── list-workflows.test.ts │ │ │ ├── smart-parameters.test.ts │ │ │ ├── update-partial-workflow.test.ts │ │ │ ├── update-workflow.test.ts │ │ │ └── validate-workflow.test.ts │ │ ├── security │ │ │ ├── command-injection-prevention.test.ts │ │ │ └── rate-limiting.test.ts │ │ ├── setup │ │ │ ├── integration-setup.ts │ │ │ └── msw-test-server.ts │ │ ├── telemetry │ │ │ ├── docker-user-id-stability.test.ts │ │ │ └── mcp-telemetry.test.ts │ │ ├── templates │ │ │ └── metadata-operations.test.ts │ │ └── workflow-creation-node-type-format.test.ts │ ├── logger.test.ts │ ├── MOCKING_STRATEGY.md │ ├── mocks │ │ ├── n8n-api │ │ │ ├── data │ │ │ │ ├── credentials.ts │ │ │ │ ├── executions.ts │ │ │ │ └── workflows.ts │ │ │ ├── handlers.ts │ │ │ └── index.ts │ │ └── README.md │ ├── node-storage-export.json │ ├── setup │ │ ├── global-setup.ts │ │ ├── msw-setup.ts │ │ ├── TEST_ENV_DOCUMENTATION.md │ │ └── test-env.ts │ ├── test-database-extraction.js │ ├── test-direct-extraction.js │ ├── test-enhanced-documentation.js │ ├── test-enhanced-integration.js │ ├── test-mcp-extraction.js │ ├── test-mcp-server-extraction.js │ ├── test-mcp-tools-integration.js │ ├── test-node-documentation-service.js │ ├── test-node-list.js │ ├── test-package-info.js │ ├── test-parsing-operations.js │ ├── test-slack-node-complete.js │ ├── test-small-rebuild.js │ ├── test-sqlite-search.js │ ├── test-storage-system.js │ ├── unit │ │ ├── __mocks__ │ │ │ ├── n8n-nodes-base.test.ts │ │ │ ├── n8n-nodes-base.ts │ │ │ └── README.md │ │ ├── database │ │ │ ├── __mocks__ │ │ │ │ └── better-sqlite3.ts │ │ │ ├── database-adapter-unit.test.ts │ │ │ ├── node-repository-core.test.ts │ │ │ ├── node-repository-operations.test.ts │ │ │ ├── node-repository-outputs.test.ts │ │ │ ├── README.md │ │ │ └── template-repository-core.test.ts │ │ ├── docker │ │ │ ├── config-security.test.ts │ │ │ ├── edge-cases.test.ts │ │ │ ├── parse-config.test.ts │ │ │ └── serve-command.test.ts │ │ ├── errors │ │ │ └── validation-service-error.test.ts │ │ ├── examples │ │ │ └── using-n8n-nodes-base-mock.test.ts │ │ ├── flexible-instance-security-advanced.test.ts │ │ ├── flexible-instance-security.test.ts │ │ ├── http-server │ │ │ └── multi-tenant-support.test.ts │ │ ├── http-server-n8n-mode.test.ts │ │ ├── http-server-n8n-reinit.test.ts │ │ ├── http-server-session-management.test.ts │ │ ├── loaders │ │ │ └── node-loader.test.ts │ │ ├── mappers │ │ │ └── docs-mapper.test.ts │ │ ├── mcp │ │ │ ├── get-node-essentials-examples.test.ts │ │ │ ├── handlers-n8n-manager-simple.test.ts │ │ │ ├── handlers-n8n-manager.test.ts │ │ │ ├── handlers-workflow-diff.test.ts │ │ │ ├── lru-cache-behavior.test.ts │ │ │ ├── multi-tenant-tool-listing.test.ts.disabled │ │ │ ├── parameter-validation.test.ts │ │ │ ├── search-nodes-examples.test.ts │ │ │ ├── tools-documentation.test.ts │ │ │ └── tools.test.ts │ │ ├── monitoring │ │ │ └── cache-metrics.test.ts │ │ ├── MULTI_TENANT_TEST_COVERAGE.md │ │ ├── multi-tenant-integration.test.ts │ │ ├── parsers │ │ │ ├── node-parser-outputs.test.ts │ │ │ ├── node-parser.test.ts │ │ │ ├── property-extractor.test.ts │ │ │ └── simple-parser.test.ts │ │ ├── scripts │ │ │ └── fetch-templates-extraction.test.ts │ │ ├── services │ │ │ ├── ai-node-validator.test.ts │ │ │ ├── ai-tool-validators.test.ts │ │ │ ├── confidence-scorer.test.ts │ │ │ ├── config-validator-basic.test.ts │ │ │ ├── config-validator-edge-cases.test.ts │ │ │ ├── config-validator-node-specific.test.ts │ │ │ ├── config-validator-security.test.ts │ │ │ ├── debug-validator.test.ts │ │ │ ├── enhanced-config-validator-integration.test.ts │ │ │ ├── enhanced-config-validator-operations.test.ts │ │ │ ├── enhanced-config-validator.test.ts │ │ │ ├── example-generator.test.ts │ │ │ ├── execution-processor.test.ts │ │ │ ├── expression-format-validator.test.ts │ │ │ ├── expression-validator-edge-cases.test.ts │ │ │ ├── expression-validator.test.ts │ │ │ ├── fixed-collection-validation.test.ts │ │ │ ├── loop-output-edge-cases.test.ts │ │ │ ├── n8n-api-client.test.ts │ │ │ ├── n8n-validation.test.ts │ │ │ ├── node-similarity-service.test.ts │ │ │ ├── node-specific-validators.test.ts │ │ │ ├── operation-similarity-service-comprehensive.test.ts │ │ │ ├── operation-similarity-service.test.ts │ │ │ ├── property-dependencies.test.ts │ │ │ ├── property-filter-edge-cases.test.ts │ │ │ ├── property-filter.test.ts │ │ │ ├── resource-similarity-service-comprehensive.test.ts │ │ │ ├── resource-similarity-service.test.ts │ │ │ ├── task-templates.test.ts │ │ │ ├── template-service.test.ts │ │ │ ├── universal-expression-validator.test.ts │ │ │ ├── validation-fixes.test.ts │ │ │ ├── workflow-auto-fixer.test.ts │ │ │ ├── workflow-diff-engine.test.ts │ │ │ ├── workflow-fixed-collection-validation.test.ts │ │ │ ├── workflow-validator-comprehensive.test.ts │ │ │ ├── workflow-validator-edge-cases.test.ts │ │ │ ├── workflow-validator-error-outputs.test.ts │ │ │ ├── workflow-validator-expression-format.test.ts │ │ │ ├── workflow-validator-loops-simple.test.ts │ │ │ ├── workflow-validator-loops.test.ts │ │ │ ├── workflow-validator-mocks.test.ts │ │ │ ├── workflow-validator-performance.test.ts │ │ │ ├── workflow-validator-with-mocks.test.ts │ │ │ └── workflow-validator.test.ts │ │ ├── telemetry │ │ │ ├── batch-processor.test.ts │ │ │ ├── config-manager.test.ts │ │ │ ├── event-tracker.test.ts │ │ │ ├── event-validator.test.ts │ │ │ ├── rate-limiter.test.ts │ │ │ ├── telemetry-error.test.ts │ │ │ ├── telemetry-manager.test.ts │ │ │ ├── v2.18.3-fixes-verification.test.ts │ │ │ └── workflow-sanitizer.test.ts │ │ ├── templates │ │ │ ├── batch-processor.test.ts │ │ │ ├── metadata-generator.test.ts │ │ │ ├── template-repository-metadata.test.ts │ │ │ └── template-repository-security.test.ts │ │ ├── test-env-example.test.ts │ │ ├── test-infrastructure.test.ts │ │ ├── types │ │ │ ├── instance-context-coverage.test.ts │ │ │ └── instance-context-multi-tenant.test.ts │ │ ├── utils │ │ │ ├── auth-timing-safe.test.ts │ │ │ ├── cache-utils.test.ts │ │ │ ├── console-manager.test.ts │ │ │ ├── database-utils.test.ts │ │ │ ├── fixed-collection-validator.test.ts │ │ │ ├── n8n-errors.test.ts │ │ │ ├── node-type-normalizer.test.ts │ │ │ ├── node-type-utils.test.ts │ │ │ ├── node-utils.test.ts │ │ │ ├── simple-cache-memory-leak-fix.test.ts │ │ │ ├── ssrf-protection.test.ts │ │ │ └── template-node-resolver.test.ts │ │ └── validation-fixes.test.ts │ └── utils │ ├── assertions.ts │ ├── builders │ │ └── workflow.builder.ts │ ├── data-generators.ts │ ├── database-utils.ts │ ├── README.md │ └── test-helpers.ts ├── thumbnail.png ├── tsconfig.build.json ├── tsconfig.json ├── types │ ├── mcp.d.ts │ └── test-env.d.ts ├── verify-telemetry-fix.js ├── versioned-nodes.md ├── vitest.config.benchmark.ts ├── vitest.config.integration.ts └── vitest.config.ts ``` # Files -------------------------------------------------------------------------------- /docs/DEPENDENCY_UPDATES.md: -------------------------------------------------------------------------------- ```markdown # n8n Dependency Updates Guide This guide explains how n8n-MCP keeps its n8n dependencies up to date with the weekly n8n release cycle. ## 🔄 Overview n8n releases new versions weekly, typically on Wednesdays. To ensure n8n-MCP stays compatible and includes the latest nodes, we've implemented automated dependency update systems. ## 🚀 Update Methods ### 1. Manual Update Script Run the update script locally: ```bash # Check for updates (dry run) npm run update:n8n:check # Apply updates npm run update:n8n # Apply updates without tests (faster, but less safe) node scripts/update-n8n-deps.js --skip-tests ``` The script will: 1. Check npm for latest versions of n8n packages 2. Update package.json 3. Run `npm install` to update lock file 4. Rebuild the node database 5. Run validation tests 6. Generate an update summary ### 2. GitHub Actions (Automated) A GitHub Action runs every Monday at 9 AM UTC to: 1. Check for n8n updates 2. Apply updates if available 3. Create a PR with the changes 4. Run all tests in the PR You can also trigger it manually: 1. Go to Actions → "Update n8n Dependencies" 2. Click "Run workflow" 3. Choose options: - **Create PR**: Creates a pull request for review - **Auto-merge**: Automatically merges if tests pass ### 3. Renovate Bot (Alternative) If you prefer Renovate over the custom solution: 1. Enable Renovate on your repository 2. The included `renovate.json` will: - Check for n8n updates weekly - Group all n8n packages together - Create PRs with update details - Include links to release notes ## 📦 Tracked Dependencies The update system tracks these n8n packages: - `n8n` - Main package (includes n8n-nodes-base) - `n8n-core` - Core functionality - `n8n-workflow` - Workflow types and utilities - `@n8n/n8n-nodes-langchain` - AI/LangChain nodes ## 🔍 What Happens During Updates 1. **Version Check**: Compares current vs latest npm versions 2. **Package Update**: Updates package.json with new versions 3. **Dependency Install**: Runs npm install to update lock file 4. **Database Rebuild**: Rebuilds the SQLite database with new node definitions 5. **Validation**: Runs tests to ensure: - All nodes load correctly - Properties are extracted - Critical nodes work - Database is valid ## ⚠️ Important Considerations ### Breaking Changes Always review n8n release notes for breaking changes: - Check [n8n Release Notes](https://docs.n8n.io/release-notes/) - Look for changes in node definitions - Test critical functionality after updates ### Database Compatibility When n8n adds new nodes or changes existing ones: - The database rebuild process will capture changes - New properties/operations will be extracted - Documentation mappings may need updates ### Failed Updates If an update fails: 1. **Check the logs** for specific errors 2. **Review release notes** for breaking changes 3. **Run validation manually**: ```bash npm run build npm run rebuild npm run validate ``` 4. **Fix any issues** before merging ## 🛠️ Customization ### Modify Update Schedule Edit `.github/workflows/update-n8n-deps.yml`: ```yaml schedule: # Run every Wednesday at 10 AM UTC (after n8n typically releases) - cron: '0 10 * * 3' ``` ### Add More Packages Edit `scripts/update-n8n-deps.js`: ```javascript this.n8nPackages = [ 'n8n', 'n8n-core', 'n8n-workflow', '@n8n/n8n-nodes-langchain', // Add more packages here ]; ``` ### Customize PR Creation Modify the GitHub Action to: - Add more reviewers - Change labels - Update PR template - Add additional checks ## 📊 Monitoring Updates ### Check Update Status ```bash # See current versions npm ls n8n n8n-core n8n-workflow @n8n/n8n-nodes-langchain # Check latest available npm view n8n version npm view n8n-core version npm view n8n-workflow version npm view @n8n/n8n-nodes-langchain version ``` ### View Update History - Check GitHub Actions history - Review merged PRs with "dependencies" label - Look at git log for "chore: update n8n dependencies" commits ## 🚨 Troubleshooting ### Update Script Fails ```bash # Run with more logging LOG_LEVEL=debug node scripts/update-n8n-deps.js # Skip tests to isolate issues node scripts/update-n8n-deps.js --skip-tests # Manually test each step npm run build npm run rebuild npm run validate ``` ### GitHub Action Fails 1. Check Action logs in GitHub 2. Run the update locally to reproduce 3. Fix issues and push manually 4. Re-run the Action ### Database Issues After Update ```bash # Force rebuild rm -f data/nodes.db npm run rebuild # Check specific nodes npm run test-nodes # Validate database npm run validate ``` ## 🔐 Security - Updates are tested before merging - PRs require review (unless auto-merge is enabled) - All changes are tracked in git - Rollback is possible via git revert ## 🎯 Best Practices 1. **Review PRs carefully** - Check for breaking changes 2. **Test after updates** - Ensure core functionality works 3. **Monitor n8n releases** - Stay informed about major changes 4. **Update regularly** - Weekly updates are easier than monthly 5. **Document issues** - Help future updates by documenting problems ## 📝 Manual Update Checklist If updating manually: - [ ] Check n8n release notes - [ ] Run `npm run update:n8n:check` - [ ] Review proposed changes - [ ] Run `npm run update:n8n` - [ ] Test core functionality - [ ] Commit and push changes - [ ] Create PR with update details - [ ] Run full test suite - [ ] Merge after review ``` -------------------------------------------------------------------------------- /src/scripts/validation-summary.ts: -------------------------------------------------------------------------------- ```typescript #!/usr/bin/env node /** * Run validation on templates and provide a clean summary */ import { existsSync } from 'fs'; import path from 'path'; import { NodeRepository } from '../database/node-repository'; import { createDatabaseAdapter } from '../database/database-adapter'; import { WorkflowValidator } from '../services/workflow-validator'; import { EnhancedConfigValidator } from '../services/enhanced-config-validator'; import { TemplateRepository } from '../templates/template-repository'; import { Logger } from '../utils/logger'; const logger = new Logger({ prefix: '[validation-summary]' }); async function runValidationSummary() { const dbPath = path.join(process.cwd(), 'data', 'nodes.db'); if (!existsSync(dbPath)) { logger.error('Database not found. Run npm run rebuild first.'); process.exit(1); } const db = await createDatabaseAdapter(dbPath); const repository = new NodeRepository(db); const templateRepository = new TemplateRepository(db); const validator = new WorkflowValidator( repository, EnhancedConfigValidator ); try { const templates = await templateRepository.getAllTemplates(50); const results = { total: templates.length, valid: 0, invalid: 0, noErrors: 0, errorCategories: { unknownNodes: 0, missingRequired: 0, expressionErrors: 0, connectionErrors: 0, cycles: 0, other: 0 }, commonUnknownNodes: new Map<string, number>(), stickyNoteIssues: 0 }; for (const template of templates) { try { const workflow = JSON.parse(template.workflow_json || '{}'); const validationResult = await validator.validateWorkflow(workflow, { profile: 'minimal' // Use minimal profile to focus on critical errors }); if (validationResult.valid) { results.valid++; } else { results.invalid++; } if (validationResult.errors.length === 0) { results.noErrors++; } // Categorize errors validationResult.errors.forEach((error: any) => { const errorMsg = typeof error.message === 'string' ? error.message : JSON.stringify(error.message); if (errorMsg.includes('Unknown node type')) { results.errorCategories.unknownNodes++; const match = errorMsg.match(/Unknown node type: (.+)/); if (match) { const nodeType = match[1]; results.commonUnknownNodes.set(nodeType, (results.commonUnknownNodes.get(nodeType) || 0) + 1); } } else if (errorMsg.includes('missing_required')) { results.errorCategories.missingRequired++; if (error.nodeName?.includes('Sticky Note')) { results.stickyNoteIssues++; } } else if (errorMsg.includes('Expression error')) { results.errorCategories.expressionErrors++; } else if (errorMsg.includes('connection') || errorMsg.includes('Connection')) { results.errorCategories.connectionErrors++; } else if (errorMsg.includes('cycle')) { results.errorCategories.cycles++; } else { results.errorCategories.other++; } }); } catch (error) { results.invalid++; } } // Print summary console.log('\n' + '='.repeat(80)); console.log('WORKFLOW VALIDATION SUMMARY'); console.log('='.repeat(80)); console.log(`\nTemplates analyzed: ${results.total}`); console.log(`Valid workflows: ${results.valid} (${((results.valid / results.total) * 100).toFixed(1)}%)`); console.log(`Workflows without errors: ${results.noErrors} (${((results.noErrors / results.total) * 100).toFixed(1)}%)`); console.log('\nError Categories:'); console.log(` - Unknown nodes: ${results.errorCategories.unknownNodes}`); console.log(` - Missing required properties: ${results.errorCategories.missingRequired}`); console.log(` (Sticky note issues: ${results.stickyNoteIssues})`); console.log(` - Expression errors: ${results.errorCategories.expressionErrors}`); console.log(` - Connection errors: ${results.errorCategories.connectionErrors}`); console.log(` - Workflow cycles: ${results.errorCategories.cycles}`); console.log(` - Other errors: ${results.errorCategories.other}`); if (results.commonUnknownNodes.size > 0) { console.log('\nTop Unknown Node Types:'); const sortedNodes = Array.from(results.commonUnknownNodes.entries()) .sort((a, b) => b[1] - a[1]) .slice(0, 10); sortedNodes.forEach(([nodeType, count]) => { console.log(` - ${nodeType} (${count} occurrences)`); }); } console.log('\nKey Insights:'); const stickyNotePercent = ((results.stickyNoteIssues / results.errorCategories.missingRequired) * 100).toFixed(1); console.log(` - ${stickyNotePercent}% of missing required property errors are from Sticky Notes`); console.log(` - Most workflows have some validation warnings (best practices)`); console.log(` - Expression validation is working well`); console.log(` - Node type normalization is handling most cases correctly`); } catch (error) { logger.error('Failed to run validation summary:', error); process.exit(1); } finally { db.close(); } } // Run summary runValidationSummary().catch(error => { logger.error('Summary failed:', error); process.exit(1); }); ``` -------------------------------------------------------------------------------- /scripts/test-url-configuration.ts: -------------------------------------------------------------------------------- ```typescript #!/usr/bin/env node /** * Test script for URL configuration in n8n-MCP HTTP server * Tests various BASE_URL, TRUST_PROXY, and proxy header scenarios */ import axios from 'axios'; import { spawn } from 'child_process'; import { logger } from '../src/utils/logger'; interface TestCase { name: string; env: Record<string, string>; expectedUrls?: { health: string; mcp: string; }; proxyHeaders?: Record<string, string>; } const testCases: TestCase[] = [ { name: 'Default configuration (no BASE_URL)', env: { MCP_MODE: 'http', AUTH_TOKEN: 'test-token-for-testing-only', PORT: '3001' }, expectedUrls: { health: 'http://localhost:3001/health', mcp: 'http://localhost:3001/mcp' } }, { name: 'With BASE_URL configured', env: { MCP_MODE: 'http', AUTH_TOKEN: 'test-token-for-testing-only', PORT: '3002', BASE_URL: 'https://n8n-mcp.example.com' }, expectedUrls: { health: 'https://n8n-mcp.example.com/health', mcp: 'https://n8n-mcp.example.com/mcp' } }, { name: 'With PUBLIC_URL configured', env: { MCP_MODE: 'http', AUTH_TOKEN: 'test-token-for-testing-only', PORT: '3003', PUBLIC_URL: 'https://api.company.com/mcp' }, expectedUrls: { health: 'https://api.company.com/mcp/health', mcp: 'https://api.company.com/mcp/mcp' } }, { name: 'With TRUST_PROXY and proxy headers', env: { MCP_MODE: 'http', AUTH_TOKEN: 'test-token-for-testing-only', PORT: '3004', TRUST_PROXY: '1' }, proxyHeaders: { 'X-Forwarded-Proto': 'https', 'X-Forwarded-Host': 'proxy.example.com' } }, { name: 'Fixed HTTP implementation', env: { MCP_MODE: 'http', USE_FIXED_HTTP: 'true', AUTH_TOKEN: 'test-token-for-testing-only', PORT: '3005', BASE_URL: 'https://fixed.example.com' }, expectedUrls: { health: 'https://fixed.example.com/health', mcp: 'https://fixed.example.com/mcp' } } ]; async function runTest(testCase: TestCase): Promise<void> { console.log(`\n🧪 Testing: ${testCase.name}`); console.log('Environment:', testCase.env); const serverProcess = spawn('node', ['dist/mcp/index.js'], { env: { ...process.env, ...testCase.env } }); let serverOutput = ''; let serverStarted = false; return new Promise((resolve, reject) => { const timeout = setTimeout(() => { serverProcess.kill(); reject(new Error('Server startup timeout')); }, 10000); serverProcess.stdout.on('data', (data) => { const output = data.toString(); serverOutput += output; if (output.includes('Press Ctrl+C to stop the server')) { serverStarted = true; clearTimeout(timeout); // Give server a moment to fully initialize setTimeout(async () => { try { // Test root endpoint const rootUrl = `http://localhost:${testCase.env.PORT}/`; const rootResponse = await axios.get(rootUrl, { headers: testCase.proxyHeaders || {} }); console.log('✅ Root endpoint response:'); console.log(` - Endpoints: ${JSON.stringify(rootResponse.data.endpoints, null, 2)}`); // Test health endpoint const healthUrl = `http://localhost:${testCase.env.PORT}/health`; const healthResponse = await axios.get(healthUrl); console.log(`✅ Health endpoint status: ${healthResponse.data.status}`); // Test MCP info endpoint const mcpUrl = `http://localhost:${testCase.env.PORT}/mcp`; const mcpResponse = await axios.get(mcpUrl); console.log(`✅ MCP info endpoint: ${mcpResponse.data.description}`); // Check console output if (testCase.expectedUrls) { const outputContainsExpectedUrls = serverOutput.includes(testCase.expectedUrls.health) && serverOutput.includes(testCase.expectedUrls.mcp); if (outputContainsExpectedUrls) { console.log('✅ Console output shows expected URLs'); } else { console.log('❌ Console output does not show expected URLs'); console.log('Expected:', testCase.expectedUrls); } } serverProcess.kill(); resolve(); } catch (error) { console.error('❌ Test failed:', error instanceof Error ? error.message : String(error)); serverProcess.kill(); reject(error); } }, 500); } }); serverProcess.stderr.on('data', (data) => { console.error('Server error:', data.toString()); }); serverProcess.on('close', (code) => { if (!serverStarted) { reject(new Error(`Server exited with code ${code} before starting`)); } else { resolve(); } }); }); } async function main() { console.log('🚀 n8n-MCP URL Configuration Test Suite'); console.log('======================================'); for (const testCase of testCases) { try { await runTest(testCase); console.log('✅ Test passed\n'); } catch (error) { console.error('❌ Test failed:', error instanceof Error ? error.message : String(error)); console.log('\n'); } } console.log('✨ All tests completed'); } main().catch(console.error); ``` -------------------------------------------------------------------------------- /scripts/generate-test-summary.js: -------------------------------------------------------------------------------- ```javascript #!/usr/bin/env node import { readFileSync, existsSync } from 'fs'; import { resolve } from 'path'; /** * Generate a markdown summary of test results for PR comments */ function generateTestSummary() { const results = { tests: null, coverage: null, benchmarks: null, timestamp: new Date().toISOString() }; // Read test results const testResultPath = resolve(process.cwd(), 'test-results/results.json'); if (existsSync(testResultPath)) { try { const testData = JSON.parse(readFileSync(testResultPath, 'utf-8')); const totalTests = testData.numTotalTests || 0; const passedTests = testData.numPassedTests || 0; const failedTests = testData.numFailedTests || 0; const skippedTests = testData.numSkippedTests || 0; const duration = testData.duration || 0; results.tests = { total: totalTests, passed: passedTests, failed: failedTests, skipped: skippedTests, duration: duration, success: failedTests === 0 }; } catch (error) { console.error('Error reading test results:', error); } } // Read coverage results const coveragePath = resolve(process.cwd(), 'coverage/coverage-summary.json'); if (existsSync(coveragePath)) { try { const coverageData = JSON.parse(readFileSync(coveragePath, 'utf-8')); const total = coverageData.total; results.coverage = { lines: total.lines.pct, statements: total.statements.pct, functions: total.functions.pct, branches: total.branches.pct }; } catch (error) { console.error('Error reading coverage results:', error); } } // Read benchmark results const benchmarkPath = resolve(process.cwd(), 'benchmark-results.json'); if (existsSync(benchmarkPath)) { try { const benchmarkData = JSON.parse(readFileSync(benchmarkPath, 'utf-8')); const benchmarks = []; for (const file of benchmarkData.files || []) { for (const group of file.groups || []) { for (const benchmark of group.benchmarks || []) { benchmarks.push({ name: `${group.name} - ${benchmark.name}`, mean: benchmark.result.mean, ops: benchmark.result.hz }); } } } results.benchmarks = benchmarks; } catch (error) { console.error('Error reading benchmark results:', error); } } // Generate markdown summary let summary = '## Test Results Summary\n\n'; // Test results if (results.tests) { const { total, passed, failed, skipped, duration, success } = results.tests; const emoji = success ? '✅' : '❌'; const status = success ? 'PASSED' : 'FAILED'; summary += `### ${emoji} Tests ${status}\n\n`; summary += `| Metric | Value |\n`; summary += `|--------|-------|\n`; summary += `| Total Tests | ${total} |\n`; summary += `| Passed | ${passed} |\n`; summary += `| Failed | ${failed} |\n`; summary += `| Skipped | ${skipped} |\n`; summary += `| Duration | ${(duration / 1000).toFixed(2)}s |\n\n`; } // Coverage results if (results.coverage) { const { lines, statements, functions, branches } = results.coverage; const avgCoverage = (lines + statements + functions + branches) / 4; const emoji = avgCoverage >= 80 ? '✅' : avgCoverage >= 60 ? '⚠️' : '❌'; summary += `### ${emoji} Coverage Report\n\n`; summary += `| Type | Coverage |\n`; summary += `|------|----------|\n`; summary += `| Lines | ${lines.toFixed(2)}% |\n`; summary += `| Statements | ${statements.toFixed(2)}% |\n`; summary += `| Functions | ${functions.toFixed(2)}% |\n`; summary += `| Branches | ${branches.toFixed(2)}% |\n`; summary += `| **Average** | **${avgCoverage.toFixed(2)}%** |\n\n`; } // Benchmark results if (results.benchmarks && results.benchmarks.length > 0) { summary += `### ⚡ Benchmark Results\n\n`; summary += `| Benchmark | Ops/sec | Mean (ms) |\n`; summary += `|-----------|---------|------------|\n`; for (const bench of results.benchmarks.slice(0, 10)) { // Show top 10 const opsFormatted = bench.ops.toLocaleString('en-US', { maximumFractionDigits: 0 }); const meanFormatted = (bench.mean * 1000).toFixed(3); summary += `| ${bench.name} | ${opsFormatted} | ${meanFormatted} |\n`; } if (results.benchmarks.length > 10) { summary += `\n*...and ${results.benchmarks.length - 10} more benchmarks*\n`; } summary += '\n'; } // Links to artifacts const runId = process.env.GITHUB_RUN_ID; const runNumber = process.env.GITHUB_RUN_NUMBER; const sha = process.env.GITHUB_SHA; if (runId) { summary += `### 📊 Artifacts\n\n`; summary += `- 📄 [Test Results](https://github.com/${process.env.GITHUB_REPOSITORY}/actions/runs/${runId})\n`; summary += `- 📊 [Coverage Report](https://github.com/${process.env.GITHUB_REPOSITORY}/actions/runs/${runId})\n`; summary += `- ⚡ [Benchmark Results](https://github.com/${process.env.GITHUB_REPOSITORY}/actions/runs/${runId})\n\n`; } // Metadata summary += `---\n`; summary += `*Generated at ${new Date().toUTCString()}*\n`; if (sha) { summary += `*Commit: ${sha.substring(0, 7)}*\n`; } if (runNumber) { summary += `*Run: #${runNumber}*\n`; } return summary; } // Generate and output summary const summary = generateTestSummary(); console.log(summary); // Also write to file for artifact import { writeFileSync } from 'fs'; writeFileSync('test-summary.md', summary); ``` -------------------------------------------------------------------------------- /tests/setup/msw-setup.ts: -------------------------------------------------------------------------------- ```typescript /** * MSW Setup for Tests * * NOTE: This file is NO LONGER loaded globally via vitest.config.ts to prevent * hanging in CI. Instead: * - Unit tests run without MSW * - Integration tests use ./tests/integration/setup/integration-setup.ts * * This file is kept for backwards compatibility and can be imported directly * by specific tests that need MSW functionality. */ import { setupServer } from 'msw/node'; import { HttpResponse, http, RequestHandler } from 'msw'; import { afterAll, afterEach, beforeAll } from 'vitest'; // Import handlers from our centralized location import { handlers as defaultHandlers } from '../mocks/n8n-api/handlers'; // Create the MSW server instance with default handlers export const server = setupServer(...defaultHandlers); // Enable request logging in development/debugging if (process.env.MSW_DEBUG === 'true' || process.env.TEST_DEBUG === 'true') { server.events.on('request:start', ({ request }) => { console.log('[MSW] %s %s', request.method, request.url); }); server.events.on('request:match', ({ request }) => { console.log('[MSW] Request matched:', request.method, request.url); }); server.events.on('request:unhandled', ({ request }) => { console.warn('[MSW] Unhandled request:', request.method, request.url); }); server.events.on('response:mocked', ({ request, response }) => { console.log('[MSW] Mocked response for %s %s: %d', request.method, request.url, response.status ); }); } // Start server before all tests beforeAll(() => { server.listen({ onUnhandledRequest: process.env.CI === 'true' ? 'error' : 'warn', }); }); // Reset handlers after each test (important for test isolation) afterEach(() => { server.resetHandlers(); }); // Clean up after all tests afterAll(() => { server.close(); }); /** * Utility function to add temporary handlers for specific tests * @param handlers Array of MSW request handlers */ export function useHandlers(...handlers: RequestHandler[]) { server.use(...handlers); } /** * Utility to wait for a specific request to be made * Useful for testing async operations */ export function waitForRequest(method: string, url: string | RegExp, timeout = 5000): Promise<Request> { return new Promise((resolve, reject) => { let timeoutId: NodeJS.Timeout; const handler = ({ request }: { request: Request }) => { if (request.method === method && (typeof url === 'string' ? request.url === url : url.test(request.url))) { clearTimeout(timeoutId); server.events.removeListener('request:match', handler); resolve(request); } }; // Set timeout timeoutId = setTimeout(() => { server.events.removeListener('request:match', handler); reject(new Error(`Timeout waiting for ${method} request to ${url}`)); }, timeout); server.events.on('request:match', handler); }); } /** * Create a handler factory for common n8n API patterns */ export const n8nHandlerFactory = { // Workflow endpoints workflow: { list: (workflows: any[] = []) => http.get('*/api/v1/workflows', () => { return HttpResponse.json({ data: workflows, nextCursor: null }); }), get: (id: string, workflow: any) => http.get(`*/api/v1/workflows/${id}`, () => { return HttpResponse.json({ data: workflow }); }), create: () => http.post('*/api/v1/workflows', async ({ request }) => { const body = await request.json() as Record<string, any>; return HttpResponse.json({ data: { id: 'mock-workflow-id', ...body, createdAt: new Date().toISOString(), updatedAt: new Date().toISOString() } }); }), update: (id: string) => http.patch(`*/api/v1/workflows/${id}`, async ({ request }) => { const body = await request.json() as Record<string, any>; return HttpResponse.json({ data: { id, ...body, updatedAt: new Date().toISOString() } }); }), delete: (id: string) => http.delete(`*/api/v1/workflows/${id}`, () => { return HttpResponse.json({ success: true }); }), }, // Execution endpoints execution: { list: (executions: any[] = []) => http.get('*/api/v1/executions', () => { return HttpResponse.json({ data: executions, nextCursor: null }); }), get: (id: string, execution: any) => http.get(`*/api/v1/executions/${id}`, () => { return HttpResponse.json({ data: execution }); }), }, // Webhook endpoints webhook: { trigger: (webhookUrl: string, response: any = { success: true }) => http.all(webhookUrl, () => { return HttpResponse.json(response); }), }, // Error responses error: { notFound: (resource: string = 'resource') => HttpResponse.json( { message: `${resource} not found`, code: 'NOT_FOUND' }, { status: 404 } ), unauthorized: () => HttpResponse.json( { message: 'Unauthorized', code: 'UNAUTHORIZED' }, { status: 401 } ), serverError: (message: string = 'Internal server error') => HttpResponse.json( { message, code: 'INTERNAL_ERROR' }, { status: 500 } ), validationError: (errors: any) => HttpResponse.json( { message: 'Validation failed', errors, code: 'VALIDATION_ERROR' }, { status: 400 } ), } }; // Export for use in tests export { http, HttpResponse } from 'msw'; ``` -------------------------------------------------------------------------------- /scripts/test-typeversion-validation.ts: -------------------------------------------------------------------------------- ```typescript #!/usr/bin/env ts-node /** * Test script for typeVersion validation in workflow validator */ import { NodeRepository } from '../src/database/node-repository'; import { createDatabaseAdapter } from '../src/database/database-adapter'; import { WorkflowValidator } from '../src/services/workflow-validator'; import { EnhancedConfigValidator } from '../src/services/enhanced-config-validator'; import { Logger } from '../src/utils/logger'; const logger = new Logger({ prefix: '[test-typeversion]' }); // Test workflows with various typeVersion scenarios const testWorkflows = { // Workflow with missing typeVersion on versioned nodes missingTypeVersion: { name: 'Missing typeVersion Test', nodes: [ { id: 'webhook_1', name: 'Webhook', type: 'n8n-nodes-base.webhook', position: [250, 300], parameters: { path: '/test', httpMethod: 'POST' } // Missing typeVersion - should error }, { id: 'execute_1', name: 'Execute Command', type: 'n8n-nodes-base.executeCommand', position: [450, 300], parameters: { command: 'echo "test"' } // Missing typeVersion - should error } ], connections: { 'Webhook': { main: [[{ node: 'Execute Command', type: 'main', index: 0 }]] } } }, // Workflow with outdated typeVersion outdatedTypeVersion: { name: 'Outdated typeVersion Test', nodes: [ { id: 'http_1', name: 'HTTP Request', type: 'n8n-nodes-base.httpRequest', typeVersion: 1, // Outdated - latest is likely 4+ position: [250, 300], parameters: { url: 'https://example.com', method: 'GET' } }, { id: 'code_1', name: 'Code', type: 'n8n-nodes-base.code', typeVersion: 1, // Outdated - latest is likely 2 position: [450, 300], parameters: { jsCode: 'return items;' } } ], connections: { 'HTTP Request': { main: [[{ node: 'Code', type: 'main', index: 0 }]] } } }, // Workflow with correct typeVersion correctTypeVersion: { name: 'Correct typeVersion Test', nodes: [ { id: 'webhook_1', name: 'Webhook', type: 'n8n-nodes-base.webhook', typeVersion: 2, position: [250, 300], parameters: { path: '/test', httpMethod: 'POST' } }, { id: 'http_1', name: 'HTTP Request', type: 'n8n-nodes-base.httpRequest', typeVersion: 4, position: [450, 300], parameters: { url: 'https://example.com', method: 'GET' } } ], connections: { 'Webhook': { main: [[{ node: 'HTTP Request', type: 'main', index: 0 }]] } } }, // Workflow with invalid typeVersion invalidTypeVersion: { name: 'Invalid typeVersion Test', nodes: [ { id: 'webhook_1', name: 'Webhook', type: 'n8n-nodes-base.webhook', typeVersion: 0, // Invalid - must be positive position: [250, 300], parameters: { path: '/test' } }, { id: 'http_1', name: 'HTTP Request', type: 'n8n-nodes-base.httpRequest', typeVersion: 999, // Too high - exceeds maximum position: [450, 300], parameters: { url: 'https://example.com' } } ], connections: { 'Webhook': { main: [[{ node: 'HTTP Request', type: 'main', index: 0 }]] } } } }; async function testTypeVersionValidation() { const dbAdapter = await createDatabaseAdapter('./data/nodes.db'); const repository = new NodeRepository(dbAdapter); const validator = new WorkflowValidator(repository, EnhancedConfigValidator); console.log('\n===================================='); console.log('Testing typeVersion Validation'); console.log('====================================\n'); // Check some versioned nodes to show their versions console.log('📊 Checking versioned nodes in database:'); const versionedNodes = ['nodes-base.webhook', 'nodes-base.httpRequest', 'nodes-base.code', 'nodes-base.executeCommand']; for (const nodeType of versionedNodes) { const nodeInfo = repository.getNode(nodeType); if (nodeInfo) { console.log(`- ${nodeType}: isVersioned=${nodeInfo.isVersioned}, maxVersion=${nodeInfo.version || 'N/A'}`); } } console.log('\n'); // Test each workflow for (const [testName, workflow] of Object.entries(testWorkflows)) { console.log(`\n🧪 Testing: ${testName}`); console.log('─'.repeat(50)); const result = await validator.validateWorkflow(workflow as any); console.log(`\n✅ Valid: ${result.valid}`); if (result.errors.length > 0) { console.log('\n❌ Errors:'); result.errors.forEach(error => { console.log(` - [${error.nodeName || 'Workflow'}] ${error.message}`); }); } if (result.warnings.length > 0) { console.log('\n⚠️ Warnings:'); result.warnings.forEach(warning => { console.log(` - [${warning.nodeName || 'Workflow'}] ${warning.message}`); }); } if (result.suggestions.length > 0) { console.log('\n💡 Suggestions:'); result.suggestions.forEach(suggestion => { console.log(` - ${suggestion}`); }); } } console.log('\n\n✅ typeVersion validation test completed!'); } // Run the test testTypeVersionValidation().catch(console.error); ``` -------------------------------------------------------------------------------- /tests/error-handler.test.ts: -------------------------------------------------------------------------------- ```typescript import { describe, it, expect, vi } from 'vitest'; import { MCPError, N8NConnectionError, AuthenticationError, ValidationError, ToolNotFoundError, ResourceNotFoundError, handleError, withErrorHandling, } from '../src/utils/error-handler'; import { logger } from '../src/utils/logger'; // Mock the logger vi.mock('../src/utils/logger', () => ({ logger: { error: vi.fn(), }, })); describe('Error Classes', () => { describe('MCPError', () => { it('should create error with all properties', () => { const error = new MCPError('Test error', 'TEST_CODE', 400, { field: 'value' }); expect(error.message).toBe('Test error'); expect(error.code).toBe('TEST_CODE'); expect(error.statusCode).toBe(400); expect(error.data).toEqual({ field: 'value' }); expect(error.name).toBe('MCPError'); }); }); describe('N8NConnectionError', () => { it('should create connection error with correct code', () => { const error = new N8NConnectionError('Connection failed'); expect(error.message).toBe('Connection failed'); expect(error.code).toBe('N8N_CONNECTION_ERROR'); expect(error.statusCode).toBe(503); expect(error.name).toBe('N8NConnectionError'); }); }); describe('AuthenticationError', () => { it('should create auth error with default message', () => { const error = new AuthenticationError(); expect(error.message).toBe('Authentication failed'); expect(error.code).toBe('AUTH_ERROR'); expect(error.statusCode).toBe(401); }); it('should accept custom message', () => { const error = new AuthenticationError('Invalid token'); expect(error.message).toBe('Invalid token'); }); }); describe('ValidationError', () => { it('should create validation error', () => { const error = new ValidationError('Invalid input', { field: 'email' }); expect(error.message).toBe('Invalid input'); expect(error.code).toBe('VALIDATION_ERROR'); expect(error.statusCode).toBe(400); expect(error.data).toEqual({ field: 'email' }); }); }); describe('ToolNotFoundError', () => { it('should create tool not found error', () => { const error = new ToolNotFoundError('myTool'); expect(error.message).toBe("Tool 'myTool' not found"); expect(error.code).toBe('TOOL_NOT_FOUND'); expect(error.statusCode).toBe(404); }); }); describe('ResourceNotFoundError', () => { it('should create resource not found error', () => { const error = new ResourceNotFoundError('workflow://123'); expect(error.message).toBe("Resource 'workflow://123' not found"); expect(error.code).toBe('RESOURCE_NOT_FOUND'); expect(error.statusCode).toBe(404); }); }); }); describe('handleError', () => { it('should return MCPError instances as-is', () => { const mcpError = new ValidationError('Test'); const result = handleError(mcpError); expect(result).toBe(mcpError); }); it('should handle HTTP 401 errors', () => { const httpError = { response: { status: 401, data: { message: 'Unauthorized' } }, }; const result = handleError(httpError); expect(result).toBeInstanceOf(AuthenticationError); expect(result.message).toBe('Unauthorized'); }); it('should handle HTTP 404 errors', () => { const httpError = { response: { status: 404, data: { message: 'Not found' } }, }; const result = handleError(httpError); expect(result.code).toBe('NOT_FOUND'); expect(result.statusCode).toBe(404); }); it('should handle HTTP 5xx errors', () => { const httpError = { response: { status: 503, data: { message: 'Service unavailable' } }, }; const result = handleError(httpError); expect(result).toBeInstanceOf(N8NConnectionError); }); it('should handle connection refused errors', () => { const connError = { code: 'ECONNREFUSED' }; const result = handleError(connError); expect(result).toBeInstanceOf(N8NConnectionError); expect(result.message).toBe('Cannot connect to n8n API'); }); it('should handle generic errors', () => { const error = new Error('Something went wrong'); const result = handleError(error); expect(result.message).toBe('Something went wrong'); expect(result.code).toBe('UNKNOWN_ERROR'); expect(result.statusCode).toBe(500); }); it('should handle errors without message', () => { const error = {}; const result = handleError(error); expect(result.message).toBe('An unexpected error occurred'); }); }); describe('withErrorHandling', () => { it('should execute operation successfully', async () => { const operation = vi.fn().mockResolvedValue('success'); const result = await withErrorHandling(operation, 'test operation'); expect(result).toBe('success'); expect(logger.error).not.toHaveBeenCalled(); }); it('should handle and log errors', async () => { const error = new Error('Operation failed'); const operation = vi.fn().mockRejectedValue(error); await expect(withErrorHandling(operation, 'test operation')).rejects.toThrow(); expect(logger.error).toHaveBeenCalledWith('Error in test operation:', error); }); it('should transform errors using handleError', async () => { const error = { code: 'ECONNREFUSED' }; const operation = vi.fn().mockRejectedValue(error); try { await withErrorHandling(operation, 'test operation'); } catch (err) { expect(err).toBeInstanceOf(N8NConnectionError); } }); }); ``` -------------------------------------------------------------------------------- /src/mcp/tool-docs/templates/search-templates-by-metadata.ts: -------------------------------------------------------------------------------- ```typescript import { ToolDocumentation } from '../types'; export const searchTemplatesByMetadataDoc: ToolDocumentation = { name: 'search_templates_by_metadata', category: 'templates', essentials: { description: 'Search templates using AI-generated metadata filters. Find templates by complexity, setup time, required services, or target audience. Enables smart template discovery beyond simple text search.', keyParameters: ['category', 'complexity', 'maxSetupMinutes', 'targetAudience'], example: 'search_templates_by_metadata({complexity: "simple", maxSetupMinutes: 30})', performance: 'Fast (<100ms) - JSON extraction queries', tips: [ 'All filters are optional - combine them for precise results', 'Use getAvailableCategories() to see valid category values', 'Complexity levels: simple, medium, complex', 'Setup time is in minutes (5-480 range)' ] }, full: { description: `Advanced template search using AI-generated metadata. Each template has been analyzed by GPT-4 to extract structured information about its purpose, complexity, setup requirements, and target users. This enables intelligent filtering beyond simple keyword matching, helping you find templates that match your specific needs, skill level, and available time.`, parameters: { category: { type: 'string', required: false, description: 'Filter by category like "automation", "integration", "data processing", "communication". Use template service getAvailableCategories() for full list.' }, complexity: { type: 'string (enum)', required: false, description: 'Filter by implementation complexity: "simple" (beginner-friendly), "medium" (some experience needed), or "complex" (advanced features)' }, maxSetupMinutes: { type: 'number', required: false, description: 'Maximum acceptable setup time in minutes (5-480). Find templates you can implement within your time budget.' }, minSetupMinutes: { type: 'number', required: false, description: 'Minimum setup time in minutes (5-480). Find more substantial templates that offer comprehensive solutions.' }, requiredService: { type: 'string', required: false, description: 'Filter by required external service like "openai", "slack", "google", "shopify". Ensures you have necessary accounts/APIs.' }, targetAudience: { type: 'string', required: false, description: 'Filter by intended users: "developers", "marketers", "analysts", "operations", "sales". Find templates for your role.' }, limit: { type: 'number', required: false, description: 'Maximum results to return. Default 20, max 100.' }, offset: { type: 'number', required: false, description: 'Pagination offset for results. Default 0.' } }, returns: `Returns an object containing: - items: Array of matching templates with full metadata - id: Template ID - name: Template name - description: Purpose and functionality - author: Creator details - nodes: Array of nodes used - views: Popularity count - metadata: AI-generated structured data - categories: Primary use categories - complexity: Difficulty level - use_cases: Specific applications - estimated_setup_minutes: Time to implement - required_services: External dependencies - key_features: Main capabilities - target_audience: Intended users - total: Total matching templates - filters: Applied filter criteria - filterSummary: Human-readable filter description - availableCategories: Suggested categories if no results - availableAudiences: Suggested audiences if no results - tip: Contextual guidance`, examples: [ 'search_templates_by_metadata({complexity: "simple"}) - Find beginner-friendly templates', 'search_templates_by_metadata({category: "automation", maxSetupMinutes: 30}) - Quick automation templates', 'search_templates_by_metadata({targetAudience: "marketers"}) - Marketing-focused workflows', 'search_templates_by_metadata({requiredService: "openai", complexity: "medium"}) - AI templates with moderate complexity', 'search_templates_by_metadata({minSetupMinutes: 60, category: "integration"}) - Comprehensive integration solutions' ], useCases: [ 'Finding beginner-friendly templates by setting complexity:"simple"', 'Discovering templates you can implement quickly with maxSetupMinutes:30', 'Finding role-specific workflows with targetAudience filter', 'Identifying templates that need specific APIs with requiredService filter', 'Combining multiple filters for precise template discovery' ], performance: 'Fast (<100ms) - Uses SQLite JSON extraction on pre-generated metadata. 97.5% coverage (2,534/2,598 templates).', bestPractices: [ 'Start with broad filters and narrow down based on results', 'Use getAvailableCategories() to discover valid category values', 'Combine complexity and setup time for skill-appropriate templates', 'Check required services before selecting templates to ensure you have necessary accounts' ], pitfalls: [ 'Not all templates have metadata (97.5% coverage)', 'Setup time estimates assume basic n8n familiarity', 'Categories/audiences use partial matching - be specific', 'Metadata is AI-generated and may occasionally be imprecise' ], relatedTools: [ 'list_templates', 'search_templates', 'list_node_templates', 'get_templates_for_task' ] } }; ``` -------------------------------------------------------------------------------- /src/utils/node-utils.ts: -------------------------------------------------------------------------------- ```typescript /** * Normalizes node type from n8n export format to database format * * Examples: * - 'n8n-nodes-base.httpRequest' → 'nodes-base.httpRequest' * - '@n8n/n8n-nodes-langchain.agent' → 'nodes-langchain.agent' * - 'n8n-nodes-langchain.chatTrigger' → 'nodes-langchain.chatTrigger' * - 'nodes-base.slack' → 'nodes-base.slack' (unchanged) * * @param nodeType The node type to normalize * @returns The normalized node type */ export function normalizeNodeType(nodeType: string): string { // Handle n8n-nodes-base -> nodes-base if (nodeType.startsWith('n8n-nodes-base.')) { return nodeType.replace('n8n-nodes-base.', 'nodes-base.'); } // Handle @n8n/n8n-nodes-langchain -> nodes-langchain if (nodeType.startsWith('@n8n/n8n-nodes-langchain.')) { return nodeType.replace('@n8n/n8n-nodes-langchain.', 'nodes-langchain.'); } // Handle n8n-nodes-langchain -> nodes-langchain (without @n8n/ prefix) if (nodeType.startsWith('n8n-nodes-langchain.')) { return nodeType.replace('n8n-nodes-langchain.', 'nodes-langchain.'); } // Return unchanged if already normalized or unknown format return nodeType; } /** * Gets alternative node type formats to try for lookups * * @param nodeType The original node type * @returns Array of alternative formats to try */ export function getNodeTypeAlternatives(nodeType: string): string[] { // Defensive: validate input to prevent TypeError when nodeType is undefined/null/empty if (!nodeType || typeof nodeType !== 'string' || nodeType.trim() === '') { return []; } const alternatives: string[] = []; // Add lowercase version alternatives.push(nodeType.toLowerCase()); // If it has a prefix, try case variations on the node name part if (nodeType.includes('.')) { const [prefix, nodeName] = nodeType.split('.'); // Try different case variations for the node name if (nodeName && nodeName.toLowerCase() !== nodeName) { alternatives.push(`${prefix}.${nodeName.toLowerCase()}`); } // For camelCase names like "chatTrigger", also try with capital first letter variations // e.g., "chattrigger" -> "chatTrigger" if (nodeName && nodeName.toLowerCase() === nodeName && nodeName.length > 1) { // Try to detect common patterns and create camelCase version const camelCaseVariants = generateCamelCaseVariants(nodeName); camelCaseVariants.forEach(variant => { alternatives.push(`${prefix}.${variant}`); }); } } // If it's just a bare node name, try with common prefixes if (!nodeType.includes('.')) { alternatives.push(`nodes-base.${nodeType}`); alternatives.push(`nodes-langchain.${nodeType}`); // Also try camelCase variants for bare names const camelCaseVariants = generateCamelCaseVariants(nodeType); camelCaseVariants.forEach(variant => { alternatives.push(`nodes-base.${variant}`); alternatives.push(`nodes-langchain.${variant}`); }); } // Normalize all alternatives and combine with originals const normalizedAlternatives = alternatives.map(alt => normalizeNodeType(alt)); // Combine original alternatives with normalized ones and remove duplicates return [...new Set([...alternatives, ...normalizedAlternatives])]; } /** * Generate camelCase variants for a lowercase string * @param str The lowercase string * @returns Array of possible camelCase variants */ function generateCamelCaseVariants(str: string): string[] { const variants: string[] = []; // Common patterns for n8n nodes const patterns = [ // Pattern: wordTrigger (e.g., chatTrigger, webhookTrigger) /^(.+)(trigger|node|request|response)$/i, // Pattern: httpRequest, mysqlDatabase /^(http|mysql|postgres|mongo|redis|mqtt|smtp|imap|ftp|ssh|api)(.+)$/i, // Pattern: googleSheets, microsoftTeams /^(google|microsoft|amazon|slack|discord|telegram)(.+)$/i, ]; for (const pattern of patterns) { const match = str.toLowerCase().match(pattern); if (match) { const [, first, second] = match; // Capitalize the second part variants.push(first.toLowerCase() + second.charAt(0).toUpperCase() + second.slice(1).toLowerCase()); } } // Generic camelCase: capitalize after common word boundaries if (variants.length === 0) { // Try splitting on common boundaries and capitalizing const words = str.split(/[-_\s]+/); if (words.length > 1) { const camelCase = words[0].toLowerCase() + words.slice(1).map(w => w.charAt(0).toUpperCase() + w.slice(1).toLowerCase() ).join(''); variants.push(camelCase); } } return variants; } /** * Constructs the workflow node type from package name and normalized node type * This creates the format that n8n expects in workflow definitions * * Examples: * - ('n8n-nodes-base', 'nodes-base.webhook') → 'n8n-nodes-base.webhook' * - ('@n8n/n8n-nodes-langchain', 'nodes-langchain.agent') → '@n8n/n8n-nodes-langchain.agent' * * @param packageName The package name from the database * @param nodeType The normalized node type from the database * @returns The workflow node type for use in n8n workflows */ export function getWorkflowNodeType(packageName: string, nodeType: string): string { // Extract just the node name from the normalized type const nodeName = nodeType.split('.').pop() || nodeType; // Construct the full workflow type based on package if (packageName === 'n8n-nodes-base') { return `n8n-nodes-base.${nodeName}`; } else if (packageName === '@n8n/n8n-nodes-langchain') { return `@n8n/n8n-nodes-langchain.${nodeName}`; } // Fallback for unknown packages - return as is return nodeType; } ``` -------------------------------------------------------------------------------- /docker/parse-config.js: -------------------------------------------------------------------------------- ```javascript #!/usr/bin/env node /** * Parse JSON config file and output shell-safe export commands * Only outputs variables that aren't already set in environment * * Security: Uses safe quoting without any shell execution */ const fs = require('fs'); // Debug logging support const DEBUG = process.env.DEBUG_CONFIG === 'true'; function debugLog(message) { if (DEBUG) { process.stderr.write(`[parse-config] ${message}\n`); } } const configPath = process.argv[2] || '/app/config.json'; debugLog(`Using config path: ${configPath}`); // Dangerous environment variables that should never be set const DANGEROUS_VARS = new Set([ 'PATH', 'LD_PRELOAD', 'LD_LIBRARY_PATH', 'LD_AUDIT', 'BASH_ENV', 'ENV', 'CDPATH', 'IFS', 'PS1', 'PS2', 'PS3', 'PS4', 'SHELL', 'BASH_FUNC', 'SHELLOPTS', 'GLOBIGNORE', 'PERL5LIB', 'PYTHONPATH', 'NODE_PATH', 'RUBYLIB' ]); /** * Sanitize a key name for use as environment variable * Converts to uppercase and replaces invalid chars with underscore */ function sanitizeKey(key) { // Convert to string and handle edge cases const keyStr = String(key || '').trim(); if (!keyStr) { return 'EMPTY_KEY'; } // Special handling for NODE_DB_PATH to preserve exact casing if (keyStr === 'NODE_DB_PATH') { return 'NODE_DB_PATH'; } const sanitized = keyStr .toUpperCase() .replace(/[^A-Z0-9]+/g, '_') .replace(/^_+|_+$/g, '') // Trim underscores .replace(/^(\d)/, '_$1'); // Prefix with _ if starts with number // If sanitization results in empty string, use a default return sanitized || 'EMPTY_KEY'; } /** * Safely quote a string for shell use * This follows POSIX shell quoting rules */ function shellQuote(str) { // Remove null bytes which are not allowed in environment variables str = str.replace(/\x00/g, ''); // Always use single quotes for consistency and safety // Single quotes protect everything except other single quotes return "'" + str.replace(/'/g, "'\"'\"'") + "'"; } try { if (!fs.existsSync(configPath)) { debugLog(`Config file not found at: ${configPath}`); process.exit(0); // Silent exit if no config file } let configContent; let config; try { configContent = fs.readFileSync(configPath, 'utf8'); debugLog(`Read config file, size: ${configContent.length} bytes`); } catch (readError) { // Silent exit on read errors debugLog(`Error reading config: ${readError.message}`); process.exit(0); } try { config = JSON.parse(configContent); debugLog(`Parsed config with ${Object.keys(config).length} top-level keys`); } catch (parseError) { // Silent exit on invalid JSON debugLog(`Error parsing JSON: ${parseError.message}`); process.exit(0); } // Validate config is an object if (typeof config !== 'object' || config === null || Array.isArray(config)) { // Silent exit on invalid config structure process.exit(0); } // Convert nested objects to flat environment variables const flattenConfig = (obj, prefix = '', depth = 0) => { const result = {}; // Prevent infinite recursion if (depth > 10) { return result; } for (const [key, value] of Object.entries(obj)) { const sanitizedKey = sanitizeKey(key); // Skip if sanitization resulted in EMPTY_KEY (indicating invalid key) if (sanitizedKey === 'EMPTY_KEY') { debugLog(`Skipping key '${key}': invalid key name`); continue; } const envKey = prefix ? `${prefix}_${sanitizedKey}` : sanitizedKey; // Skip if key is too long if (envKey.length > 255) { debugLog(`Skipping key '${envKey}': too long (${envKey.length} chars)`); continue; } if (typeof value === 'object' && value !== null && !Array.isArray(value)) { // Recursively flatten nested objects Object.assign(result, flattenConfig(value, envKey, depth + 1)); } else if (typeof value === 'string' || typeof value === 'number' || typeof value === 'boolean') { // Only include if not already set in environment if (!process.env[envKey]) { let stringValue = String(value); // Handle special JavaScript number values if (typeof value === 'number') { if (!isFinite(value)) { if (value === Infinity) { stringValue = 'Infinity'; } else if (value === -Infinity) { stringValue = '-Infinity'; } else if (isNaN(value)) { stringValue = 'NaN'; } } } // Skip if value is too long if (stringValue.length <= 32768) { result[envKey] = stringValue; } } } } return result; }; // Output shell-safe export commands const flattened = flattenConfig(config); const exports = []; for (const [key, value] of Object.entries(flattened)) { // Validate key name (alphanumeric and underscore only) if (!/^[A-Z_][A-Z0-9_]*$/.test(key)) { continue; // Skip invalid variable names } // Skip dangerous variables if (DANGEROUS_VARS.has(key) || key.startsWith('BASH_FUNC_')) { debugLog(`Warning: Ignoring dangerous variable: ${key}`); process.stderr.write(`Warning: Ignoring dangerous variable: ${key}\n`); continue; } // Safely quote the value const quotedValue = shellQuote(value); exports.push(`export ${key}=${quotedValue}`); } // Use process.stdout.write to ensure output goes to stdout if (exports.length > 0) { process.stdout.write(exports.join('\n') + '\n'); } } catch (error) { // Silent fail - don't break the container startup process.exit(0); } ``` -------------------------------------------------------------------------------- /src/mcp/handlers-workflow-diff.ts: -------------------------------------------------------------------------------- ```typescript /** * MCP Handler for Partial Workflow Updates * Handles diff-based workflow modifications */ import { z } from 'zod'; import { McpToolResponse } from '../types/n8n-api'; import { WorkflowDiffRequest, WorkflowDiffOperation } from '../types/workflow-diff'; import { WorkflowDiffEngine } from '../services/workflow-diff-engine'; import { getN8nApiClient } from './handlers-n8n-manager'; import { N8nApiError, getUserFriendlyErrorMessage } from '../utils/n8n-errors'; import { logger } from '../utils/logger'; import { InstanceContext } from '../types/instance-context'; // Zod schema for the diff request const workflowDiffSchema = z.object({ id: z.string(), operations: z.array(z.object({ type: z.string(), description: z.string().optional(), // Node operations node: z.any().optional(), nodeId: z.string().optional(), nodeName: z.string().optional(), updates: z.any().optional(), position: z.tuple([z.number(), z.number()]).optional(), // Connection operations source: z.string().optional(), target: z.string().optional(), from: z.string().optional(), // For rewireConnection to: z.string().optional(), // For rewireConnection sourceOutput: z.string().optional(), targetInput: z.string().optional(), sourceIndex: z.number().optional(), targetIndex: z.number().optional(), // Smart parameters (Phase 1 UX improvement) branch: z.enum(['true', 'false']).optional(), case: z.number().optional(), ignoreErrors: z.boolean().optional(), // Connection cleanup operations dryRun: z.boolean().optional(), connections: z.any().optional(), // Metadata operations settings: z.any().optional(), name: z.string().optional(), tag: z.string().optional(), })), validateOnly: z.boolean().optional(), continueOnError: z.boolean().optional(), }); export async function handleUpdatePartialWorkflow(args: unknown, context?: InstanceContext): Promise<McpToolResponse> { try { // Debug logging (only in debug mode) if (process.env.DEBUG_MCP === 'true') { logger.debug('Workflow diff request received', { argsType: typeof args, hasWorkflowId: args && typeof args === 'object' && 'workflowId' in args, operationCount: args && typeof args === 'object' && 'operations' in args ? (args as any).operations?.length : 0 }); } // Validate input const input = workflowDiffSchema.parse(args); // Get API client const client = getN8nApiClient(context); if (!client) { return { success: false, error: 'n8n API not configured. Please set N8N_API_URL and N8N_API_KEY environment variables.' }; } // Fetch current workflow let workflow; try { workflow = await client.getWorkflow(input.id); } catch (error) { if (error instanceof N8nApiError) { return { success: false, error: getUserFriendlyErrorMessage(error), code: error.code }; } throw error; } // Apply diff operations const diffEngine = new WorkflowDiffEngine(); const diffRequest = input as WorkflowDiffRequest; const diffResult = await diffEngine.applyDiff(workflow, diffRequest); // Check if this is a complete failure or partial success in continueOnError mode if (!diffResult.success) { // In continueOnError mode, partial success is still valuable if (diffRequest.continueOnError && diffResult.workflow && diffResult.operationsApplied && diffResult.operationsApplied > 0) { logger.info(`continueOnError mode: Applying ${diffResult.operationsApplied} successful operations despite ${diffResult.failed?.length || 0} failures`); // Continue to update workflow with partial changes } else { // Complete failure - return error return { success: false, error: 'Failed to apply diff operations', details: { errors: diffResult.errors, operationsApplied: diffResult.operationsApplied, applied: diffResult.applied, failed: diffResult.failed } }; } } // If validateOnly, return validation result if (input.validateOnly) { return { success: true, message: diffResult.message, data: { valid: true, operationsToApply: input.operations.length } }; } // Update workflow via API try { const updatedWorkflow = await client.updateWorkflow(input.id, diffResult.workflow!); return { success: true, data: updatedWorkflow, message: `Workflow "${updatedWorkflow.name}" updated successfully. Applied ${diffResult.operationsApplied} operations.`, details: { operationsApplied: diffResult.operationsApplied, workflowId: updatedWorkflow.id, workflowName: updatedWorkflow.name, applied: diffResult.applied, failed: diffResult.failed, errors: diffResult.errors } }; } catch (error) { if (error instanceof N8nApiError) { return { success: false, error: getUserFriendlyErrorMessage(error), code: error.code, details: error.details as Record<string, unknown> | undefined }; } throw error; } } catch (error) { if (error instanceof z.ZodError) { return { success: false, error: 'Invalid input', details: { errors: error.errors } }; } logger.error('Failed to update partial workflow', error); return { success: false, error: error instanceof Error ? error.message : 'Unknown error occurred' }; } } ``` -------------------------------------------------------------------------------- /tests/test-enhanced-integration.js: -------------------------------------------------------------------------------- ```javascript #!/usr/bin/env node const { DocumentationFetcher } = require('../dist/utils/documentation-fetcher'); const { NodeDocumentationService } = require('../dist/services/node-documentation-service'); async function testEnhancedIntegration() { console.log('🧪 Testing Enhanced Documentation Integration...\n'); // Test 1: DocumentationFetcher backward compatibility console.log('1️⃣ Testing DocumentationFetcher backward compatibility...'); const docFetcher = new DocumentationFetcher(); try { // Test getNodeDocumentation (backward compatible method) const simpleDoc = await docFetcher.getNodeDocumentation('n8n-nodes-base.slack'); if (simpleDoc) { console.log(' ✅ Simple documentation format works'); console.log(` - Has markdown: ${!!simpleDoc.markdown}`); console.log(` - Has URL: ${!!simpleDoc.url}`); console.log(` - Has examples: ${simpleDoc.examples?.length || 0}`); } // Test getEnhancedNodeDocumentation (new method) const enhancedDoc = await docFetcher.getEnhancedNodeDocumentation('n8n-nodes-base.slack'); if (enhancedDoc) { console.log(' ✅ Enhanced documentation format works'); console.log(` - Title: ${enhancedDoc.title || 'N/A'}`); console.log(` - Operations: ${enhancedDoc.operations?.length || 0}`); console.log(` - API Methods: ${enhancedDoc.apiMethods?.length || 0}`); console.log(` - Examples: ${enhancedDoc.examples?.length || 0}`); console.log(` - Templates: ${enhancedDoc.templates?.length || 0}`); console.log(` - Related Resources: ${enhancedDoc.relatedResources?.length || 0}`); } } catch (error) { console.error(' ❌ DocumentationFetcher test failed:', error.message); } // Test 2: NodeDocumentationService with enhanced fields console.log('\n2️⃣ Testing NodeDocumentationService enhanced schema...'); const docService = new NodeDocumentationService('data/test-enhanced-docs.db'); try { // Store a test node with enhanced documentation const testNode = { nodeType: 'test.enhanced-node', name: 'enhanced-node', displayName: 'Enhanced Test Node', description: 'A test node with enhanced documentation', sourceCode: 'const testCode = "example";', packageName: 'test-package', documentation: '# Test Documentation', documentationUrl: 'https://example.com/docs', documentationTitle: 'Enhanced Test Node Documentation', operations: [ { resource: 'Message', operation: 'Send', description: 'Send a message' } ], apiMethods: [ { resource: 'Message', operation: 'Send', apiMethod: 'chat.postMessage', apiUrl: 'https://api.slack.com/methods/chat.postMessage' } ], documentationExamples: [ { title: 'Send Message Example', type: 'json', code: '{"text": "Hello World"}' } ], templates: [ { name: 'Basic Message Template', description: 'Simple message sending template' } ], relatedResources: [ { title: 'API Documentation', url: 'https://api.slack.com', type: 'api' } ], requiredScopes: ['chat:write'], hasCredentials: true, isTrigger: false, isWebhook: false }; await docService.storeNode(testNode); console.log(' ✅ Stored node with enhanced documentation'); // Retrieve and verify const retrieved = await docService.getNodeInfo('test.enhanced-node'); if (retrieved) { console.log(' ✅ Retrieved node with enhanced fields:'); console.log(` - Has operations: ${!!retrieved.operations}`); console.log(` - Has API methods: ${!!retrieved.apiMethods}`); console.log(` - Has documentation examples: ${!!retrieved.documentationExamples}`); console.log(` - Has templates: ${!!retrieved.templates}`); console.log(` - Has related resources: ${!!retrieved.relatedResources}`); console.log(` - Has required scopes: ${!!retrieved.requiredScopes}`); } // Test search const searchResults = await docService.searchNodes({ query: 'enhanced' }); console.log(` ✅ Search found ${searchResults.length} results`); } catch (error) { console.error(' ❌ NodeDocumentationService test failed:', error.message); } finally { docService.close(); } // Test 3: MCP Server integration console.log('\n3️⃣ Testing MCP Server integration...'); try { const { N8NMCPServer } = require('../dist/mcp/server'); console.log(' ✅ MCP Server loads with enhanced documentation support'); // Check if new tools are available const { n8nTools } = require('../dist/mcp/tools'); const enhancedTools = [ 'get_node_documentation', 'search_node_documentation', 'get_node_operations', 'get_node_examples' ]; const hasAllTools = enhancedTools.every(toolName => n8nTools.some(tool => tool.name === toolName) ); if (hasAllTools) { console.log(' ✅ All enhanced documentation tools are available'); enhancedTools.forEach(toolName => { const tool = n8nTools.find(t => t.name === toolName); console.log(` - ${toolName}: ${tool.description}`); }); } else { console.log(' ⚠️ Some enhanced tools are missing'); } } catch (error) { console.error(' ❌ MCP Server integration test failed:', error.message); } console.log('\n✨ Enhanced documentation integration tests completed!'); // Cleanup await docFetcher.cleanup(); } // Run tests testEnhancedIntegration().catch(error => { console.error('Fatal error:', error); process.exit(1); }); ``` -------------------------------------------------------------------------------- /tests/integration/n8n-api/utils/credentials.ts: -------------------------------------------------------------------------------- ```typescript /** * Integration Test Credentials Management * * Provides environment-aware credential loading for integration tests. * - Local development: Reads from .env file * - CI/GitHub Actions: Uses GitHub secrets from process.env */ import dotenv from 'dotenv'; import path from 'path'; // Load .env file for local development dotenv.config({ path: path.resolve(process.cwd(), '.env') }); export interface N8nTestCredentials { url: string; apiKey: string; webhookUrls: { get: string; post: string; put: string; delete: string; }; cleanup: { enabled: boolean; tag: string; namePrefix: string; }; } /** * Get n8n credentials for integration tests * * Automatically detects environment (local vs CI) and loads * credentials from the appropriate source. * * @returns N8nTestCredentials * @throws Error if required credentials are missing */ export function getN8nCredentials(): N8nTestCredentials { if (process.env.CI) { // CI: Use GitHub secrets - validate required variables first const url = process.env.N8N_API_URL; const apiKey = process.env.N8N_API_KEY; if (!url || !apiKey) { throw new Error( 'Missing required CI credentials:\n' + ` N8N_API_URL: ${url ? 'set' : 'MISSING'}\n` + ` N8N_API_KEY: ${apiKey ? 'set' : 'MISSING'}\n` + 'Please configure GitHub secrets for integration tests.' ); } return { url, apiKey, webhookUrls: { get: process.env.N8N_TEST_WEBHOOK_GET_URL || '', post: process.env.N8N_TEST_WEBHOOK_POST_URL || '', put: process.env.N8N_TEST_WEBHOOK_PUT_URL || '', delete: process.env.N8N_TEST_WEBHOOK_DELETE_URL || '' }, cleanup: { enabled: true, tag: 'mcp-integration-test', namePrefix: '[MCP-TEST]' } }; } else { // Local: Use .env file - validate required variables first const url = process.env.N8N_API_URL; const apiKey = process.env.N8N_API_KEY; if (!url || !apiKey) { throw new Error( 'Missing required credentials in .env:\n' + ` N8N_API_URL: ${url ? 'set' : 'MISSING'}\n` + ` N8N_API_KEY: ${apiKey ? 'set' : 'MISSING'}\n\n` + 'Please add these to your .env file.\n' + 'See .env.example for configuration details.' ); } return { url, apiKey, webhookUrls: { get: process.env.N8N_TEST_WEBHOOK_GET_URL || '', post: process.env.N8N_TEST_WEBHOOK_POST_URL || '', put: process.env.N8N_TEST_WEBHOOK_PUT_URL || '', delete: process.env.N8N_TEST_WEBHOOK_DELETE_URL || '' }, cleanup: { enabled: process.env.N8N_TEST_CLEANUP_ENABLED !== 'false', tag: process.env.N8N_TEST_TAG || 'mcp-integration-test', namePrefix: process.env.N8N_TEST_NAME_PREFIX || '[MCP-TEST]' } }; } } /** * Validate that required credentials are present * * @param creds - Credentials to validate * @throws Error if required credentials are missing */ export function validateCredentials(creds: N8nTestCredentials): void { const missing: string[] = []; if (!creds.url) { missing.push(process.env.CI ? 'N8N_URL' : 'N8N_API_URL'); } if (!creds.apiKey) { missing.push('N8N_API_KEY'); } if (missing.length > 0) { throw new Error( `Missing required n8n credentials: ${missing.join(', ')}\n\n` + `Please set the following environment variables:\n` + missing.map(v => ` ${v}`).join('\n') + '\n\n' + `See .env.example for configuration details.` ); } } /** * Validate that webhook URLs are configured * * @param creds - Credentials to validate * @throws Error with setup instructions if webhook URLs are missing */ export function validateWebhookUrls(creds: N8nTestCredentials): void { const missing: string[] = []; if (!creds.webhookUrls.get) missing.push('GET'); if (!creds.webhookUrls.post) missing.push('POST'); if (!creds.webhookUrls.put) missing.push('PUT'); if (!creds.webhookUrls.delete) missing.push('DELETE'); if (missing.length > 0) { const envVars = missing.map(m => `N8N_TEST_WEBHOOK_${m}_URL`); throw new Error( `Missing webhook URLs for HTTP methods: ${missing.join(', ')}\n\n` + `Webhook testing requires pre-activated workflows in n8n.\n` + `n8n API doesn't support workflow activation, so these must be created manually.\n\n` + `Setup Instructions:\n` + `1. Create ${missing.length} workflow(s) in your n8n instance\n` + `2. Each workflow should have a single Webhook node\n` + `3. Configure webhook paths:\n` + missing.map(m => ` - ${m}: mcp-test-${m.toLowerCase()}`).join('\n') + '\n' + `4. ACTIVATE each workflow in n8n UI\n` + `5. Set the following environment variables with full webhook URLs:\n` + envVars.map(v => ` ${v}=<full-webhook-url>`).join('\n') + '\n\n' + `Example: N8N_TEST_WEBHOOK_GET_URL=https://n8n-test.n8n-mcp.com/webhook/mcp-test-get\n\n` + `See docs/local/integration-testing-plan.md for detailed instructions.` ); } } /** * Check if credentials are configured (non-throwing version) * * @returns true if basic credentials are available */ export function hasCredentials(): boolean { try { const creds = getN8nCredentials(); return !!(creds.url && creds.apiKey); } catch { return false; } } /** * Check if webhook URLs are configured (non-throwing version) * * @returns true if all webhook URLs are available */ export function hasWebhookUrls(): boolean { try { const creds = getN8nCredentials(); return !!( creds.webhookUrls.get && creds.webhookUrls.post && creds.webhookUrls.put && creds.webhookUrls.delete ); } catch { return false; } } ``` -------------------------------------------------------------------------------- /src/utils/npm-version-checker.ts: -------------------------------------------------------------------------------- ```typescript /** * NPM Version Checker Utility * * Checks if the current n8n-mcp version is outdated by comparing * against the latest version published on npm. */ import { logger } from './logger'; /** * NPM Registry Response structure * Based on npm registry JSON format for package metadata */ interface NpmRegistryResponse { version: string; [key: string]: unknown; } export interface VersionCheckResult { currentVersion: string; latestVersion: string | null; isOutdated: boolean; updateAvailable: boolean; error: string | null; checkedAt: Date; updateCommand?: string; } // Cache for version check to avoid excessive npm requests let versionCheckCache: VersionCheckResult | null = null; let lastCheckTime: number = 0; const CACHE_TTL_MS = 1 * 60 * 60 * 1000; // 1 hour cache /** * Check if current version is outdated compared to npm registry * Uses caching to avoid excessive npm API calls * * @param forceRefresh - Force a fresh check, bypassing cache * @returns Version check result */ export async function checkNpmVersion(forceRefresh: boolean = false): Promise<VersionCheckResult> { const now = Date.now(); // Return cached result if available and not expired if (!forceRefresh && versionCheckCache && (now - lastCheckTime) < CACHE_TTL_MS) { logger.debug('Returning cached npm version check result'); return versionCheckCache; } // Get current version from package.json const packageJson = require('../../package.json'); const currentVersion = packageJson.version; try { // Fetch latest version from npm registry const response = await fetch('https://registry.npmjs.org/n8n-mcp/latest', { headers: { 'Accept': 'application/json', }, signal: AbortSignal.timeout(5000) // 5 second timeout }); if (!response.ok) { logger.warn('Failed to fetch npm version info', { status: response.status, statusText: response.statusText }); const result: VersionCheckResult = { currentVersion, latestVersion: null, isOutdated: false, updateAvailable: false, error: `npm registry returned ${response.status}`, checkedAt: new Date() }; versionCheckCache = result; lastCheckTime = now; return result; } // Parse and validate JSON response let data: unknown; try { data = await response.json(); } catch (error) { throw new Error('Failed to parse npm registry response as JSON'); } // Validate response structure if (!data || typeof data !== 'object' || !('version' in data)) { throw new Error('Invalid response format from npm registry'); } const registryData = data as NpmRegistryResponse; const latestVersion = registryData.version; // Validate version format (semver: x.y.z or x.y.z-prerelease) if (!latestVersion || !/^\d+\.\d+\.\d+/.test(latestVersion)) { throw new Error(`Invalid version format from npm registry: ${latestVersion}`); } // Compare versions const isOutdated = compareVersions(currentVersion, latestVersion) < 0; const result: VersionCheckResult = { currentVersion, latestVersion, isOutdated, updateAvailable: isOutdated, error: null, checkedAt: new Date(), updateCommand: isOutdated ? `npm install -g n8n-mcp@${latestVersion}` : undefined }; // Cache the result versionCheckCache = result; lastCheckTime = now; logger.debug('npm version check completed', { current: currentVersion, latest: latestVersion, outdated: isOutdated }); return result; } catch (error) { logger.warn('Error checking npm version', { error: error instanceof Error ? error.message : String(error) }); const result: VersionCheckResult = { currentVersion, latestVersion: null, isOutdated: false, updateAvailable: false, error: error instanceof Error ? error.message : 'Unknown error', checkedAt: new Date() }; // Cache error result to avoid rapid retry versionCheckCache = result; lastCheckTime = now; return result; } } /** * Compare two semantic version strings * Returns: -1 if v1 < v2, 0 if v1 === v2, 1 if v1 > v2 * * @param v1 - First version (e.g., "1.2.3") * @param v2 - Second version (e.g., "1.3.0") * @returns Comparison result */ export function compareVersions(v1: string, v2: string): number { // Remove 'v' prefix if present const clean1 = v1.replace(/^v/, ''); const clean2 = v2.replace(/^v/, ''); // Split into parts and convert to numbers const parts1 = clean1.split('.').map(n => parseInt(n, 10) || 0); const parts2 = clean2.split('.').map(n => parseInt(n, 10) || 0); // Compare each part for (let i = 0; i < Math.max(parts1.length, parts2.length); i++) { const p1 = parts1[i] || 0; const p2 = parts2[i] || 0; if (p1 < p2) return -1; if (p1 > p2) return 1; } return 0; // Versions are equal } /** * Clear the version check cache (useful for testing) */ export function clearVersionCheckCache(): void { versionCheckCache = null; lastCheckTime = 0; } /** * Format version check result as a user-friendly message * * @param result - Version check result * @returns Formatted message */ export function formatVersionMessage(result: VersionCheckResult): string { if (result.error) { return `Version check failed: ${result.error}. Current version: ${result.currentVersion}`; } if (!result.latestVersion) { return `Current version: ${result.currentVersion} (latest version unknown)`; } if (result.isOutdated) { return `⚠️ Update available! Current: ${result.currentVersion} → Latest: ${result.latestVersion}`; } return `✓ You're up to date! Current version: ${result.currentVersion}`; } ``` -------------------------------------------------------------------------------- /scripts/test-ai-validation-debug.ts: -------------------------------------------------------------------------------- ```typescript #!/usr/bin/env node /** * Debug test for AI validation issues * Reproduces the bugs found by n8n-mcp-tester */ import { validateAISpecificNodes, buildReverseConnectionMap } from '../src/services/ai-node-validator'; import type { WorkflowJson } from '../src/services/ai-tool-validators'; import { NodeTypeNormalizer } from '../src/utils/node-type-normalizer'; console.log('=== AI Validation Debug Tests ===\n'); // Test 1: AI Agent with NO language model connection console.log('Test 1: Missing Language Model Detection'); const workflow1: WorkflowJson = { name: 'Test Missing LM', nodes: [ { id: 'ai-agent-1', name: 'AI Agent', type: '@n8n/n8n-nodes-langchain.agent', position: [500, 300], parameters: { promptType: 'define', text: 'You are a helpful assistant' }, typeVersion: 1.7 } ], connections: { // NO connections - AI Agent is isolated } }; console.log('Workflow:', JSON.stringify(workflow1, null, 2)); const reverseMap1 = buildReverseConnectionMap(workflow1); console.log('\nReverse connection map for AI Agent:'); console.log('Entries:', Array.from(reverseMap1.entries())); console.log('AI Agent connections:', reverseMap1.get('AI Agent')); // Check node normalization const normalizedType1 = NodeTypeNormalizer.normalizeToFullForm(workflow1.nodes[0].type); console.log(`\nNode type: ${workflow1.nodes[0].type}`); console.log(`Normalized type: ${normalizedType1}`); console.log(`Match check: ${normalizedType1 === '@n8n/n8n-nodes-langchain.agent'}`); const issues1 = validateAISpecificNodes(workflow1); console.log('\nValidation issues:'); console.log(JSON.stringify(issues1, null, 2)); const hasMissingLMError = issues1.some( i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL' ); console.log(`\n✓ Has MISSING_LANGUAGE_MODEL error: ${hasMissingLMError}`); console.log(`✗ Expected: true, Got: ${hasMissingLMError}`); // Test 2: AI Agent WITH language model connection console.log('\n\n' + '='.repeat(60)); console.log('Test 2: AI Agent WITH Language Model (Should be valid)'); const workflow2: WorkflowJson = { name: 'Test With LM', nodes: [ { id: 'openai-1', name: 'OpenAI Chat Model', type: '@n8n/n8n-nodes-langchain.lmChatOpenAi', position: [200, 300], parameters: { modelName: 'gpt-4' }, typeVersion: 1 }, { id: 'ai-agent-1', name: 'AI Agent', type: '@n8n/n8n-nodes-langchain.agent', position: [500, 300], parameters: { promptType: 'define', text: 'You are a helpful assistant' }, typeVersion: 1.7 } ], connections: { 'OpenAI Chat Model': { ai_languageModel: [ [ { node: 'AI Agent', type: 'ai_languageModel', index: 0 } ] ] } } }; console.log('\nConnections:', JSON.stringify(workflow2.connections, null, 2)); const reverseMap2 = buildReverseConnectionMap(workflow2); console.log('\nReverse connection map for AI Agent:'); console.log('AI Agent connections:', reverseMap2.get('AI Agent')); const issues2 = validateAISpecificNodes(workflow2); console.log('\nValidation issues:'); console.log(JSON.stringify(issues2, null, 2)); const hasMissingLMError2 = issues2.some( i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL' ); console.log(`\n✓ Should NOT have MISSING_LANGUAGE_MODEL error: ${!hasMissingLMError2}`); console.log(`Expected: false, Got: ${hasMissingLMError2}`); // Test 3: AI Agent with tools but no language model console.log('\n\n' + '='.repeat(60)); console.log('Test 3: AI Agent with Tools but NO Language Model'); const workflow3: WorkflowJson = { name: 'Test Tools No LM', nodes: [ { id: 'http-tool-1', name: 'HTTP Request Tool', type: '@n8n/n8n-nodes-langchain.toolHttpRequest', position: [200, 300], parameters: { toolDescription: 'Calls an API', url: 'https://api.example.com' }, typeVersion: 1.1 }, { id: 'ai-agent-1', name: 'AI Agent', type: '@n8n/n8n-nodes-langchain.agent', position: [500, 300], parameters: { promptType: 'define', text: 'You are a helpful assistant' }, typeVersion: 1.7 } ], connections: { 'HTTP Request Tool': { ai_tool: [ [ { node: 'AI Agent', type: 'ai_tool', index: 0 } ] ] } } }; console.log('\nConnections:', JSON.stringify(workflow3.connections, null, 2)); const reverseMap3 = buildReverseConnectionMap(workflow3); console.log('\nReverse connection map for AI Agent:'); const aiAgentConns = reverseMap3.get('AI Agent'); console.log('AI Agent connections:', aiAgentConns); console.log('Connection types:', aiAgentConns?.map(c => c.type)); const issues3 = validateAISpecificNodes(workflow3); console.log('\nValidation issues:'); console.log(JSON.stringify(issues3, null, 2)); const hasMissingLMError3 = issues3.some( i => i.severity === 'error' && i.code === 'MISSING_LANGUAGE_MODEL' ); const hasNoToolsInfo3 = issues3.some( i => i.severity === 'info' && i.message.includes('no ai_tool connections') ); console.log(`\n✓ Should have MISSING_LANGUAGE_MODEL error: ${hasMissingLMError3}`); console.log(`Expected: true, Got: ${hasMissingLMError3}`); console.log(`✗ Should NOT have "no tools" info: ${!hasNoToolsInfo3}`); console.log(`Expected: false, Got: ${hasNoToolsInfo3}`); console.log('\n' + '='.repeat(60)); console.log('Summary:'); console.log(`Test 1 (No LM): ${hasMissingLMError ? 'PASS ✓' : 'FAIL ✗'}`); console.log(`Test 2 (With LM): ${!hasMissingLMError2 ? 'PASS ✓' : 'FAIL ✗'}`); console.log(`Test 3 (Tools, No LM): ${hasMissingLMError3 && !hasNoToolsInfo3 ? 'PASS ✓' : 'FAIL ✗'}`); ``` -------------------------------------------------------------------------------- /scripts/update-and-publish-prep.sh: -------------------------------------------------------------------------------- ```bash #!/bin/bash # Comprehensive script to update n8n dependencies, run tests, and prepare for npm publish # Based on MEMORY_N8N_UPDATE.md but enhanced with test suite and publish preparation set -e # Color codes for output RED='\033[0;31m' GREEN='\033[0;32m' YELLOW='\033[1;33m' BLUE='\033[0;34m' NC='\033[0m' # No Color echo -e "${BLUE}🚀 n8n Update and Publish Preparation Script${NC}" echo "==============================================" echo "" # 1. Check current branch CURRENT_BRANCH=$(git branch --show-current) if [ "$CURRENT_BRANCH" != "main" ]; then echo -e "${YELLOW}⚠️ Warning: Not on main branch (current: $CURRENT_BRANCH)${NC}" echo "It's recommended to run this on the main branch." read -p "Continue anyway? (y/N) " -n 1 -r echo if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1 fi fi # 2. Check for uncommitted changes if ! git diff-index --quiet HEAD --; then echo -e "${RED}❌ Error: You have uncommitted changes${NC}" echo "Please commit or stash your changes before updating." exit 1 fi # 3. Get current versions for comparison echo -e "${BLUE}📊 Current versions:${NC}" CURRENT_N8N=$(node -e "console.log(require('./package.json').dependencies['n8n'])" 2>/dev/null || echo "not installed") CURRENT_PROJECT=$(node -e "console.log(require('./package.json').version)") echo "- n8n: $CURRENT_N8N" echo "- n8n-mcp: $CURRENT_PROJECT" echo "" # 4. Check for updates first echo -e "${BLUE}🔍 Checking for n8n updates...${NC}" npm run update:n8n:check echo "" read -p "Do you want to proceed with the update? (y/N) " -n 1 -r echo if [[ ! $REPLY =~ ^[Yy]$ ]]; then echo "Update cancelled." exit 0 fi # 5. Update n8n dependencies echo "" echo -e "${BLUE}📦 Updating n8n dependencies...${NC}" npm run update:n8n # 6. Run the test suite echo "" echo -e "${BLUE}🧪 Running comprehensive test suite (1,182 tests)...${NC}" npm test if [ $? -ne 0 ]; then echo -e "${RED}❌ Tests failed! Please fix failing tests before proceeding.${NC}" exit 1 fi echo -e "${GREEN}✅ All tests passed!${NC}" # 7. Run validation echo "" echo -e "${BLUE}✔️ Validating critical nodes...${NC}" npm run validate # 8. Build the project echo "" echo -e "${BLUE}🔨 Building project...${NC}" npm run build # 9. Bump version echo "" echo -e "${BLUE}📌 Bumping version...${NC}" # Get new n8n version NEW_N8N=$(node -e "console.log(require('./package.json').dependencies['n8n'])") # Bump patch version npm version patch --no-git-tag-version # Get new project version NEW_PROJECT=$(node -e "console.log(require('./package.json').version)") # 10. Update n8n version badge in README echo "" echo -e "${BLUE}📝 Updating n8n version badge...${NC}" sed -i.bak "s/n8n-v[0-9.]*/n8n-$NEW_N8N/" README.md && rm README.md.bak # 11. Sync runtime version (this also updates the version badge in README) echo "" echo -e "${BLUE}🔄 Syncing runtime version and updating version badge...${NC}" npm run sync:runtime-version # 12. Get update details for commit message echo "" echo -e "${BLUE}📊 Gathering update information...${NC}" # Get all n8n package versions N8N_CORE=$(node -e "console.log(require('./package.json').dependencies['n8n-core'])") N8N_WORKFLOW=$(node -e "console.log(require('./package.json').dependencies['n8n-workflow'])") N8N_LANGCHAIN=$(node -e "console.log(require('./package.json').dependencies['@n8n/n8n-nodes-langchain'])") # Get node count from database NODE_COUNT=$(node -e " const Database = require('better-sqlite3'); const db = new Database('./data/nodes.db', { readonly: true }); const count = db.prepare('SELECT COUNT(*) as count FROM nodes').get().count; console.log(count); db.close(); " 2>/dev/null || echo "unknown") # Check if templates were sanitized TEMPLATES_SANITIZED=false if [ -f "./data/nodes.db" ]; then TEMPLATE_COUNT=$(node -e " const Database = require('better-sqlite3'); const db = new Database('./data/nodes.db', { readonly: true }); const count = db.prepare('SELECT COUNT(*) as count FROM templates').get().count; console.log(count); db.close(); " 2>/dev/null || echo "0") if [ "$TEMPLATE_COUNT" != "0" ]; then TEMPLATES_SANITIZED=true fi fi # 13. Create commit message echo "" echo -e "${BLUE}📝 Creating commit...${NC}" COMMIT_MSG="chore: update n8n to $NEW_N8N and bump version to $NEW_PROJECT - Updated n8n to $NEW_N8N - Updated n8n-core to $N8N_CORE - Updated n8n-workflow to $N8N_WORKFLOW - Updated @n8n/n8n-nodes-langchain to $N8N_LANGCHAIN - Rebuilt node database with $NODE_COUNT nodes" if [ "$TEMPLATES_SANITIZED" = true ]; then COMMIT_MSG="$COMMIT_MSG - Sanitized $TEMPLATE_COUNT workflow templates" fi COMMIT_MSG="$COMMIT_MSG - All 1,182 tests passing (933 unit, 249 integration) - All validation tests passing - Built and prepared for npm publish 🤖 Generated with [Claude Code](https://claude.ai/code) Co-Authored-By: Claude <[email protected]>" # 14. Stage all changes git add -A # 15. Show what will be committed echo "" echo -e "${BLUE}📋 Changes to be committed:${NC}" git status --short # 16. Commit changes git commit -m "$COMMIT_MSG" # 17. Summary echo "" echo -e "${GREEN}✅ Update completed successfully!${NC}" echo "" echo -e "${BLUE}Summary:${NC}" echo "- Updated n8n from $CURRENT_N8N to $NEW_N8N" echo "- Bumped version from $CURRENT_PROJECT to $NEW_PROJECT" echo "- All 1,182 tests passed" echo "- Project built and ready for npm publish" echo "" echo -e "${YELLOW}Next steps:${NC}" echo "1. Push to GitHub:" echo -e " ${GREEN}git push origin $CURRENT_BRANCH${NC}" echo "" echo "2. Create a GitHub release (after push):" echo -e " ${GREEN}gh release create v$NEW_PROJECT --title \"v$NEW_PROJECT\" --notes \"Updated n8n to $NEW_N8N\"${NC}" echo "" echo "3. Publish to npm:" echo -e " ${GREEN}npm run prepare:publish${NC}" echo " Then follow the instructions to publish with OTP" echo "" echo -e "${BLUE}🎉 Done!${NC}" ``` -------------------------------------------------------------------------------- /scripts/test-node-type-validation.ts: -------------------------------------------------------------------------------- ```typescript #!/usr/bin/env tsx /** * Test script for node type validation * Tests the improvements to catch invalid node types like "nodes-base.webhook" */ import { WorkflowValidator } from '../src/services/workflow-validator'; import { EnhancedConfigValidator } from '../src/services/enhanced-config-validator'; import { NodeRepository } from '../src/database/node-repository'; import { createDatabaseAdapter } from '../src/database/database-adapter'; import { validateWorkflowStructure } from '../src/services/n8n-validation'; import { Logger } from '../src/utils/logger'; const logger = new Logger({ prefix: '[TestNodeTypeValidation]' }); async function testValidation() { const adapter = await createDatabaseAdapter('./data/nodes.db'); const repository = new NodeRepository(adapter); const validator = new WorkflowValidator(repository, EnhancedConfigValidator); logger.info('Testing node type validation...\n'); // Test 1: The exact broken workflow from Claude Desktop const brokenWorkflowFromLogs = { "nodes": [ { "parameters": {}, "id": "webhook_node", "name": "Webhook", "type": "nodes-base.webhook", // WRONG! Missing n8n- prefix "typeVersion": 2, "position": [260, 300] as [number, number] } ], "connections": {}, "pinData": {}, "meta": { "instanceId": "74e11c77e266f2c77f6408eb6c88e3fec63c9a5d8c4a3a2ea4c135c542012d6b" } }; logger.info('Test 1: Invalid node type "nodes-base.webhook" (missing n8n- prefix)'); const result1 = await validator.validateWorkflow(brokenWorkflowFromLogs as any); logger.info('Validation result:'); logger.info(`Valid: ${result1.valid}`); logger.info(`Errors: ${result1.errors.length}`); result1.errors.forEach(err => { if (typeof err === 'string') { logger.error(` - ${err}`); } else if (err && typeof err === 'object' && 'message' in err) { logger.error(` - ${err.message}`); } }); // Check if the specific error about nodes-base.webhook was caught const hasNodeBaseError = result1.errors.some(err => err && typeof err === 'object' && 'message' in err && err.message.includes('nodes-base.webhook') && err.message.includes('n8n-nodes-base.webhook') ); logger.info(`Caught nodes-base.webhook error: ${hasNodeBaseError ? 'YES ✅' : 'NO ❌'}`); // Test 2: Node type without any prefix const noPrefixWorkflow = { "name": "Test Workflow", "nodes": [ { "id": "webhook-1", "name": "My Webhook", "type": "webhook", // WRONG! No package prefix "typeVersion": 2, "position": [250, 300] as [number, number], "parameters": {} }, { "id": "set-1", "name": "Set Data", "type": "set", // WRONG! No package prefix "typeVersion": 3.4, "position": [450, 300] as [number, number], "parameters": {} } ], "connections": { "My Webhook": { "main": [[{ "node": "Set Data", "type": "main", "index": 0 }]] } } }; logger.info('\nTest 2: Node types without package prefix ("webhook", "set")'); const result2 = await validator.validateWorkflow(noPrefixWorkflow as any); logger.info('Validation result:'); logger.info(`Valid: ${result2.valid}`); logger.info(`Errors: ${result2.errors.length}`); result2.errors.forEach(err => { if (typeof err === 'string') { logger.error(` - ${err}`); } else if (err && typeof err === 'object' && 'message' in err) { logger.error(` - ${err.message}`); } }); // Test 3: Completely invalid node type const invalidNodeWorkflow = { "name": "Test Workflow", "nodes": [ { "id": "fake-1", "name": "Fake Node", "type": "n8n-nodes-base.fakeNodeThatDoesNotExist", "typeVersion": 1, "position": [250, 300] as [number, number], "parameters": {} } ], "connections": {} }; logger.info('\nTest 3: Completely invalid node type'); const result3 = await validator.validateWorkflow(invalidNodeWorkflow as any); logger.info('Validation result:'); logger.info(`Valid: ${result3.valid}`); logger.info(`Errors: ${result3.errors.length}`); result3.errors.forEach(err => { if (typeof err === 'string') { logger.error(` - ${err}`); } else if (err && typeof err === 'object' && 'message' in err) { logger.error(` - ${err.message}`); } }); // Test 4: Using n8n-validation.ts function logger.info('\nTest 4: Testing n8n-validation.ts with invalid node types'); const errors = validateWorkflowStructure(brokenWorkflowFromLogs as any); logger.info('Validation errors:'); errors.forEach(err => logger.error(` - ${err}`)); // Test 5: Valid workflow (should pass) const validWorkflow = { "name": "Valid Webhook Workflow", "nodes": [ { "id": "webhook-1", "name": "Webhook", "type": "n8n-nodes-base.webhook", // CORRECT! "typeVersion": 2, "position": [250, 300] as [number, number], "parameters": { "path": "my-webhook", "responseMode": "onReceived", "responseData": "allEntries" } } ], "connections": {} }; logger.info('\nTest 5: Valid workflow with correct node type'); const result5 = await validator.validateWorkflow(validWorkflow as any); logger.info('Validation result:'); logger.info(`Valid: ${result5.valid}`); logger.info(`Errors: ${result5.errors.length}`); logger.info(`Warnings: ${result5.warnings.length}`); result5.warnings.forEach(warn => { if (warn && typeof warn === 'object' && 'message' in warn) { logger.warn(` - ${warn.message}`); } }); adapter.close(); } testValidation().catch(err => { logger.error('Test failed:', err); process.exit(1); }); ``` -------------------------------------------------------------------------------- /scripts/test-code-node-enhancements.ts: -------------------------------------------------------------------------------- ```typescript #!/usr/bin/env npx tsx /** * Test script for Code node enhancements * Tests: * 1. Code node documentation in tools_documentation * 2. Enhanced validation for Code nodes * 3. Code node examples * 4. Code node task templates */ import { EnhancedConfigValidator } from '../src/services/enhanced-config-validator.js'; import { ExampleGenerator } from '../src/services/example-generator.js'; import { TaskTemplates } from '../src/services/task-templates.js'; import { getToolDocumentation } from '../src/mcp/tools-documentation.js'; console.log('🧪 Testing Code Node Enhancements\n'); // Test 1: Code node documentation console.log('1️⃣ Testing Code Node Documentation'); console.log('====================================='); const codeNodeDocs = getToolDocumentation('code_node_guide', 'essentials'); console.log('✅ Code node documentation available'); console.log('First 500 chars:', codeNodeDocs.substring(0, 500) + '...\n'); // Test 2: Code node validation console.log('2️⃣ Testing Code Node Validation'); console.log('====================================='); // Test cases const validationTests = [ { name: 'Empty code', config: { language: 'javaScript', jsCode: '' } }, { name: 'No return statement', config: { language: 'javaScript', jsCode: 'const data = items;' } }, { name: 'Invalid return format', config: { language: 'javaScript', jsCode: 'return "hello";' } }, { name: 'Valid code', config: { language: 'javaScript', jsCode: 'return [{json: {result: "success"}}];' } }, { name: 'Python with external library', config: { language: 'python', pythonCode: 'import pandas as pd\nreturn [{"json": {"result": "fail"}}]' } }, { name: 'Code with $json in wrong mode', config: { language: 'javaScript', jsCode: 'const value = $json.field;\nreturn [{json: {value}}];' } }, { name: 'Code with security issue', config: { language: 'javaScript', jsCode: 'const result = eval(item.json.code);\nreturn [{json: {result}}];' } } ]; for (const test of validationTests) { console.log(`\nTest: ${test.name}`); const result = EnhancedConfigValidator.validateWithMode( 'nodes-base.code', test.config, [ { name: 'language', type: 'options', options: ['javaScript', 'python'] }, { name: 'jsCode', type: 'string' }, { name: 'pythonCode', type: 'string' }, { name: 'mode', type: 'options', options: ['runOnceForAllItems', 'runOnceForEachItem'] } ], 'operation', 'ai-friendly' ); console.log(` Valid: ${result.valid}`); if (result.errors.length > 0) { console.log(` Errors: ${result.errors.map(e => e.message).join(', ')}`); } if (result.warnings.length > 0) { console.log(` Warnings: ${result.warnings.map(w => w.message).join(', ')}`); } if (result.suggestions.length > 0) { console.log(` Suggestions: ${result.suggestions.join(', ')}`); } } // Test 3: Code node examples console.log('\n\n3️⃣ Testing Code Node Examples'); console.log('====================================='); const codeExamples = ExampleGenerator.getExamples('nodes-base.code'); console.log('Available examples:', Object.keys(codeExamples)); console.log('\nMinimal example:'); console.log(JSON.stringify(codeExamples.minimal, null, 2)); console.log('\nCommon example preview:'); console.log(codeExamples.common?.jsCode?.substring(0, 200) + '...'); // Test 4: Code node task templates console.log('\n\n4️⃣ Testing Code Node Task Templates'); console.log('====================================='); const codeNodeTasks = [ 'transform_data', 'custom_ai_tool', 'aggregate_data', 'batch_process_with_api', 'error_safe_transform', 'async_data_processing', 'python_data_analysis' ]; for (const taskName of codeNodeTasks) { const template = TaskTemplates.getTemplate(taskName); if (template) { console.log(`\n✅ ${taskName}:`); console.log(` Description: ${template.description}`); console.log(` Language: ${template.configuration.language || 'javaScript'}`); console.log(` Code preview: ${template.configuration.jsCode?.substring(0, 100) || template.configuration.pythonCode?.substring(0, 100)}...`); } else { console.log(`\n❌ ${taskName}: Template not found`); } } // Test 5: Validate a complex Code node configuration console.log('\n\n5️⃣ Testing Complex Code Node Validation'); console.log('=========================================='); const complexCode = { language: 'javaScript', mode: 'runOnceForEachItem', jsCode: `// Complex validation test try { const email = $json.email; const response = await $helpers.httpRequest({ method: 'POST', url: 'https://api.example.com/validate', body: { email } }); return [{ json: { ...response, validated: true } }]; } catch (error) { return [{ json: { error: error.message, validated: false } }]; }`, onError: 'continueRegularOutput', retryOnFail: true, maxTries: 3 }; const complexResult = EnhancedConfigValidator.validateWithMode( 'nodes-base.code', complexCode, [ { name: 'language', type: 'options', options: ['javaScript', 'python'] }, { name: 'jsCode', type: 'string' }, { name: 'mode', type: 'options', options: ['runOnceForAllItems', 'runOnceForEachItem'] }, { name: 'onError', type: 'options' }, { name: 'retryOnFail', type: 'boolean' }, { name: 'maxTries', type: 'number' } ], 'operation', 'strict' ); console.log('Complex code validation:'); console.log(` Valid: ${complexResult.valid}`); console.log(` Errors: ${complexResult.errors.length}`); console.log(` Warnings: ${complexResult.warnings.length}`); console.log(` Suggestions: ${complexResult.suggestions.length}`); console.log('\n✅ All Code node enhancement tests completed!'); ``` -------------------------------------------------------------------------------- /tests/unit/database/database-adapter-unit.test.ts: -------------------------------------------------------------------------------- ```typescript import { describe, it, expect, vi } from 'vitest'; // Mock logger vi.mock('../../../src/utils/logger', () => ({ logger: { info: vi.fn(), warn: vi.fn(), error: vi.fn(), debug: vi.fn() } })); describe('Database Adapter - Unit Tests', () => { describe('DatabaseAdapter Interface', () => { it('should define interface when adapter is created', () => { // This is a type test - ensuring the interface is correctly defined type DatabaseAdapter = { prepare: (sql: string) => any; exec: (sql: string) => void; close: () => void; pragma: (key: string, value?: any) => any; readonly inTransaction: boolean; transaction: <T>(fn: () => T) => T; checkFTS5Support: () => boolean; }; // Type assertion to ensure interface matches const mockAdapter: DatabaseAdapter = { prepare: vi.fn(), exec: vi.fn(), close: vi.fn(), pragma: vi.fn(), inTransaction: false, transaction: vi.fn((fn) => fn()), checkFTS5Support: vi.fn(() => true) }; expect(mockAdapter).toBeDefined(); expect(mockAdapter.prepare).toBeDefined(); expect(mockAdapter.exec).toBeDefined(); expect(mockAdapter.close).toBeDefined(); expect(mockAdapter.pragma).toBeDefined(); expect(mockAdapter.transaction).toBeDefined(); expect(mockAdapter.checkFTS5Support).toBeDefined(); }); }); describe('PreparedStatement Interface', () => { it('should define interface when statement is prepared', () => { // Type test for PreparedStatement type PreparedStatement = { run: (...params: any[]) => { changes: number; lastInsertRowid: number | bigint }; get: (...params: any[]) => any; all: (...params: any[]) => any[]; iterate: (...params: any[]) => IterableIterator<any>; pluck: (toggle?: boolean) => PreparedStatement; expand: (toggle?: boolean) => PreparedStatement; raw: (toggle?: boolean) => PreparedStatement; columns: () => any[]; bind: (...params: any[]) => PreparedStatement; }; const mockStmt: PreparedStatement = { run: vi.fn(() => ({ changes: 1, lastInsertRowid: 1 })), get: vi.fn(), all: vi.fn(() => []), iterate: vi.fn(function* () {}), pluck: vi.fn(function(this: any) { return this; }), expand: vi.fn(function(this: any) { return this; }), raw: vi.fn(function(this: any) { return this; }), columns: vi.fn(() => []), bind: vi.fn(function(this: any) { return this; }) }; expect(mockStmt).toBeDefined(); expect(mockStmt.run).toBeDefined(); expect(mockStmt.get).toBeDefined(); expect(mockStmt.all).toBeDefined(); expect(mockStmt.iterate).toBeDefined(); expect(mockStmt.pluck).toBeDefined(); expect(mockStmt.expand).toBeDefined(); expect(mockStmt.raw).toBeDefined(); expect(mockStmt.columns).toBeDefined(); expect(mockStmt.bind).toBeDefined(); }); }); describe('FTS5 Support Detection', () => { it('should detect support when FTS5 module is available', () => { const mockDb = { exec: vi.fn() }; // Function to test FTS5 support detection logic const checkFTS5Support = (db: any): boolean => { try { db.exec("CREATE VIRTUAL TABLE IF NOT EXISTS test_fts5 USING fts5(content);"); db.exec("DROP TABLE IF EXISTS test_fts5;"); return true; } catch (error) { return false; } }; // Test when FTS5 is supported expect(checkFTS5Support(mockDb)).toBe(true); expect(mockDb.exec).toHaveBeenCalledWith( "CREATE VIRTUAL TABLE IF NOT EXISTS test_fts5 USING fts5(content);" ); // Test when FTS5 is not supported mockDb.exec.mockImplementation(() => { throw new Error('no such module: fts5'); }); expect(checkFTS5Support(mockDb)).toBe(false); }); }); describe('Transaction Handling', () => { it('should handle commit and rollback when transaction is executed', () => { // Test transaction wrapper logic const mockDb = { exec: vi.fn(), inTransaction: false }; const transaction = <T>(db: any, fn: () => T): T => { try { db.exec('BEGIN'); db.inTransaction = true; const result = fn(); db.exec('COMMIT'); db.inTransaction = false; return result; } catch (error) { db.exec('ROLLBACK'); db.inTransaction = false; throw error; } }; // Test successful transaction const result = transaction(mockDb, () => 'success'); expect(result).toBe('success'); expect(mockDb.exec).toHaveBeenCalledWith('BEGIN'); expect(mockDb.exec).toHaveBeenCalledWith('COMMIT'); expect(mockDb.inTransaction).toBe(false); // Reset mocks mockDb.exec.mockClear(); // Test failed transaction expect(() => { transaction(mockDb, () => { throw new Error('transaction error'); }); }).toThrow('transaction error'); expect(mockDb.exec).toHaveBeenCalledWith('BEGIN'); expect(mockDb.exec).toHaveBeenCalledWith('ROLLBACK'); expect(mockDb.inTransaction).toBe(false); }); }); describe('Pragma Handling', () => { it('should return values when pragma commands are executed', () => { const mockDb = { pragma: vi.fn((key: string, value?: any) => { if (key === 'journal_mode' && value === 'WAL') { return 'wal'; } return null; }) }; expect(mockDb.pragma('journal_mode', 'WAL')).toBe('wal'); expect(mockDb.pragma('other_key')).toBe(null); }); }); }); ``` -------------------------------------------------------------------------------- /.github/workflows/benchmark-pr.yml: -------------------------------------------------------------------------------- ```yaml name: Benchmark PR Comparison on: pull_request: branches: [main] paths-ignore: - '**.md' - '**.txt' - 'docs/**' - 'examples/**' - '.github/FUNDING.yml' - '.github/ISSUE_TEMPLATE/**' - '.github/pull_request_template.md' - '.gitignore' - 'LICENSE*' - 'ATTRIBUTION.md' - 'SECURITY.md' - 'CODE_OF_CONDUCT.md' permissions: pull-requests: write contents: read statuses: write jobs: benchmark-comparison: runs-on: ubuntu-latest steps: - name: Checkout PR branch uses: actions/checkout@v4 with: fetch-depth: 0 - name: Setup Node.js uses: actions/setup-node@v4 with: node-version: 20 cache: 'npm' - name: Install dependencies run: npm ci # Run benchmarks on current branch - name: Run current benchmarks run: npm run benchmark:ci - name: Save current results run: cp benchmark-results.json benchmark-current.json # Checkout and run benchmarks on base branch - name: Checkout base branch run: | git checkout ${{ github.event.pull_request.base.sha }} git status - name: Install base dependencies run: npm ci - name: Run baseline benchmarks run: npm run benchmark:ci continue-on-error: true - name: Save baseline results run: | if [ -f benchmark-results.json ]; then cp benchmark-results.json benchmark-baseline.json else echo '{"files":[]}' > benchmark-baseline.json fi # Compare results - name: Checkout PR branch again run: git checkout ${{ github.event.pull_request.head.sha }} - name: Compare benchmarks id: compare run: | node scripts/compare-benchmarks.js benchmark-current.json benchmark-baseline.json || echo "REGRESSION=true" >> $GITHUB_OUTPUT # Upload comparison artifacts - name: Upload benchmark comparison if: always() uses: actions/upload-artifact@v4 with: name: benchmark-comparison-${{ github.run_number }} path: | benchmark-current.json benchmark-baseline.json benchmark-comparison.json benchmark-comparison.md retention-days: 30 # Post comparison to PR - name: Post benchmark comparison to PR if: always() uses: actions/github-script@v7 continue-on-error: true with: script: | try { const fs = require('fs'); let comment = '## ⚡ Benchmark Comparison\n\n'; try { if (fs.existsSync('benchmark-comparison.md')) { const comparison = fs.readFileSync('benchmark-comparison.md', 'utf8'); comment += comparison; } else { comment += 'Benchmark comparison could not be generated.'; } } catch (error) { comment += `Error reading benchmark comparison: ${error.message}`; } comment += '\n\n---\n'; comment += `*[View full benchmark results](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})*`; // Find existing comment const { data: comments } = await github.rest.issues.listComments({ owner: context.repo.owner, repo: context.repo.repo, issue_number: context.issue.number, }); const botComment = comments.find(comment => comment.user.type === 'Bot' && comment.body.includes('## ⚡ Benchmark Comparison') ); if (botComment) { await github.rest.issues.updateComment({ owner: context.repo.owner, repo: context.repo.repo, comment_id: botComment.id, body: comment }); } else { await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: context.issue.number, body: comment }); } } catch (error) { console.error('Failed to create/update PR comment:', error.message); console.log('This is likely due to insufficient permissions for external PRs.'); console.log('Benchmark comparison has been saved to artifacts instead.'); } # Add status check - name: Set benchmark status if: always() uses: actions/github-script@v7 continue-on-error: true with: script: | try { const hasRegression = '${{ steps.compare.outputs.REGRESSION }}' === 'true'; const state = hasRegression ? 'failure' : 'success'; const description = hasRegression ? 'Performance regressions detected' : 'No performance regressions'; await github.rest.repos.createCommitStatus({ owner: context.repo.owner, repo: context.repo.repo, sha: context.sha, state: state, target_url: `https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}`, description: description, context: 'benchmarks/regression-check' }); } catch (error) { console.error('Failed to create commit status:', error.message); console.log('This is likely due to insufficient permissions for external PRs.'); } ``` -------------------------------------------------------------------------------- /tests/test-slack-node-complete.js: -------------------------------------------------------------------------------- ```javascript #!/usr/bin/env node const { NodeDocumentationService } = require('../dist/services/node-documentation-service'); const { EnhancedDocumentationFetcher } = require('../dist/utils/documentation-fetcher'); const { NodeSourceExtractor } = require('../dist/utils/node-source-extractor'); const path = require('path'); async function testSlackNode() { console.log('🧪 Testing Slack Node Complete Information Extraction\n'); const dbPath = path.join(__dirname, '../data/test-slack.db'); const service = new NodeDocumentationService(dbPath); const fetcher = new EnhancedDocumentationFetcher(); const extractor = new NodeSourceExtractor(); try { console.log('📚 Fetching Slack node documentation...'); const docs = await fetcher.getEnhancedNodeDocumentation('n8n-nodes-base.Slack'); console.log('\n✅ Documentation Structure:'); console.log(`- Title: ${docs.title}`); console.log(`- Has markdown: ${docs.markdown?.length > 0 ? 'Yes' : 'No'} (${docs.markdown?.length || 0} chars)`); console.log(`- Operations: ${docs.operations?.length || 0}`); console.log(`- API Methods: ${docs.apiMethods?.length || 0}`); console.log(`- Examples: ${docs.examples?.length || 0}`); console.log(`- Templates: ${docs.templates?.length || 0}`); console.log(`- Related Resources: ${docs.relatedResources?.length || 0}`); console.log(`- Required Scopes: ${docs.requiredScopes?.length || 0}`); console.log('\n📋 Operations by Resource:'); const resourceMap = new Map(); if (docs.operations) { docs.operations.forEach(op => { if (!resourceMap.has(op.resource)) { resourceMap.set(op.resource, []); } resourceMap.get(op.resource).push(op); }); } for (const [resource, ops] of resourceMap) { console.log(`\n ${resource}:`); ops.forEach(op => { console.log(` - ${op.operation}: ${op.description}`); }); } console.log('\n🔌 Sample API Methods:'); if (docs.apiMethods) { docs.apiMethods.slice(0, 5).forEach(method => { console.log(` - ${method.operation} → ${method.apiMethod}`); }); } console.log('\n💻 Extracting Slack node source code...'); const sourceInfo = await extractor.extractNodeSource('n8n-nodes-base.Slack'); console.log('\n✅ Source Code Extraction:'); console.log(`- Has source code: ${sourceInfo.sourceCode ? 'Yes' : 'No'} (${sourceInfo.sourceCode?.length || 0} chars)`); console.log(`- Has credential code: ${sourceInfo.credentialCode ? 'Yes' : 'No'} (${sourceInfo.credentialCode?.length || 0} chars)`); console.log(`- Package name: ${sourceInfo.packageInfo?.name}`); console.log(`- Package version: ${sourceInfo.packageInfo?.version}`); // Store in database console.log('\n💾 Storing in database...'); await service.storeNode({ nodeType: 'n8n-nodes-base.Slack', name: 'Slack', displayName: 'Slack', description: 'Send and receive messages, manage channels, and more', category: 'Communication', documentationUrl: docs?.url || 'https://docs.n8n.io/integrations/builtin/app-nodes/n8n-nodes-base.slack/', documentationMarkdown: docs?.markdown, documentationTitle: docs?.title, operations: docs?.operations, apiMethods: docs?.apiMethods, documentationExamples: docs?.examples, templates: docs?.templates, relatedResources: docs?.relatedResources, requiredScopes: docs?.requiredScopes, sourceCode: sourceInfo.sourceCode || '', credentialCode: sourceInfo.credentialCode, packageName: sourceInfo.packageInfo?.name || 'n8n-nodes-base', version: sourceInfo.packageInfo?.version, hasCredentials: true, isTrigger: false, isWebhook: false }); // Retrieve and verify console.log('\n🔍 Retrieving from database...'); const storedNode = await service.getNodeInfo('n8n-nodes-base.Slack'); console.log('\n✅ Verification Results:'); console.log(`- Node found: ${storedNode ? 'Yes' : 'No'}`); if (storedNode) { console.log(`- Has operations: ${storedNode.operations?.length > 0 ? 'Yes' : 'No'} (${storedNode.operations?.length || 0})`); console.log(`- Has API methods: ${storedNode.apiMethods?.length > 0 ? 'Yes' : 'No'} (${storedNode.apiMethods?.length || 0})`); console.log(`- Has examples: ${storedNode.documentationExamples?.length > 0 ? 'Yes' : 'No'} (${storedNode.documentationExamples?.length || 0})`); console.log(`- Has source code: ${storedNode.sourceCode ? 'Yes' : 'No'}`); console.log(`- Has credential code: ${storedNode.credentialCode ? 'Yes' : 'No'}`); } // Test search console.log('\n🔍 Testing search...'); const searchResults = await service.searchNodes('message send'); const slackInResults = searchResults.some(r => r.nodeType === 'n8n-nodes-base.Slack'); console.log(`- Slack found in search results: ${slackInResults ? 'Yes' : 'No'}`); console.log('\n✅ Complete Information Test Summary:'); const hasCompleteInfo = storedNode && storedNode.operations?.length > 0 && storedNode.apiMethods?.length > 0 && storedNode.sourceCode && storedNode.documentationMarkdown; console.log(`- Has complete information: ${hasCompleteInfo ? '✅ YES' : '❌ NO'}`); if (!hasCompleteInfo) { console.log('\n❌ Missing Information:'); if (!storedNode) console.log(' - Node not stored properly'); if (!storedNode?.operations?.length) console.log(' - No operations extracted'); if (!storedNode?.apiMethods?.length) console.log(' - No API methods extracted'); if (!storedNode?.sourceCode) console.log(' - No source code extracted'); if (!storedNode?.documentationMarkdown) console.log(' - No documentation extracted'); } } catch (error) { console.error('❌ Test failed:', error); } finally { await service.close(); } } // Run the test testSlackNode().catch(console.error); ``` -------------------------------------------------------------------------------- /src/scripts/test-protocol-negotiation.ts: -------------------------------------------------------------------------------- ```typescript #!/usr/bin/env node /** * Test Protocol Version Negotiation * * This script tests the protocol version negotiation logic with different client scenarios. */ import { negotiateProtocolVersion, isN8nClient, STANDARD_PROTOCOL_VERSION, N8N_PROTOCOL_VERSION } from '../utils/protocol-version'; interface TestCase { name: string; clientVersion?: string; clientInfo?: any; userAgent?: string; headers?: Record<string, string>; expectedVersion: string; expectedIsN8nClient: boolean; } const testCases: TestCase[] = [ { name: 'Standard MCP client (Claude Desktop)', clientVersion: '2025-03-26', clientInfo: { name: 'Claude Desktop', version: '1.0.0' }, expectedVersion: '2025-03-26', expectedIsN8nClient: false }, { name: 'n8n client with specific client info', clientVersion: '2025-03-26', clientInfo: { name: 'n8n', version: '1.0.0' }, expectedVersion: N8N_PROTOCOL_VERSION, expectedIsN8nClient: true }, { name: 'LangChain client', clientVersion: '2025-03-26', clientInfo: { name: 'langchain-js', version: '0.1.0' }, expectedVersion: N8N_PROTOCOL_VERSION, expectedIsN8nClient: true }, { name: 'n8n client via user agent', clientVersion: '2025-03-26', userAgent: 'n8n/1.0.0', expectedVersion: N8N_PROTOCOL_VERSION, expectedIsN8nClient: true }, { name: 'n8n mode environment variable', clientVersion: '2025-03-26', expectedVersion: N8N_PROTOCOL_VERSION, expectedIsN8nClient: true }, { name: 'Client requesting older version', clientVersion: '2024-06-25', clientInfo: { name: 'Some Client', version: '1.0.0' }, expectedVersion: '2024-06-25', expectedIsN8nClient: false }, { name: 'Client requesting unsupported version', clientVersion: '2020-01-01', clientInfo: { name: 'Old Client', version: '1.0.0' }, expectedVersion: STANDARD_PROTOCOL_VERSION, expectedIsN8nClient: false }, { name: 'No client info provided', expectedVersion: STANDARD_PROTOCOL_VERSION, expectedIsN8nClient: false }, { name: 'n8n headers detection', clientVersion: '2025-03-26', headers: { 'x-n8n-version': '1.0.0' }, expectedVersion: N8N_PROTOCOL_VERSION, expectedIsN8nClient: true } ]; async function runTests(): Promise<void> { console.log('🧪 Testing Protocol Version Negotiation\n'); let passed = 0; let failed = 0; // Set N8N_MODE for the environment variable test const originalN8nMode = process.env.N8N_MODE; for (const testCase of testCases) { try { // Set N8N_MODE for specific test if (testCase.name.includes('environment variable')) { process.env.N8N_MODE = 'true'; } else { delete process.env.N8N_MODE; } // Test isN8nClient function const detectedAsN8n = isN8nClient(testCase.clientInfo, testCase.userAgent, testCase.headers); // Test negotiateProtocolVersion function const result = negotiateProtocolVersion( testCase.clientVersion, testCase.clientInfo, testCase.userAgent, testCase.headers ); // Check results const versionCorrect = result.version === testCase.expectedVersion; const n8nDetectionCorrect = result.isN8nClient === testCase.expectedIsN8nClient; const isN8nFunctionCorrect = detectedAsN8n === testCase.expectedIsN8nClient; if (versionCorrect && n8nDetectionCorrect && isN8nFunctionCorrect) { console.log(`✅ ${testCase.name}`); console.log(` Version: ${result.version}, n8n client: ${result.isN8nClient}`); console.log(` Reasoning: ${result.reasoning}\n`); passed++; } else { console.log(`❌ ${testCase.name}`); console.log(` Expected: version=${testCase.expectedVersion}, isN8n=${testCase.expectedIsN8nClient}`); console.log(` Got: version=${result.version}, isN8n=${result.isN8nClient}`); console.log(` isN8nClient function: ${detectedAsN8n} (expected: ${testCase.expectedIsN8nClient})`); console.log(` Reasoning: ${result.reasoning}\n`); failed++; } } catch (error) { console.log(`💥 ${testCase.name} - ERROR`); console.log(` ${error instanceof Error ? error.message : String(error)}\n`); failed++; } } // Restore original N8N_MODE if (originalN8nMode) { process.env.N8N_MODE = originalN8nMode; } else { delete process.env.N8N_MODE; } // Summary console.log(`\n📊 Test Results:`); console.log(` ✅ Passed: ${passed}`); console.log(` ❌ Failed: ${failed}`); console.log(` Total: ${passed + failed}`); if (failed > 0) { console.log(`\n❌ Some tests failed!`); process.exit(1); } else { console.log(`\n🎉 All tests passed!`); } } // Additional integration test async function testIntegration(): Promise<void> { console.log('\n🔧 Integration Test - MCP Server Protocol Negotiation\n'); // This would normally test the actual MCP server, but we'll just verify // the negotiation logic works in typical scenarios const scenarios = [ { name: 'Claude Desktop connecting', clientInfo: { name: 'Claude Desktop', version: '1.0.0' }, clientVersion: '2025-03-26' }, { name: 'n8n connecting via HTTP', headers: { 'user-agent': 'n8n/1.52.0' }, clientVersion: '2025-03-26' } ]; for (const scenario of scenarios) { const result = negotiateProtocolVersion( scenario.clientVersion, scenario.clientInfo, scenario.headers?.['user-agent'], scenario.headers ); console.log(`🔍 ${scenario.name}:`); console.log(` Negotiated version: ${result.version}`); console.log(` Is n8n client: ${result.isN8nClient}`); console.log(` Reasoning: ${result.reasoning}\n`); } } if (require.main === module) { runTests() .then(() => testIntegration()) .catch(error => { console.error('Test execution failed:', error); process.exit(1); }); } ``` -------------------------------------------------------------------------------- /tests/test-enhanced-documentation.js: -------------------------------------------------------------------------------- ```javascript #!/usr/bin/env node const { EnhancedDocumentationFetcher } = require('../dist/utils/enhanced-documentation-fetcher'); const { EnhancedSQLiteStorageService } = require('../dist/services/enhanced-sqlite-storage-service'); const { NodeSourceExtractor } = require('../dist/utils/node-source-extractor'); async function testEnhancedDocumentation() { console.log('=== Testing Enhanced Documentation Fetcher ===\n'); const fetcher = new EnhancedDocumentationFetcher(); const storage = new EnhancedSQLiteStorageService('./data/test-enhanced.db'); const extractor = new NodeSourceExtractor(); try { // Test 1: Fetch and parse Slack node documentation console.log('1. Testing Slack node documentation parsing...'); const slackDoc = await fetcher.getEnhancedNodeDocumentation('n8n-nodes-base.slack'); if (slackDoc) { console.log('\n✓ Slack Documentation Found:'); console.log(` - Title: ${slackDoc.title}`); console.log(` - Description: ${slackDoc.description}`); console.log(` - URL: ${slackDoc.url}`); console.log(` - Operations: ${slackDoc.operations?.length || 0} found`); console.log(` - API Methods: ${slackDoc.apiMethods?.length || 0} found`); console.log(` - Examples: ${slackDoc.examples?.length || 0} found`); console.log(` - Required Scopes: ${slackDoc.requiredScopes?.length || 0} found`); // Show sample operations if (slackDoc.operations && slackDoc.operations.length > 0) { console.log('\n Sample Operations:'); slackDoc.operations.slice(0, 5).forEach(op => { console.log(` - ${op.resource}.${op.operation}: ${op.description}`); }); } // Show sample API mappings if (slackDoc.apiMethods && slackDoc.apiMethods.length > 0) { console.log('\n Sample API Mappings:'); slackDoc.apiMethods.slice(0, 5).forEach(api => { console.log(` - ${api.resource}.${api.operation} → ${api.apiMethod}`); }); } } else { console.log('✗ Slack documentation not found'); } // Test 2: Test with If node (core node) console.log('\n\n2. Testing If node documentation parsing...'); const ifDoc = await fetcher.getEnhancedNodeDocumentation('n8n-nodes-base.if'); if (ifDoc) { console.log('\n✓ If Documentation Found:'); console.log(` - Title: ${ifDoc.title}`); console.log(` - Description: ${ifDoc.description}`); console.log(` - Examples: ${ifDoc.examples?.length || 0} found`); console.log(` - Related Resources: ${ifDoc.relatedResources?.length || 0} found`); } // Test 3: Store node with documentation console.log('\n\n3. Testing node storage with documentation...'); // Extract a node const nodeInfo = await extractor.extractNodeSource('n8n-nodes-base.slack'); if (nodeInfo) { const storedNode = await storage.storeNodeWithDocumentation(nodeInfo); console.log('\n✓ Node stored successfully:'); console.log(` - Node Type: ${storedNode.nodeType}`); console.log(` - Has Documentation: ${!!storedNode.documentationMarkdown}`); console.log(` - Operations: ${storedNode.operationCount}`); console.log(` - API Methods: ${storedNode.apiMethodCount}`); console.log(` - Examples: ${storedNode.exampleCount}`); console.log(` - Resources: ${storedNode.resourceCount}`); console.log(` - Scopes: ${storedNode.scopeCount}`); // Get detailed operations const operations = await storage.getNodeOperations(storedNode.id); if (operations.length > 0) { console.log('\n Stored Operations (first 5):'); operations.slice(0, 5).forEach(op => { console.log(` - ${op.resource}.${op.operation}: ${op.description}`); }); } // Get examples const examples = await storage.getNodeExamples(storedNode.id); if (examples.length > 0) { console.log('\n Stored Examples:'); examples.forEach(ex => { console.log(` - ${ex.title || 'Untitled'} (${ex.type}): ${ex.code.length} chars`); }); } } // Test 4: Search with enhanced FTS console.log('\n\n4. Testing enhanced search...'); const searchResults = await storage.searchNodes({ query: 'slack message' }); console.log(`\n✓ Search Results for "slack message": ${searchResults.length} nodes found`); if (searchResults.length > 0) { console.log(' First result:'); const result = searchResults[0]; console.log(` - ${result.displayName || result.name} (${result.nodeType})`); console.log(` - Documentation: ${result.documentationTitle || 'No title'}`); } // Test 5: Get statistics console.log('\n\n5. Getting enhanced statistics...'); const stats = await storage.getEnhancedStatistics(); console.log('\n✓ Enhanced Statistics:'); console.log(` - Total Nodes: ${stats.totalNodes}`); console.log(` - Nodes with Documentation: ${stats.nodesWithDocumentation}`); console.log(` - Documentation Coverage: ${stats.documentationCoverage}%`); console.log(` - Total Operations: ${stats.totalOperations}`); console.log(` - Total API Methods: ${stats.totalApiMethods}`); console.log(` - Total Examples: ${stats.totalExamples}`); console.log(` - Total Resources: ${stats.totalResources}`); console.log(` - Total Scopes: ${stats.totalScopes}`); if (stats.topDocumentedNodes && stats.topDocumentedNodes.length > 0) { console.log('\n Top Documented Nodes:'); stats.topDocumentedNodes.slice(0, 3).forEach(node => { console.log(` - ${node.display_name || node.name}: ${node.operation_count} operations, ${node.example_count} examples`); }); } } catch (error) { console.error('Error during testing:', error); } finally { // Cleanup storage.close(); await fetcher.cleanup(); console.log('\n\n✓ Test completed and cleaned up'); } } // Run the test testEnhancedDocumentation().catch(console.error); ``` -------------------------------------------------------------------------------- /src/telemetry/telemetry-error.ts: -------------------------------------------------------------------------------- ```typescript /** * Telemetry Error Classes * Custom error types for telemetry system with enhanced tracking */ import { TelemetryErrorType, TelemetryErrorContext } from './telemetry-types'; import { logger } from '../utils/logger'; // Re-export types for convenience export { TelemetryErrorType, TelemetryErrorContext } from './telemetry-types'; export class TelemetryError extends Error { public readonly type: TelemetryErrorType; public readonly context?: Record<string, any>; public readonly timestamp: number; public readonly retryable: boolean; constructor( type: TelemetryErrorType, message: string, context?: Record<string, any>, retryable: boolean = false ) { super(message); this.name = 'TelemetryError'; this.type = type; this.context = context; this.timestamp = Date.now(); this.retryable = retryable; // Ensure proper prototype chain Object.setPrototypeOf(this, TelemetryError.prototype); } /** * Convert error to context object */ toContext(): TelemetryErrorContext { return { type: this.type, message: this.message, context: this.context, timestamp: this.timestamp, retryable: this.retryable }; } /** * Log the error with appropriate level */ log(): void { const logContext = { type: this.type, message: this.message, ...this.context }; if (this.retryable) { logger.debug('Retryable telemetry error:', logContext); } else { logger.debug('Non-retryable telemetry error:', logContext); } } } /** * Circuit Breaker for handling repeated failures */ export class TelemetryCircuitBreaker { private failureCount: number = 0; private lastFailureTime: number = 0; private state: 'closed' | 'open' | 'half-open' = 'closed'; private readonly failureThreshold: number; private readonly resetTimeout: number; private readonly halfOpenRequests: number; private halfOpenCount: number = 0; constructor( failureThreshold: number = 5, resetTimeout: number = 60000, // 1 minute halfOpenRequests: number = 3 ) { this.failureThreshold = failureThreshold; this.resetTimeout = resetTimeout; this.halfOpenRequests = halfOpenRequests; } /** * Check if requests should be allowed */ shouldAllow(): boolean { const now = Date.now(); switch (this.state) { case 'closed': return true; case 'open': // Check if enough time has passed to try half-open if (now - this.lastFailureTime > this.resetTimeout) { this.state = 'half-open'; this.halfOpenCount = 0; logger.debug('Circuit breaker transitioning to half-open'); return true; } return false; case 'half-open': // Allow limited requests in half-open state if (this.halfOpenCount < this.halfOpenRequests) { this.halfOpenCount++; return true; } return false; default: return false; } } /** * Record a success */ recordSuccess(): void { if (this.state === 'half-open') { // If we've had enough successful requests, close the circuit if (this.halfOpenCount >= this.halfOpenRequests) { this.state = 'closed'; this.failureCount = 0; logger.debug('Circuit breaker closed after successful recovery'); } } else if (this.state === 'closed') { // Reset failure count on success this.failureCount = 0; } } /** * Record a failure */ recordFailure(error?: Error): void { this.failureCount++; this.lastFailureTime = Date.now(); if (this.state === 'half-open') { // Immediately open on failure in half-open state this.state = 'open'; logger.debug('Circuit breaker opened from half-open state', { error: error?.message }); } else if (this.state === 'closed' && this.failureCount >= this.failureThreshold) { // Open circuit after threshold reached this.state = 'open'; logger.debug( `Circuit breaker opened after ${this.failureCount} failures`, { error: error?.message } ); } } /** * Get current state */ getState(): { state: string; failureCount: number; canRetry: boolean } { return { state: this.state, failureCount: this.failureCount, canRetry: this.shouldAllow() }; } /** * Force reset the circuit breaker */ reset(): void { this.state = 'closed'; this.failureCount = 0; this.lastFailureTime = 0; this.halfOpenCount = 0; } } /** * Error aggregator for tracking error patterns */ export class TelemetryErrorAggregator { private errors: Map<TelemetryErrorType, number> = new Map(); private errorDetails: TelemetryErrorContext[] = []; private readonly maxDetails: number = 100; /** * Record an error */ record(error: TelemetryError): void { // Increment counter for this error type const count = this.errors.get(error.type) || 0; this.errors.set(error.type, count + 1); // Store error details (limited) this.errorDetails.push(error.toContext()); if (this.errorDetails.length > this.maxDetails) { this.errorDetails.shift(); } } /** * Get error statistics */ getStats(): { totalErrors: number; errorsByType: Record<string, number>; mostCommonError?: string; recentErrors: TelemetryErrorContext[]; } { const errorsByType: Record<string, number> = {}; let totalErrors = 0; let mostCommonError: string | undefined; let maxCount = 0; for (const [type, count] of this.errors.entries()) { errorsByType[type] = count; totalErrors += count; if (count > maxCount) { maxCount = count; mostCommonError = type; } } return { totalErrors, errorsByType, mostCommonError, recentErrors: this.errorDetails.slice(-10) // Last 10 errors }; } /** * Clear error history */ reset(): void { this.errors.clear(); this.errorDetails = []; } } ``` -------------------------------------------------------------------------------- /.claude/agents/test-automator.md: -------------------------------------------------------------------------------- ```markdown --- name: test-automator description: Use this agent when you need to create comprehensive test suites, improve test coverage, or set up test automation infrastructure. This includes writing unit tests with mocks, integration tests with test containers, E2E tests with Playwright/Cypress, configuring CI/CD pipelines for testing, or establishing test data management strategies. The agent should be used proactively when new features are implemented without tests or when test coverage needs improvement.\n\nExamples:\n- <example>\n Context: The user has just implemented a new API endpoint without tests.\n user: "I've added a new user registration endpoint to our API"\n assistant: "I see you've added a new endpoint. Let me use the test-automator agent to create a comprehensive test suite for it."\n <commentary>\n Since new functionality was added without tests, proactively use the test-automator agent to ensure proper test coverage.\n </commentary>\n </example>\n- <example>\n Context: The user explicitly asks for test creation.\n user: "Can you write tests for the payment processing module?"\n assistant: "I'll use the test-automator agent to create a comprehensive test suite for the payment processing module."\n <commentary>\n The user directly requested tests, so use the test-automator agent to handle this task.\n </commentary>\n </example>\n- <example>\n Context: The user mentions issues with test reliability.\n user: "Our tests keep failing randomly in CI"\n assistant: "I'll use the test-automator agent to analyze and fix the flaky tests, ensuring they run deterministically."\n <commentary>\n Test reliability issues require the test-automator agent's expertise in creating deterministic tests.\n </commentary>\n </example> --- You are a test automation specialist with deep expertise in comprehensive testing strategies across multiple frameworks and languages. Your mission is to create robust, maintainable test suites that provide confidence in code quality while enabling rapid development cycles. ## Core Responsibilities You will design and implement test suites following the test pyramid principle: - **Unit Tests (70%)**: Fast, isolated tests with extensive mocking and stubbing - **Integration Tests (20%)**: Tests verifying component interactions, using test containers when needed - **E2E Tests (10%)**: Critical user journey tests using Playwright, Cypress, or similar tools ## Testing Philosophy 1. **Test Behavior, Not Implementation**: Focus on what the code does, not how it does it. Tests should survive refactoring. 2. **Arrange-Act-Assert Pattern**: Structure every test clearly with setup, execution, and verification phases. 3. **Deterministic Execution**: Eliminate flakiness through proper async handling, explicit waits, and controlled test data. 4. **Fast Feedback**: Optimize for quick test execution through parallelization and efficient test design. 5. **Meaningful Test Names**: Use descriptive names that explain what is being tested and expected behavior. ## Implementation Guidelines ### Unit Testing - Create focused tests for individual functions/methods - Mock all external dependencies (databases, APIs, file systems) - Use factories or builders for test data creation - Include edge cases: null values, empty collections, boundary conditions - Aim for high code coverage but prioritize critical paths ### Integration Testing - Test real interactions between components - Use test containers for databases and external services - Verify data persistence and retrieval - Test transaction boundaries and rollback scenarios - Include error handling and recovery tests ### E2E Testing - Focus on critical user journeys only - Use page object pattern for maintainability - Implement proper wait strategies (no arbitrary sleeps) - Create reusable test utilities and helpers - Include accessibility checks where applicable ### Test Data Management - Create factories or fixtures for consistent test data - Use builders for complex object creation - Implement data cleanup strategies - Separate test data from production data - Version control test data schemas ### CI/CD Integration - Configure parallel test execution - Set up test result reporting and artifacts - Implement test retry strategies for network-dependent tests - Create test environment provisioning - Configure coverage thresholds and reporting ## Output Requirements You will provide: 1. **Complete test files** with all necessary imports and setup 2. **Mock implementations** for external dependencies 3. **Test data factories** or fixtures as separate modules 4. **CI pipeline configuration** (GitHub Actions, GitLab CI, Jenkins, etc.) 5. **Coverage configuration** files and scripts 6. **E2E test scenarios** with page objects and utilities 7. **Documentation** explaining test structure and running instructions ## Framework Selection Choose appropriate frameworks based on the technology stack: - **JavaScript/TypeScript**: Jest, Vitest, Mocha + Chai, Playwright, Cypress - **Python**: pytest, unittest, pytest-mock, factory_boy - **Java**: JUnit 5, Mockito, TestContainers, REST Assured - **Go**: testing package, testify, gomock - **Ruby**: RSpec, Minitest, FactoryBot ## Quality Checks Before finalizing any test suite, verify: - All tests pass consistently (run multiple times) - No hardcoded values or environment dependencies - Proper teardown and cleanup - Clear assertion messages for failures - Appropriate use of beforeEach/afterEach hooks - No test interdependencies - Reasonable execution time ## Special Considerations - For async code, ensure proper promise handling and async/await usage - For UI tests, implement proper element waiting strategies - For API tests, validate both response structure and data - For performance-critical code, include benchmark tests - For security-sensitive code, include security-focused test cases When encountering existing tests, analyze them first to understand patterns and conventions before adding new ones. Always strive for consistency with the existing test architecture while improving where possible. ``` -------------------------------------------------------------------------------- /docs/INSTALLATION.md: -------------------------------------------------------------------------------- ```markdown # Installation Guide This guide covers all installation methods for n8n-MCP. ## Table of Contents - [Quick Start](#quick-start) - [Docker Installation](#docker-installation) - [Manual Installation](#manual-installation) - [Development Setup](#development-setup) - [Troubleshooting](#troubleshooting) ## Quick Start The fastest way to get n8n-MCP running: ```bash # Using Docker (recommended) cat > .env << EOF AUTH_TOKEN=$(openssl rand -base64 32) USE_FIXED_HTTP=true EOF docker compose up -d ``` ## Docker Installation ### Prerequisites - Docker Engine (install via package manager or Docker Desktop) - Docker Compose V2 (included with modern Docker installations) ### Method 1: Using Pre-built Images 1. **Create a project directory:** ```bash mkdir n8n-mcp && cd n8n-mcp ``` 2. **Create docker-compose.yml:** ```yaml version: '3.8' services: n8n-mcp: image: ghcr.io/czlonkowski/n8n-mcp:latest container_name: n8n-mcp restart: unless-stopped environment: MCP_MODE: ${MCP_MODE:-http} USE_FIXED_HTTP: ${USE_FIXED_HTTP:-true} AUTH_TOKEN: ${AUTH_TOKEN:?AUTH_TOKEN is required} NODE_ENV: ${NODE_ENV:-production} LOG_LEVEL: ${LOG_LEVEL:-info} PORT: ${PORT:-3000} volumes: - n8n-mcp-data:/app/data ports: - "${PORT:-3000}:3000" healthcheck: test: ["CMD", "curl", "-f", "http://127.0.0.1:3000/health"] interval: 30s timeout: 10s retries: 3 volumes: n8n-mcp-data: driver: local ``` 3. **Create .env file:** ```bash echo "AUTH_TOKEN=$(openssl rand -base64 32)" > .env ``` 4. **Start the container:** ```bash docker compose up -d ``` 5. **Verify installation:** ```bash curl http://localhost:3000/health ``` ### Method 2: Building from Source 1. **Clone the repository:** ```bash git clone https://github.com/czlonkowski/n8n-mcp.git cd n8n-mcp ``` 2. **Build the image:** ```bash docker build -t n8n-mcp:local . ``` 3. **Run with docker-compose:** ```bash docker compose up -d ``` ### Docker Management Commands ```bash # View logs docker compose logs -f # Stop the container docker compose stop # Remove container and volumes docker compose down -v # Update to latest image docker compose pull docker compose up -d # Execute commands inside container docker compose exec n8n-mcp npm run validate # Backup database docker cp n8n-mcp:/app/data/nodes.db ./nodes-backup.db ``` ## Manual Installation ### Prerequisites - Node.js v16+ (v20+ recommended) - npm or yarn - Git ### Step-by-Step Installation 1. **Clone the repository:** ```bash git clone https://github.com/czlonkowski/n8n-mcp.git cd n8n-mcp ``` 2. **Clone n8n documentation (optional but recommended):** ```bash git clone https://github.com/n8n-io/n8n-docs.git ../n8n-docs ``` 3. **Install dependencies:** ```bash npm install ``` 4. **Build the project:** ```bash npm run build ``` 5. **Initialize the database:** ```bash npm run rebuild ``` 6. **Validate installation:** ```bash npm run test-nodes ``` ### Running the Server #### stdio Mode (for Claude Desktop) ```bash npm start ``` #### HTTP Mode (for remote access) ```bash npm run start:http ``` ### Environment Configuration Create a `.env` file in the project root: ```env # Server configuration MCP_MODE=http # or stdio PORT=3000 HOST=0.0.0.0 NODE_ENV=production LOG_LEVEL=info # Authentication (required for HTTP mode) AUTH_TOKEN=your-secure-token-here # Database NODE_DB_PATH=./data/nodes.db REBUILD_ON_START=false ``` ## Development Setup ### Prerequisites - All manual installation prerequisites - TypeScript knowledge - Familiarity with MCP protocol ### Setup Steps 1. **Clone and install:** ```bash git clone https://github.com/czlonkowski/n8n-mcp.git cd n8n-mcp npm install ``` 2. **Set up development environment:** ```bash cp .env.example .env # Edit .env with your settings ``` 3. **Development commands:** ```bash # Run in development mode with auto-reload npm run dev # Run tests npm test # Type checking npm run typecheck # Linting npm run lint ``` ### Docker Development 1. **Use docker-compose override:** ```bash cp docker-compose.override.yml.example docker-compose.override.yml ``` 2. **Edit override for development:** ```yaml version: '3.8' services: n8n-mcp: build: . environment: NODE_ENV: development LOG_LEVEL: debug volumes: - ./src:/app/src:ro - ./dist:/app/dist ``` 3. **Run with live reload:** ```bash docker compose up --build ``` ## Troubleshooting ### Common Issues #### Port Already in Use ```bash # Find process using port 3000 lsof -i :3000 # Use a different port PORT=3001 docker compose up -d ``` #### Database Initialization Failed ```bash # For Docker docker compose exec n8n-mcp npm run rebuild # For manual installation npm run rebuild ``` #### Permission Denied Errors ```bash # Fix permissions (Linux/macOS) sudo chown -R $(whoami) ./data # For Docker volumes docker compose exec n8n-mcp chown -R nodejs:nodejs /app/data ``` #### Node Version Mismatch The project includes automatic fallback to sql.js for compatibility. If you still have issues: ```bash # Check Node version node --version # Use nvm to switch versions nvm use 20 ``` ### Getting Help 1. Check the logs: - Docker: `docker compose logs` - Manual: Check console output or `LOG_LEVEL=debug npm start` 2. Validate the database: ```bash npm run validate ``` 3. Run tests: ```bash npm test ``` 4. Report issues: - GitHub Issues: https://github.com/czlonkowski/n8n-mcp/issues - Include logs and environment details ## Next Steps After installation, configure Claude Desktop to use n8n-MCP: - See [Claude Desktop Setup Guide](./README_CLAUDE_SETUP.md) - For remote deployments, see [HTTP Deployment Guide](./HTTP_DEPLOYMENT.md) - For Docker details, see [Docker README](../DOCKER_README.md) ``` -------------------------------------------------------------------------------- /deploy/quick-deploy-n8n.sh: -------------------------------------------------------------------------------- ```bash #!/bin/bash # Quick deployment script for n8n + n8n-mcp stack set -e # Colors for output RED='\033[0;31m' GREEN='\033[0;32m' YELLOW='\033[1;33m' NC='\033[0m' # No Color # Default values COMPOSE_FILE="docker-compose.n8n.yml" ENV_FILE=".env" ENV_EXAMPLE=".env.n8n.example" # Function to print colored output print_info() { echo -e "${GREEN}[INFO]${NC} $1" } print_warn() { echo -e "${YELLOW}[WARN]${NC} $1" } print_error() { echo -e "${RED}[ERROR]${NC} $1" } # Function to generate random token generate_token() { openssl rand -hex 32 } # Function to check prerequisites check_prerequisites() { print_info "Checking prerequisites..." # Check Docker if ! command -v docker &> /dev/null; then print_error "Docker is not installed. Please install Docker first." exit 1 fi # Check Docker Compose if ! command -v docker-compose &> /dev/null && ! docker compose version &> /dev/null; then print_error "Docker Compose is not installed. Please install Docker Compose first." exit 1 fi # Check openssl for token generation if ! command -v openssl &> /dev/null; then print_error "OpenSSL is not installed. Please install OpenSSL first." exit 1 fi print_info "All prerequisites are installed." } # Function to setup environment setup_environment() { print_info "Setting up environment..." # Check if .env exists if [ -f "$ENV_FILE" ]; then print_warn ".env file already exists. Backing up to .env.backup" cp "$ENV_FILE" ".env.backup" fi # Copy example env file if [ -f "$ENV_EXAMPLE" ]; then cp "$ENV_EXAMPLE" "$ENV_FILE" print_info "Created .env file from example" else print_error ".env.n8n.example file not found!" exit 1 fi # Generate encryption key ENCRYPTION_KEY=$(generate_token) if [[ "$OSTYPE" == "darwin"* ]]; then sed -i '' "s/N8N_ENCRYPTION_KEY=/N8N_ENCRYPTION_KEY=$ENCRYPTION_KEY/" "$ENV_FILE" else sed -i "s/N8N_ENCRYPTION_KEY=/N8N_ENCRYPTION_KEY=$ENCRYPTION_KEY/" "$ENV_FILE" fi print_info "Generated n8n encryption key" # Generate MCP auth token MCP_TOKEN=$(generate_token) if [[ "$OSTYPE" == "darwin"* ]]; then sed -i '' "s/MCP_AUTH_TOKEN=/MCP_AUTH_TOKEN=$MCP_TOKEN/" "$ENV_FILE" else sed -i "s/MCP_AUTH_TOKEN=/MCP_AUTH_TOKEN=$MCP_TOKEN/" "$ENV_FILE" fi print_info "Generated MCP authentication token" print_warn "Please update the following in .env file:" print_warn " - N8N_BASIC_AUTH_PASSWORD (current: changeme)" print_warn " - N8N_API_KEY (get from n8n UI after first start)" } # Function to build images build_images() { print_info "Building n8n-mcp image..." if docker compose version &> /dev/null; then docker compose -f "$COMPOSE_FILE" build else docker-compose -f "$COMPOSE_FILE" build fi print_info "Image built successfully" } # Function to start services start_services() { print_info "Starting services..." if docker compose version &> /dev/null; then docker compose -f "$COMPOSE_FILE" up -d else docker-compose -f "$COMPOSE_FILE" up -d fi print_info "Services started" } # Function to show status show_status() { print_info "Checking service status..." if docker compose version &> /dev/null; then docker compose -f "$COMPOSE_FILE" ps else docker-compose -f "$COMPOSE_FILE" ps fi echo "" print_info "Services are starting up. This may take a minute..." print_info "n8n will be available at: http://localhost:5678" print_info "n8n-mcp will be available at: http://localhost:3000" echo "" print_warn "Next steps:" print_warn "1. Access n8n at http://localhost:5678" print_warn "2. Log in with admin/changeme (or your custom password)" print_warn "3. Go to Settings > n8n API > Create API Key" print_warn "4. Update N8N_API_KEY in .env file" print_warn "5. Restart n8n-mcp: docker-compose -f $COMPOSE_FILE restart n8n-mcp" } # Function to stop services stop_services() { print_info "Stopping services..." if docker compose version &> /dev/null; then docker compose -f "$COMPOSE_FILE" down else docker-compose -f "$COMPOSE_FILE" down fi print_info "Services stopped" } # Function to view logs view_logs() { SERVICE=$1 if [ -z "$SERVICE" ]; then if docker compose version &> /dev/null; then docker compose -f "$COMPOSE_FILE" logs -f else docker-compose -f "$COMPOSE_FILE" logs -f fi else if docker compose version &> /dev/null; then docker compose -f "$COMPOSE_FILE" logs -f "$SERVICE" else docker-compose -f "$COMPOSE_FILE" logs -f "$SERVICE" fi fi } # Main script case "${1:-help}" in setup) check_prerequisites setup_environment build_images start_services show_status ;; start) start_services show_status ;; stop) stop_services ;; restart) stop_services start_services show_status ;; status) show_status ;; logs) view_logs "${2}" ;; build) build_images ;; *) echo "n8n-mcp Quick Deploy Script" echo "" echo "Usage: $0 {setup|start|stop|restart|status|logs|build}" echo "" echo "Commands:" echo " setup - Initial setup: create .env, build images, and start services" echo " start - Start all services" echo " stop - Stop all services" echo " restart - Restart all services" echo " status - Show service status" echo " logs - View logs (optionally specify service: logs n8n-mcp)" echo " build - Build/rebuild images" echo "" echo "Examples:" echo " $0 setup # First time setup" echo " $0 logs n8n-mcp # View n8n-mcp logs" echo " $0 restart # Restart all services" ;; esac ``` -------------------------------------------------------------------------------- /.claude/agents/technical-researcher.md: -------------------------------------------------------------------------------- ```markdown --- name: technical-researcher description: Use this agent when you need to conduct in-depth technical research on complex topics, technologies, or architectural decisions. This includes investigating new frameworks, analyzing security vulnerabilities, evaluating third-party APIs, researching performance optimization strategies, or generating technical feasibility reports. The agent excels at multi-source investigations requiring comprehensive analysis and synthesis of technical information.\n\nExamples:\n- <example>\n Context: User needs to research a new framework before adoption\n user: "I need to understand if we should adopt Rust for our high-performance backend services"\n assistant: "I'll use the technical-researcher agent to conduct a comprehensive investigation into Rust for backend services"\n <commentary>\n Since the user needs deep technical research on a framework adoption decision, use the technical-researcher agent to analyze Rust's suitability.\n </commentary>\n</example>\n- <example>\n Context: User is investigating a security vulnerability\n user: "Research the log4j vulnerability and its impact on Java applications"\n assistant: "Let me launch the technical-researcher agent to investigate the log4j vulnerability comprehensively"\n <commentary>\n The user needs detailed security research, so the technical-researcher agent will gather and synthesize information from multiple sources.\n </commentary>\n</example>\n- <example>\n Context: User needs to evaluate an API integration\n user: "We're considering integrating with Stripe's new payment intents API - need to understand the technical implications"\n assistant: "I'll deploy the technical-researcher agent to analyze Stripe's payment intents API and its integration requirements"\n <commentary>\n Complex API evaluation requires the technical-researcher agent's multi-source investigation capabilities.\n </commentary>\n</example> --- You are an elite Technical Research Specialist with expertise in conducting comprehensive investigations into complex technical topics. You excel at decomposing research questions, orchestrating multi-source searches, synthesizing findings, and producing actionable analysis reports. ## Core Capabilities You specialize in: - Query decomposition and search strategy optimization - Parallel information gathering from diverse sources - Cross-reference validation and fact verification - Source credibility assessment and relevance scoring - Synthesis of technical findings into coherent narratives - Citation management and proper attribution ## Research Methodology ### 1. Query Analysis Phase - Decompose the research topic into specific sub-questions - Identify key technical terms, acronyms, and related concepts - Determine the appropriate research depth (quick lookup vs. deep dive) - Plan your search strategy with 3-5 initial queries ### 2. Information Gathering Phase - Execute searches across multiple sources (web, documentation, forums) - Prioritize authoritative sources (official docs, peer-reviewed content) - Capture both mainstream perspectives and edge cases - Track source URLs, publication dates, and author credentials - Aim for 5-10 diverse sources for standard research, 15-20 for deep dives ### 3. Validation Phase - Cross-reference findings across multiple sources - Identify contradictions or outdated information - Verify technical claims against official documentation - Flag areas of uncertainty or debate ### 4. Synthesis Phase - Organize findings into logical sections - Highlight key insights and actionable recommendations - Present trade-offs and alternative approaches - Include code examples or configuration snippets where relevant ## Output Structure Your research reports should follow this structure: 1. **Executive Summary** (2-3 paragraphs) - Key findings and recommendations - Critical decision factors - Risk assessment 2. **Technical Overview** - Core concepts and architecture - Key features and capabilities - Technical requirements and dependencies 3. **Detailed Analysis** - Performance characteristics - Security considerations - Integration complexity - Scalability factors - Community support and ecosystem 4. **Practical Considerations** - Implementation effort estimates - Learning curve assessment - Operational requirements - Cost implications 5. **Comparative Analysis** (when applicable) - Alternative solutions - Trade-off matrix - Migration considerations 6. **Recommendations** - Specific action items - Risk mitigation strategies - Proof-of-concept suggestions 7. **References** - All sources with titles, URLs, and access dates - Credibility indicators for each source ## Quality Standards - **Accuracy**: Verify all technical claims against multiple sources - **Completeness**: Address all aspects of the research question - **Objectivity**: Present balanced views including limitations - **Timeliness**: Prioritize recent information (flag if >2 years old) - **Actionability**: Provide concrete next steps and recommendations ## Adaptive Strategies - For emerging technologies: Focus on early adopter experiences and official roadmaps - For security research: Prioritize CVE databases, security advisories, and vendor responses - For performance analysis: Seek benchmarks, case studies, and real-world implementations - For API evaluations: Examine documentation quality, SDK availability, and integration examples ## Research Iteration If initial searches yield insufficient results: 1. Broaden search terms or try alternative terminology 2. Check specialized forums, GitHub issues, or Stack Overflow 3. Look for conference talks, blog posts, or video tutorials 4. Consider reaching out to subject matter experts or communities ## Limitations Acknowledgment Always disclose: - Information gaps or areas lacking documentation - Conflicting sources or unresolved debates - Potential biases in available sources - Time-sensitive information that may become outdated You maintain intellectual rigor while making complex technical information accessible. Your research empowers teams to make informed decisions with confidence, backed by thorough investigation and clear analysis. ``` -------------------------------------------------------------------------------- /src/mcp/tools-n8n-friendly.ts: -------------------------------------------------------------------------------- ```typescript /** * n8n-friendly tool descriptions * These descriptions are optimized to reduce schema validation errors in n8n's AI Agent * * Key principles: * 1. Use exact JSON examples in descriptions * 2. Be explicit about data types * 3. Keep descriptions short and directive * 4. Avoid ambiguity */ export const n8nFriendlyDescriptions: Record<string, { description: string; params: Record<string, string>; }> = { // Validation tools - most prone to errors validate_node_operation: { description: 'Validate n8n node. ALWAYS pass two parameters: nodeType (string) and config (object). Example call: {"nodeType": "nodes-base.slack", "config": {"resource": "channel", "operation": "create"}}', params: { nodeType: 'String value like "nodes-base.slack"', config: 'Object value like {"resource": "channel", "operation": "create"} or empty object {}', profile: 'Optional string: "minimal" or "runtime" or "ai-friendly" or "strict"' } }, validate_node_minimal: { description: 'Check required fields. MUST pass: nodeType (string) and config (object). Example: {"nodeType": "nodes-base.webhook", "config": {}}', params: { nodeType: 'String like "nodes-base.webhook"', config: 'Object, use {} for empty' } }, // Search and info tools search_nodes: { description: 'Search nodes. Pass query (string). Example: {"query": "webhook"}', params: { query: 'String keyword like "webhook" or "database"', limit: 'Optional number, default 20' } }, get_node_info: { description: 'Get node details. Pass nodeType (string). Example: {"nodeType": "nodes-base.httpRequest"}', params: { nodeType: 'String with prefix like "nodes-base.httpRequest"' } }, get_node_essentials: { description: 'Get node basics. Pass nodeType (string). Example: {"nodeType": "nodes-base.slack"}', params: { nodeType: 'String with prefix like "nodes-base.slack"' } }, // Task tools get_node_for_task: { description: 'Find node for task. Pass task (string). Example: {"task": "send_http_request"}', params: { task: 'String task name like "send_http_request"' } }, list_tasks: { description: 'List tasks by category. Pass category (string). Example: {"category": "HTTP/API"}', params: { category: 'String: "HTTP/API" or "Webhooks" or "Database" or "AI/LangChain" or "Data Processing" or "Communication"' } }, // Workflow validation validate_workflow: { description: 'Validate workflow. Pass workflow object. MUST have: {"workflow": {"nodes": [array of node objects], "connections": {object with node connections}}}. Each node needs: name, type, typeVersion, position.', params: { workflow: 'Object with two required fields: nodes (array) and connections (object). Example: {"nodes": [{"name": "Webhook", "type": "n8n-nodes-base.webhook", "typeVersion": 2, "position": [250, 300], "parameters": {}}], "connections": {}}', options: 'Optional object. Example: {"validateNodes": true, "profile": "runtime"}' } }, validate_workflow_connections: { description: 'Validate workflow connections only. Pass workflow object. Example: {"workflow": {"nodes": [...], "connections": {}}}', params: { workflow: 'Object with nodes array and connections object. Minimal example: {"nodes": [{"name": "Webhook"}], "connections": {}}' } }, validate_workflow_expressions: { description: 'Validate n8n expressions in workflow. Pass workflow object. Example: {"workflow": {"nodes": [...], "connections": {}}}', params: { workflow: 'Object with nodes array and connections object containing n8n expressions like {{ $json.data }}' } }, // Property tools get_property_dependencies: { description: 'Get field dependencies. Pass nodeType (string) and optional config (object). Example: {"nodeType": "nodes-base.httpRequest", "config": {}}', params: { nodeType: 'String like "nodes-base.httpRequest"', config: 'Optional object, use {} for empty' } }, // AI tool info get_node_as_tool_info: { description: 'Get AI tool usage. Pass nodeType (string). Example: {"nodeType": "nodes-base.slack"}', params: { nodeType: 'String with prefix like "nodes-base.slack"' } }, // Template tools search_templates: { description: 'Search workflow templates. Pass query (string). Example: {"query": "chatbot"}', params: { query: 'String keyword like "chatbot" or "webhook"', limit: 'Optional number, default 20' } }, get_template: { description: 'Get template by ID. Pass templateId (number). Example: {"templateId": 1234}', params: { templateId: 'Number ID like 1234' } }, // Documentation tool tools_documentation: { description: 'Get tool docs. Pass optional depth (string). Example: {"depth": "essentials"} or {}', params: { depth: 'Optional string: "essentials" or "overview" or "detailed"', topic: 'Optional string topic name' } } }; /** * Apply n8n-friendly descriptions to tools * This function modifies tool descriptions to be more explicit for n8n's AI agent */ export function makeToolsN8nFriendly(tools: any[]): any[] { return tools.map(tool => { const toolName = tool.name as string; const friendlyDesc = n8nFriendlyDescriptions[toolName]; if (friendlyDesc) { // Clone the tool to avoid mutating the original const updatedTool = { ...tool }; // Update the main description updatedTool.description = friendlyDesc.description; // Clone inputSchema if it exists if (tool.inputSchema?.properties) { updatedTool.inputSchema = { ...tool.inputSchema, properties: { ...tool.inputSchema.properties } }; // Update parameter descriptions Object.keys(updatedTool.inputSchema.properties).forEach(param => { if (friendlyDesc.params[param]) { updatedTool.inputSchema.properties[param] = { ...updatedTool.inputSchema.properties[param], description: friendlyDesc.params[param] }; } }); } return updatedTool; } return tool; }); } ```