This is page 10 of 59. Use http://codebase.md/czlonkowski/n8n-mcp?lines=true&page={x} to view the full context. # Directory Structure ``` ├── _config.yml ├── .claude │ └── agents │ ├── code-reviewer.md │ ├── context-manager.md │ ├── debugger.md │ ├── deployment-engineer.md │ ├── mcp-backend-engineer.md │ ├── n8n-mcp-tester.md │ ├── technical-researcher.md │ └── test-automator.md ├── .dockerignore ├── .env.docker ├── .env.example ├── .env.n8n.example ├── .env.test ├── .env.test.example ├── .github │ ├── ABOUT.md │ ├── BENCHMARK_THRESHOLDS.md │ ├── FUNDING.yml │ ├── gh-pages.yml │ ├── secret_scanning.yml │ └── workflows │ ├── benchmark-pr.yml │ ├── benchmark.yml │ ├── docker-build-fast.yml │ ├── docker-build-n8n.yml │ ├── docker-build.yml │ ├── release.yml │ ├── test.yml │ └── update-n8n-deps.yml ├── .gitignore ├── .npmignore ├── ATTRIBUTION.md ├── CHANGELOG.md ├── CLAUDE.md ├── codecov.yml ├── coverage.json ├── data │ ├── .gitkeep │ ├── nodes.db │ ├── nodes.db-shm │ ├── nodes.db-wal │ └── templates.db ├── deploy │ └── quick-deploy-n8n.sh ├── docker │ ├── docker-entrypoint.sh │ ├── n8n-mcp │ ├── parse-config.js │ └── README.md ├── docker-compose.buildkit.yml ├── docker-compose.extract.yml ├── docker-compose.n8n.yml ├── docker-compose.override.yml.example ├── docker-compose.test-n8n.yml ├── docker-compose.yml ├── Dockerfile ├── Dockerfile.railway ├── Dockerfile.test ├── docs │ ├── AUTOMATED_RELEASES.md │ ├── BENCHMARKS.md │ ├── CHANGELOG.md │ ├── CLAUDE_CODE_SETUP.md │ ├── CLAUDE_INTERVIEW.md │ ├── CODECOV_SETUP.md │ ├── CODEX_SETUP.md │ ├── CURSOR_SETUP.md │ ├── DEPENDENCY_UPDATES.md │ ├── DOCKER_README.md │ ├── DOCKER_TROUBLESHOOTING.md │ ├── FINAL_AI_VALIDATION_SPEC.md │ ├── FLEXIBLE_INSTANCE_CONFIGURATION.md │ ├── HTTP_DEPLOYMENT.md │ ├── img │ │ ├── cc_command.png │ │ ├── cc_connected.png │ │ ├── codex_connected.png │ │ ├── cursor_tut.png │ │ ├── Railway_api.png │ │ ├── Railway_server_address.png │ │ ├── vsc_ghcp_chat_agent_mode.png │ │ ├── vsc_ghcp_chat_instruction_files.png │ │ ├── vsc_ghcp_chat_thinking_tool.png │ │ └── windsurf_tut.png │ ├── INSTALLATION.md │ ├── LIBRARY_USAGE.md │ ├── local │ │ ├── DEEP_DIVE_ANALYSIS_2025-10-02.md │ │ ├── DEEP_DIVE_ANALYSIS_README.md │ │ ├── Deep_dive_p1_p2.md │ │ ├── integration-testing-plan.md │ │ ├── integration-tests-phase1-summary.md │ │ ├── N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md │ │ ├── P0_IMPLEMENTATION_PLAN.md │ │ └── TEMPLATE_MINING_ANALYSIS.md │ ├── MCP_ESSENTIALS_README.md │ ├── MCP_QUICK_START_GUIDE.md │ ├── N8N_DEPLOYMENT.md │ ├── RAILWAY_DEPLOYMENT.md │ ├── README_CLAUDE_SETUP.md │ ├── README.md │ ├── tools-documentation-usage.md │ ├── VS_CODE_PROJECT_SETUP.md │ ├── WINDSURF_SETUP.md │ └── workflow-diff-examples.md ├── examples │ └── enhanced-documentation-demo.js ├── fetch_log.txt ├── LICENSE ├── MEMORY_N8N_UPDATE.md ├── MEMORY_TEMPLATE_UPDATE.md ├── monitor_fetch.sh ├── N8N_HTTP_STREAMABLE_SETUP.md ├── n8n-nodes.db ├── P0-R3-TEST-PLAN.md ├── package-lock.json ├── package.json ├── package.runtime.json ├── PRIVACY.md ├── railway.json ├── README.md ├── renovate.json ├── scripts │ ├── analyze-optimization.sh │ ├── audit-schema-coverage.ts │ ├── build-optimized.sh │ ├── compare-benchmarks.js │ ├── demo-optimization.sh │ ├── deploy-http.sh │ ├── deploy-to-vm.sh │ ├── export-webhook-workflows.ts │ ├── extract-changelog.js │ ├── extract-from-docker.js │ ├── extract-nodes-docker.sh │ ├── extract-nodes-simple.sh │ ├── format-benchmark-results.js │ ├── generate-benchmark-stub.js │ ├── generate-detailed-reports.js │ ├── generate-test-summary.js │ 
├── http-bridge.js │ ├── mcp-http-client.js │ ├── migrate-nodes-fts.ts │ ├── migrate-tool-docs.ts │ ├── n8n-docs-mcp.service │ ├── nginx-n8n-mcp.conf │ ├── prebuild-fts5.ts │ ├── prepare-release.js │ ├── publish-npm-quick.sh │ ├── publish-npm.sh │ ├── quick-test.ts │ ├── run-benchmarks-ci.js │ ├── sync-runtime-version.js │ ├── test-ai-validation-debug.ts │ ├── test-code-node-enhancements.ts │ ├── test-code-node-fixes.ts │ ├── test-docker-config.sh │ ├── test-docker-fingerprint.ts │ ├── test-docker-optimization.sh │ ├── test-docker.sh │ ├── test-empty-connection-validation.ts │ ├── test-error-message-tracking.ts │ ├── test-error-output-validation.ts │ ├── test-error-validation.js │ ├── test-essentials.ts │ ├── test-expression-code-validation.ts │ ├── test-expression-format-validation.js │ ├── test-fts5-search.ts │ ├── test-fuzzy-fix.ts │ ├── test-fuzzy-simple.ts │ ├── test-helpers-validation.ts │ ├── test-http-search.ts │ ├── test-http.sh │ ├── test-jmespath-validation.ts │ ├── test-multi-tenant-simple.ts │ ├── test-multi-tenant.ts │ ├── test-n8n-integration.sh │ ├── test-node-info.js │ ├── test-node-type-validation.ts │ ├── test-nodes-base-prefix.ts │ ├── test-operation-validation.ts │ ├── test-optimized-docker.sh │ ├── test-release-automation.js │ ├── test-search-improvements.ts │ ├── test-security.ts │ ├── test-single-session.sh │ ├── test-sqljs-triggers.ts │ ├── test-telemetry-debug.ts │ ├── test-telemetry-direct.ts │ ├── test-telemetry-env.ts │ ├── test-telemetry-integration.ts │ ├── test-telemetry-no-select.ts │ ├── test-telemetry-security.ts │ ├── test-telemetry-simple.ts │ ├── test-typeversion-validation.ts │ ├── test-url-configuration.ts │ ├── test-user-id-persistence.ts │ ├── test-webhook-validation.ts │ ├── test-workflow-insert.ts │ ├── test-workflow-sanitizer.ts │ ├── test-workflow-tracking-debug.ts │ ├── update-and-publish-prep.sh │ ├── update-n8n-deps.js │ ├── update-readme-version.js │ ├── vitest-benchmark-json-reporter.js │ └── vitest-benchmark-reporter.ts ├── SECURITY.md ├── src │ ├── config │ │ └── n8n-api.ts │ ├── data │ │ └── canonical-ai-tool-examples.json │ ├── database │ │ ├── database-adapter.ts │ │ ├── migrations │ │ │ └── add-template-node-configs.sql │ │ ├── node-repository.ts │ │ ├── nodes.db │ │ ├── schema-optimized.sql │ │ └── schema.sql │ ├── errors │ │ └── validation-service-error.ts │ ├── http-server-single-session.ts │ ├── http-server.ts │ ├── index.ts │ ├── loaders │ │ └── node-loader.ts │ ├── mappers │ │ └── docs-mapper.ts │ ├── mcp │ │ ├── handlers-n8n-manager.ts │ │ ├── handlers-workflow-diff.ts │ │ ├── index.ts │ │ ├── server.ts │ │ ├── stdio-wrapper.ts │ │ ├── tool-docs │ │ │ ├── configuration │ │ │ │ ├── get-node-as-tool-info.ts │ │ │ │ ├── get-node-documentation.ts │ │ │ │ ├── get-node-essentials.ts │ │ │ │ ├── get-node-info.ts │ │ │ │ ├── get-property-dependencies.ts │ │ │ │ ├── index.ts │ │ │ │ └── search-node-properties.ts │ │ │ ├── discovery │ │ │ │ ├── get-database-statistics.ts │ │ │ │ ├── index.ts │ │ │ │ ├── list-ai-tools.ts │ │ │ │ ├── list-nodes.ts │ │ │ │ └── search-nodes.ts │ │ │ ├── guides │ │ │ │ ├── ai-agents-guide.ts │ │ │ │ └── index.ts │ │ │ ├── index.ts │ │ │ ├── system │ │ │ │ ├── index.ts │ │ │ │ ├── n8n-diagnostic.ts │ │ │ │ ├── n8n-health-check.ts │ │ │ │ ├── n8n-list-available-tools.ts │ │ │ │ └── tools-documentation.ts │ │ │ ├── templates │ │ │ │ ├── get-template.ts │ │ │ │ ├── get-templates-for-task.ts │ │ │ │ ├── index.ts │ │ │ │ ├── list-node-templates.ts │ │ │ │ ├── list-tasks.ts │ │ │ │ ├── 
search-templates-by-metadata.ts │ │ │ │ └── search-templates.ts │ │ │ ├── types.ts │ │ │ ├── validation │ │ │ │ ├── index.ts │ │ │ │ ├── validate-node-minimal.ts │ │ │ │ ├── validate-node-operation.ts │ │ │ │ ├── validate-workflow-connections.ts │ │ │ │ ├── validate-workflow-expressions.ts │ │ │ │ └── validate-workflow.ts │ │ │ └── workflow_management │ │ │ ├── index.ts │ │ │ ├── n8n-autofix-workflow.ts │ │ │ ├── n8n-create-workflow.ts │ │ │ ├── n8n-delete-execution.ts │ │ │ ├── n8n-delete-workflow.ts │ │ │ ├── n8n-get-execution.ts │ │ │ ├── n8n-get-workflow-details.ts │ │ │ ├── n8n-get-workflow-minimal.ts │ │ │ ├── n8n-get-workflow-structure.ts │ │ │ ├── n8n-get-workflow.ts │ │ │ ├── n8n-list-executions.ts │ │ │ ├── n8n-list-workflows.ts │ │ │ ├── n8n-trigger-webhook-workflow.ts │ │ │ ├── n8n-update-full-workflow.ts │ │ │ ├── n8n-update-partial-workflow.ts │ │ │ └── n8n-validate-workflow.ts │ │ ├── tools-documentation.ts │ │ ├── tools-n8n-friendly.ts │ │ ├── tools-n8n-manager.ts │ │ ├── tools.ts │ │ └── workflow-examples.ts │ ├── mcp-engine.ts │ ├── mcp-tools-engine.ts │ ├── n8n │ │ ├── MCPApi.credentials.ts │ │ └── MCPNode.node.ts │ ├── parsers │ │ ├── node-parser.ts │ │ ├── property-extractor.ts │ │ └── simple-parser.ts │ ├── scripts │ │ ├── debug-http-search.ts │ │ ├── extract-from-docker.ts │ │ ├── fetch-templates-robust.ts │ │ ├── fetch-templates.ts │ │ ├── rebuild-database.ts │ │ ├── rebuild-optimized.ts │ │ ├── rebuild.ts │ │ ├── sanitize-templates.ts │ │ ├── seed-canonical-ai-examples.ts │ │ ├── test-autofix-documentation.ts │ │ ├── test-autofix-workflow.ts │ │ ├── test-execution-filtering.ts │ │ ├── test-node-suggestions.ts │ │ ├── test-protocol-negotiation.ts │ │ ├── test-summary.ts │ │ ├── test-webhook-autofix.ts │ │ ├── validate.ts │ │ └── validation-summary.ts │ ├── services │ │ ├── ai-node-validator.ts │ │ ├── ai-tool-validators.ts │ │ ├── confidence-scorer.ts │ │ ├── config-validator.ts │ │ ├── enhanced-config-validator.ts │ │ ├── example-generator.ts │ │ ├── execution-processor.ts │ │ ├── expression-format-validator.ts │ │ ├── expression-validator.ts │ │ ├── n8n-api-client.ts │ │ ├── n8n-validation.ts │ │ ├── node-documentation-service.ts │ │ ├── node-similarity-service.ts │ │ ├── node-specific-validators.ts │ │ ├── operation-similarity-service.ts │ │ ├── property-dependencies.ts │ │ ├── property-filter.ts │ │ ├── resource-similarity-service.ts │ │ ├── sqlite-storage-service.ts │ │ ├── task-templates.ts │ │ ├── universal-expression-validator.ts │ │ ├── workflow-auto-fixer.ts │ │ ├── workflow-diff-engine.ts │ │ └── workflow-validator.ts │ ├── telemetry │ │ ├── batch-processor.ts │ │ ├── config-manager.ts │ │ ├── early-error-logger.ts │ │ ├── error-sanitization-utils.ts │ │ ├── error-sanitizer.ts │ │ ├── event-tracker.ts │ │ ├── event-validator.ts │ │ ├── index.ts │ │ ├── performance-monitor.ts │ │ ├── rate-limiter.ts │ │ ├── startup-checkpoints.ts │ │ ├── telemetry-error.ts │ │ ├── telemetry-manager.ts │ │ ├── telemetry-types.ts │ │ └── workflow-sanitizer.ts │ ├── templates │ │ ├── batch-processor.ts │ │ ├── metadata-generator.ts │ │ ├── README.md │ │ ├── template-fetcher.ts │ │ ├── template-repository.ts │ │ └── template-service.ts │ ├── types │ │ ├── index.ts │ │ ├── instance-context.ts │ │ ├── n8n-api.ts │ │ ├── node-types.ts │ │ └── workflow-diff.ts │ └── utils │ ├── auth.ts │ ├── bridge.ts │ ├── cache-utils.ts │ ├── console-manager.ts │ ├── documentation-fetcher.ts │ ├── enhanced-documentation-fetcher.ts │ ├── error-handler.ts │ ├── example-generator.ts │ ├── 
fixed-collection-validator.ts │ ├── logger.ts │ ├── mcp-client.ts │ ├── n8n-errors.ts │ ├── node-source-extractor.ts │ ├── node-type-normalizer.ts │ ├── node-type-utils.ts │ ├── node-utils.ts │ ├── npm-version-checker.ts │ ├── protocol-version.ts │ ├── simple-cache.ts │ ├── ssrf-protection.ts │ ├── template-node-resolver.ts │ ├── template-sanitizer.ts │ ├── url-detector.ts │ ├── validation-schemas.ts │ └── version.ts ├── test-output.txt ├── test-reinit-fix.sh ├── tests │ ├── __snapshots__ │ │ └── .gitkeep │ ├── auth.test.ts │ ├── benchmarks │ │ ├── database-queries.bench.ts │ │ ├── index.ts │ │ ├── mcp-tools.bench.ts │ │ ├── mcp-tools.bench.ts.disabled │ │ ├── mcp-tools.bench.ts.skip │ │ ├── node-loading.bench.ts.disabled │ │ ├── README.md │ │ ├── search-operations.bench.ts.disabled │ │ └── validation-performance.bench.ts.disabled │ ├── bridge.test.ts │ ├── comprehensive-extraction-test.js │ ├── data │ │ └── .gitkeep │ ├── debug-slack-doc.js │ ├── demo-enhanced-documentation.js │ ├── docker-tests-README.md │ ├── error-handler.test.ts │ ├── examples │ │ └── using-database-utils.test.ts │ ├── extracted-nodes-db │ │ ├── database-import.json │ │ ├── extraction-report.json │ │ ├── insert-nodes.sql │ │ ├── n8n-nodes-base__Airtable.json │ │ ├── n8n-nodes-base__Discord.json │ │ ├── n8n-nodes-base__Function.json │ │ ├── n8n-nodes-base__HttpRequest.json │ │ ├── n8n-nodes-base__If.json │ │ ├── n8n-nodes-base__Slack.json │ │ ├── n8n-nodes-base__SplitInBatches.json │ │ └── n8n-nodes-base__Webhook.json │ ├── factories │ │ ├── node-factory.ts │ │ └── property-definition-factory.ts │ ├── fixtures │ │ ├── .gitkeep │ │ ├── database │ │ │ └── test-nodes.json │ │ ├── factories │ │ │ ├── node.factory.ts │ │ │ └── parser-node.factory.ts │ │ └── template-configs.ts │ ├── helpers │ │ └── env-helpers.ts │ ├── http-server-auth.test.ts │ ├── integration │ │ ├── ai-validation │ │ │ ├── ai-agent-validation.test.ts │ │ │ ├── ai-tool-validation.test.ts │ │ │ ├── chat-trigger-validation.test.ts │ │ │ ├── e2e-validation.test.ts │ │ │ ├── helpers.ts │ │ │ ├── llm-chain-validation.test.ts │ │ │ ├── README.md │ │ │ └── TEST_REPORT.md │ │ ├── ci │ │ │ └── database-population.test.ts │ │ ├── database │ │ │ ├── connection-management.test.ts │ │ │ ├── empty-database.test.ts │ │ │ ├── fts5-search.test.ts │ │ │ ├── node-fts5-search.test.ts │ │ │ ├── node-repository.test.ts │ │ │ ├── performance.test.ts │ │ │ ├── template-node-configs.test.ts │ │ │ ├── template-repository.test.ts │ │ │ ├── test-utils.ts │ │ │ └── transactions.test.ts │ │ ├── database-integration.test.ts │ │ ├── docker │ │ │ ├── docker-config.test.ts │ │ │ ├── docker-entrypoint.test.ts │ │ │ └── test-helpers.ts │ │ ├── flexible-instance-config.test.ts │ │ ├── mcp │ │ │ └── template-examples-e2e.test.ts │ │ ├── mcp-protocol │ │ │ ├── basic-connection.test.ts │ │ │ ├── error-handling.test.ts │ │ │ ├── performance.test.ts │ │ │ ├── protocol-compliance.test.ts │ │ │ ├── README.md │ │ │ ├── session-management.test.ts │ │ │ ├── test-helpers.ts │ │ │ ├── tool-invocation.test.ts │ │ │ └── workflow-error-validation.test.ts │ │ ├── msw-setup.test.ts │ │ ├── n8n-api │ │ │ ├── executions │ │ │ │ ├── delete-execution.test.ts │ │ │ │ ├── get-execution.test.ts │ │ │ │ ├── list-executions.test.ts │ │ │ │ └── trigger-webhook.test.ts │ │ │ ├── scripts │ │ │ │ └── cleanup-orphans.ts │ │ │ ├── system │ │ │ │ ├── diagnostic.test.ts │ │ │ │ ├── health-check.test.ts │ │ │ │ └── list-tools.test.ts │ │ │ ├── test-connection.ts │ │ │ ├── types │ │ │ │ └── mcp-responses.ts │ │ │ ├── utils │ │ 
│ │ ├── cleanup-helpers.ts │ │ │ │ ├── credentials.ts │ │ │ │ ├── factories.ts │ │ │ │ ├── fixtures.ts │ │ │ │ ├── mcp-context.ts │ │ │ │ ├── n8n-client.ts │ │ │ │ ├── node-repository.ts │ │ │ │ ├── response-types.ts │ │ │ │ ├── test-context.ts │ │ │ │ └── webhook-workflows.ts │ │ │ └── workflows │ │ │ ├── autofix-workflow.test.ts │ │ │ ├── create-workflow.test.ts │ │ │ ├── delete-workflow.test.ts │ │ │ ├── get-workflow-details.test.ts │ │ │ ├── get-workflow-minimal.test.ts │ │ │ ├── get-workflow-structure.test.ts │ │ │ ├── get-workflow.test.ts │ │ │ ├── list-workflows.test.ts │ │ │ ├── smart-parameters.test.ts │ │ │ ├── update-partial-workflow.test.ts │ │ │ ├── update-workflow.test.ts │ │ │ └── validate-workflow.test.ts │ │ ├── security │ │ │ ├── command-injection-prevention.test.ts │ │ │ └── rate-limiting.test.ts │ │ ├── setup │ │ │ ├── integration-setup.ts │ │ │ └── msw-test-server.ts │ │ ├── telemetry │ │ │ ├── docker-user-id-stability.test.ts │ │ │ └── mcp-telemetry.test.ts │ │ ├── templates │ │ │ └── metadata-operations.test.ts │ │ └── workflow-creation-node-type-format.test.ts │ ├── logger.test.ts │ ├── MOCKING_STRATEGY.md │ ├── mocks │ │ ├── n8n-api │ │ │ ├── data │ │ │ │ ├── credentials.ts │ │ │ │ ├── executions.ts │ │ │ │ └── workflows.ts │ │ │ ├── handlers.ts │ │ │ └── index.ts │ │ └── README.md │ ├── node-storage-export.json │ ├── setup │ │ ├── global-setup.ts │ │ ├── msw-setup.ts │ │ ├── TEST_ENV_DOCUMENTATION.md │ │ └── test-env.ts │ ├── test-database-extraction.js │ ├── test-direct-extraction.js │ ├── test-enhanced-documentation.js │ ├── test-enhanced-integration.js │ ├── test-mcp-extraction.js │ ├── test-mcp-server-extraction.js │ ├── test-mcp-tools-integration.js │ ├── test-node-documentation-service.js │ ├── test-node-list.js │ ├── test-package-info.js │ ├── test-parsing-operations.js │ ├── test-slack-node-complete.js │ ├── test-small-rebuild.js │ ├── test-sqlite-search.js │ ├── test-storage-system.js │ ├── unit │ │ ├── __mocks__ │ │ │ ├── n8n-nodes-base.test.ts │ │ │ ├── n8n-nodes-base.ts │ │ │ └── README.md │ │ ├── database │ │ │ ├── __mocks__ │ │ │ │ └── better-sqlite3.ts │ │ │ ├── database-adapter-unit.test.ts │ │ │ ├── node-repository-core.test.ts │ │ │ ├── node-repository-operations.test.ts │ │ │ ├── node-repository-outputs.test.ts │ │ │ ├── README.md │ │ │ └── template-repository-core.test.ts │ │ ├── docker │ │ │ ├── config-security.test.ts │ │ │ ├── edge-cases.test.ts │ │ │ ├── parse-config.test.ts │ │ │ └── serve-command.test.ts │ │ ├── errors │ │ │ └── validation-service-error.test.ts │ │ ├── examples │ │ │ └── using-n8n-nodes-base-mock.test.ts │ │ ├── flexible-instance-security-advanced.test.ts │ │ ├── flexible-instance-security.test.ts │ │ ├── http-server │ │ │ └── multi-tenant-support.test.ts │ │ ├── http-server-n8n-mode.test.ts │ │ ├── http-server-n8n-reinit.test.ts │ │ ├── http-server-session-management.test.ts │ │ ├── loaders │ │ │ └── node-loader.test.ts │ │ ├── mappers │ │ │ └── docs-mapper.test.ts │ │ ├── mcp │ │ │ ├── get-node-essentials-examples.test.ts │ │ │ ├── handlers-n8n-manager-simple.test.ts │ │ │ ├── handlers-n8n-manager.test.ts │ │ │ ├── handlers-workflow-diff.test.ts │ │ │ ├── lru-cache-behavior.test.ts │ │ │ ├── multi-tenant-tool-listing.test.ts.disabled │ │ │ ├── parameter-validation.test.ts │ │ │ ├── search-nodes-examples.test.ts │ │ │ ├── tools-documentation.test.ts │ │ │ └── tools.test.ts │ │ ├── monitoring │ │ │ └── cache-metrics.test.ts │ │ ├── MULTI_TENANT_TEST_COVERAGE.md │ │ ├── multi-tenant-integration.test.ts │ │ ├── parsers │ │ 
│ ├── node-parser-outputs.test.ts │ │ │ ├── node-parser.test.ts │ │ │ ├── property-extractor.test.ts │ │ │ └── simple-parser.test.ts │ │ ├── scripts │ │ │ └── fetch-templates-extraction.test.ts │ │ ├── services │ │ │ ├── ai-node-validator.test.ts │ │ │ ├── ai-tool-validators.test.ts │ │ │ ├── confidence-scorer.test.ts │ │ │ ├── config-validator-basic.test.ts │ │ │ ├── config-validator-edge-cases.test.ts │ │ │ ├── config-validator-node-specific.test.ts │ │ │ ├── config-validator-security.test.ts │ │ │ ├── debug-validator.test.ts │ │ │ ├── enhanced-config-validator-integration.test.ts │ │ │ ├── enhanced-config-validator-operations.test.ts │ │ │ ├── enhanced-config-validator.test.ts │ │ │ ├── example-generator.test.ts │ │ │ ├── execution-processor.test.ts │ │ │ ├── expression-format-validator.test.ts │ │ │ ├── expression-validator-edge-cases.test.ts │ │ │ ├── expression-validator.test.ts │ │ │ ├── fixed-collection-validation.test.ts │ │ │ ├── loop-output-edge-cases.test.ts │ │ │ ├── n8n-api-client.test.ts │ │ │ ├── n8n-validation.test.ts │ │ │ ├── node-similarity-service.test.ts │ │ │ ├── node-specific-validators.test.ts │ │ │ ├── operation-similarity-service-comprehensive.test.ts │ │ │ ├── operation-similarity-service.test.ts │ │ │ ├── property-dependencies.test.ts │ │ │ ├── property-filter-edge-cases.test.ts │ │ │ ├── property-filter.test.ts │ │ │ ├── resource-similarity-service-comprehensive.test.ts │ │ │ ├── resource-similarity-service.test.ts │ │ │ ├── task-templates.test.ts │ │ │ ├── template-service.test.ts │ │ │ ├── universal-expression-validator.test.ts │ │ │ ├── validation-fixes.test.ts │ │ │ ├── workflow-auto-fixer.test.ts │ │ │ ├── workflow-diff-engine.test.ts │ │ │ ├── workflow-fixed-collection-validation.test.ts │ │ │ ├── workflow-validator-comprehensive.test.ts │ │ │ ├── workflow-validator-edge-cases.test.ts │ │ │ ├── workflow-validator-error-outputs.test.ts │ │ │ ├── workflow-validator-expression-format.test.ts │ │ │ ├── workflow-validator-loops-simple.test.ts │ │ │ ├── workflow-validator-loops.test.ts │ │ │ ├── workflow-validator-mocks.test.ts │ │ │ ├── workflow-validator-performance.test.ts │ │ │ ├── workflow-validator-with-mocks.test.ts │ │ │ └── workflow-validator.test.ts │ │ ├── telemetry │ │ │ ├── batch-processor.test.ts │ │ │ ├── config-manager.test.ts │ │ │ ├── event-tracker.test.ts │ │ │ ├── event-validator.test.ts │ │ │ ├── rate-limiter.test.ts │ │ │ ├── telemetry-error.test.ts │ │ │ ├── telemetry-manager.test.ts │ │ │ ├── v2.18.3-fixes-verification.test.ts │ │ │ └── workflow-sanitizer.test.ts │ │ ├── templates │ │ │ ├── batch-processor.test.ts │ │ │ ├── metadata-generator.test.ts │ │ │ ├── template-repository-metadata.test.ts │ │ │ └── template-repository-security.test.ts │ │ ├── test-env-example.test.ts │ │ ├── test-infrastructure.test.ts │ │ ├── types │ │ │ ├── instance-context-coverage.test.ts │ │ │ └── instance-context-multi-tenant.test.ts │ │ ├── utils │ │ │ ├── auth-timing-safe.test.ts │ │ │ ├── cache-utils.test.ts │ │ │ ├── console-manager.test.ts │ │ │ ├── database-utils.test.ts │ │ │ ├── fixed-collection-validator.test.ts │ │ │ ├── n8n-errors.test.ts │ │ │ ├── node-type-normalizer.test.ts │ │ │ ├── node-type-utils.test.ts │ │ │ ├── node-utils.test.ts │ │ │ ├── simple-cache-memory-leak-fix.test.ts │ │ │ ├── ssrf-protection.test.ts │ │ │ └── template-node-resolver.test.ts │ │ └── validation-fixes.test.ts │ └── utils │ ├── assertions.ts │ ├── builders │ │ └── workflow.builder.ts │ ├── data-generators.ts │ ├── database-utils.ts │ ├── README.md │ └── 
test-helpers.ts ├── thumbnail.png ├── tsconfig.build.json ├── tsconfig.json ├── types │ ├── mcp.d.ts │ └── test-env.d.ts ├── verify-telemetry-fix.js ├── versioned-nodes.md ├── vitest.config.benchmark.ts ├── vitest.config.integration.ts └── vitest.config.ts ``` # Files -------------------------------------------------------------------------------- /docs/README_CLAUDE_SETUP.md: -------------------------------------------------------------------------------- ```markdown 1 | # Claude Desktop Configuration for n8n-MCP 2 | 3 | This guide helps you connect n8n-MCP to Claude Desktop, giving Claude comprehensive knowledge about n8n's 525 workflow automation nodes, including 263 AI-capable tools. 4 | 5 | ## 🎯 Prerequisites 6 | 7 | - Claude Desktop installed 8 | - For local installation: Node.js (any version) 9 | - For Docker: Docker installed (see installation instructions in main README) 10 | 11 | ## 🛠️ Configuration Methods 12 | 13 | ### Method 1: Local Installation (Recommended) 💻 14 | 15 | 1. **Install and build:** 16 | ```bash 17 | git clone https://github.com/czlonkowski/n8n-mcp.git 18 | cd n8n-mcp 19 | npm install 20 | npm run build 21 | npm run rebuild 22 | ``` 23 | 24 | 2. **Configure Claude Desktop:** 25 | ```json 26 | { 27 | "mcpServers": { 28 | "n8n-mcp": { 29 | "command": "node", 30 | "args": ["/absolute/path/to/n8n-mcp/dist/mcp/index.js"], 31 | "env": { 32 | "NODE_ENV": "production", 33 | "LOG_LEVEL": "error", 34 | "MCP_MODE": "stdio", 35 | "DISABLE_CONSOLE_OUTPUT": "true" 36 | } 37 | } 38 | } 39 | } 40 | ``` 41 | 42 | ⚠️ **Important**: 43 | - Use absolute paths, not relative paths 44 | - The environment variables shown above are critical for proper stdio communication 45 | 46 | ### Method 2: Docker 🐳 47 | 48 | No installation needed - runs directly from Docker: 49 | 50 | ```json 51 | { 52 | "mcpServers": { 53 | "n8n-mcp": { 54 | "command": "docker", 55 | "args": [ 56 | "run", "-i", "--rm", 57 | "-e", "MCP_MODE=stdio", 58 | "-e", "LOG_LEVEL=error", 59 | "-e", "DISABLE_CONSOLE_OUTPUT=true", 60 | "ghcr.io/czlonkowski/n8n-mcp:latest" 61 | ] 62 | } 63 | } 64 | } 65 | ``` 66 | 67 | ✨ **Benefits**: No setup required, always up-to-date, isolated environment. 68 | 69 | ### Method 3: Remote Server Connection (Advanced) 70 | 71 | ⚠️ **Note**: Remote connections are complex and may have compatibility issues. Consider using local installation instead. 72 | 73 | For production deployments with multiple users: 74 | 75 | 1. **Deploy server with HTTP mode** (see [HTTP Deployment Guide](./HTTP_DEPLOYMENT.md)) 76 | 77 | 2. **Connect using custom HTTP client:** 78 | ```json 79 | { 80 | "mcpServers": { 81 | "n8n-remote": { 82 | "command": "node", 83 | "args": [ 84 | "/path/to/n8n-mcp/scripts/mcp-http-client.js", 85 | "http://your-server.com:3000/mcp" 86 | ], 87 | "env": { 88 | "MCP_AUTH_TOKEN": "your-auth-token" 89 | } 90 | } 91 | } 92 | } 93 | ``` 94 | 95 | 📝 **Note**: Native remote MCP support is available in Claude Pro/Team/Enterprise via Settings > Integrations. 96 | 97 | ## 📁 Configuration File Locations 98 | 99 | Find your `claude_desktop_config.json` file: 100 | 101 | - **macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json` 102 | - **Windows**: `%APPDATA%\Claude\claude_desktop_config.json` 103 | - **Linux**: `~/.config/Claude/claude_desktop_config.json` 104 | 105 | 🔄 **Important**: After editing, restart Claude Desktop (Cmd/Ctrl+R or quit and reopen). 106 | 107 | ## ✅ Verify Installation 108 | 109 | After restarting Claude Desktop: 110 | 111 | 1. 
Look for "n8n-docker" or "n8n-documentation" in the MCP servers list 112 | 2. Try asking Claude: "What n8n nodes are available for working with Slack?" 113 | 3. Or use a tool directly: "Use the list_nodes tool to show me trigger nodes" 114 | 115 | ## 🔧 Available Tools (v2.5.1) 116 | 117 | ### Essential Tool - Start Here! 118 | - **`tools_documentation`** - Get documentation for any MCP tool (ALWAYS use this first!) 119 | 120 | ### Core Tools 121 | - **`list_nodes`** - List all n8n nodes with filtering options 122 | - **`get_node_info`** - Get comprehensive information (now includes aiToolCapabilities) 123 | - **`get_node_essentials`** - Get only 10-20 essential properties (95% smaller!) 124 | - **`search_nodes`** - Full-text search across all node documentation 125 | - **`search_node_properties`** - Find specific properties within nodes 126 | - **`get_node_documentation`** - Get parsed documentation from n8n-docs 127 | - **`get_database_statistics`** - View database metrics and coverage 128 | 129 | ### AI Tools (Enhanced in v2.5.1) 130 | - **`list_ai_tools`** - List AI-capable nodes (ANY node can be used as AI tool!) 131 | - **`get_node_as_tool_info`** - Get guidance on using any node as an AI tool 132 | 133 | ### Task & Template Tools 134 | - **`get_node_for_task`** - Pre-configured node settings for common tasks 135 | - **`list_tasks`** - Discover available task templates 136 | - **`list_node_templates`** - Find workflow templates using specific nodes 137 | - **`get_template`** - Get complete workflow JSON for import 138 | - **`search_templates`** - Search templates by keywords 139 | - **`get_templates_for_task`** - Get curated templates for common tasks 140 | 141 | ### Validation Tools (Professional Grade) 142 | - **`validate_node_operation`** - Smart validation with operation awareness 143 | - **`validate_node_minimal`** - Quick validation for just required fields 144 | - **`validate_workflow`** - Complete workflow validation (validates AI tool connections) 145 | - **`validate_workflow_connections`** - Check workflow structure 146 | - **`validate_workflow_expressions`** - Validate n8n expressions including $fromAI() 147 | - **`get_property_dependencies`** - Analyze property visibility conditions 148 | 149 | ### Example Questions to Ask Claude: 150 | - "Show me all n8n nodes for working with databases" 151 | - "How do I use the HTTP Request node?" 152 | - "Get the essentials for Slack node" (uses get_node_essentials) 153 | - "How can I use Google Sheets as an AI tool?" 154 | - "Validate my workflow before deployment" 155 | - "Find templates for webhook automation" 156 | 157 | ## 🔍 Troubleshooting 158 | 159 | ### Server Not Appearing in Claude 160 | 161 | 1. **Check JSON syntax**: 162 | ```bash 163 | # Validate your config file 164 | cat ~/Library/Application\ Support/Claude/claude_desktop_config.json | jq . 165 | ``` 166 | 167 | 2. **Verify paths are absolute** (not relative) 168 | 169 | 3. 
**Restart Claude Desktop completely** (quit and reopen) 170 | 171 | ### Remote Connection Issues 172 | 173 | **"TransformStream is not defined" error:** 174 | - Cause: Node.js version < 18 175 | - Fix: Update Node.js to v18 or newer 176 | ```bash 177 | node --version # Should be v18.0.0 or higher 178 | ``` 179 | 180 | **"Server disconnected" error:** 181 | - Check AUTH_TOKEN matches between server and client 182 | - Verify server is running: `curl https://your-server.com/health` 183 | - Check for VPN interference 184 | 185 | ### Docker Issues 186 | 187 | **"Cannot find image" error:** 188 | ```bash 189 | # Pull the latest image 190 | docker pull ghcr.io/czlonkowski/n8n-mcp:latest 191 | ``` 192 | 193 | **Permission denied:** 194 | ```bash 195 | # Ensure Docker is running 196 | docker ps 197 | ``` 198 | 199 | ### Common Issues 200 | 201 | **"Expected ',' or ']' after array element" errors in logs:** 202 | - Cause: Console output interfering with stdio communication 203 | - Fix: Ensure all required environment variables are set: 204 | - `MCP_MODE=stdio` 205 | - `LOG_LEVEL=error` 206 | - `DISABLE_CONSOLE_OUTPUT=true` 207 | 208 | **"NODE_MODULE_VERSION mismatch" warnings:** 209 | - Not a problem! The server automatically falls back to a pure JavaScript implementation 210 | - The warnings are suppressed with proper environment variables 211 | 212 | **Server appears but tools don't work:** 213 | - Check that you've built the project: `npm run build` 214 | - Verify the database exists: `npm run rebuild` 215 | - Restart Claude Desktop completely (quit and reopen) 216 | 217 | ### Quick Fixes 218 | 219 | - 🔄 **Always restart Claude** after config changes 220 | - 📋 **Copy example configs exactly** (watch for typos) 221 | - 📂 **Use absolute paths** (/Users/... not ~/...) 
222 | - 🔍 **Check logs**: View > Developer > Logs in Claude Desktop 223 | - 🛑 **Set all environment variables** shown in the examples 224 | 225 | For more help, see [Troubleshooting Guide](./TROUBLESHOOTING.md) ``` -------------------------------------------------------------------------------- /.github/workflows/update-n8n-deps.yml: -------------------------------------------------------------------------------- ```yaml 1 | name: Update n8n Dependencies 2 | 3 | on: 4 | # Run every Monday at 9 AM UTC 5 | schedule: 6 | - cron: '0 9 * * 1' 7 | 8 | # Allow manual trigger 9 | workflow_dispatch: 10 | inputs: 11 | create_pr: 12 | description: 'Create a PR for updates' 13 | required: true 14 | type: boolean 15 | default: true 16 | auto_merge: 17 | description: 'Auto-merge PR if tests pass' 18 | required: true 19 | type: boolean 20 | default: false 21 | 22 | jobs: 23 | check-and-update: 24 | runs-on: ubuntu-latest 25 | 26 | permissions: 27 | contents: write 28 | pull-requests: write 29 | 30 | steps: 31 | - name: Checkout repository 32 | uses: actions/checkout@v4 33 | with: 34 | token: ${{ secrets.GITHUB_TOKEN }} 35 | lfs: true 36 | 37 | - name: Setup Node.js 38 | uses: actions/setup-node@v4 39 | with: 40 | node-version: '20' 41 | cache: 'npm' 42 | 43 | - name: Check for updates (dry run) 44 | id: check 45 | run: | 46 | # Ensure we're in the right directory 47 | cd ${{ github.workspace }} 48 | 49 | # First do a dry run to check if updates are needed 50 | node scripts/update-n8n-deps.js --dry-run > update-check.log 2>&1 || { 51 | echo "❌ Error running update check:" 52 | cat update-check.log 53 | exit 1 54 | } 55 | 56 | # Check if updates are available 57 | if grep -q "update available" update-check.log; then 58 | echo "updates_available=true" >> $GITHUB_OUTPUT 59 | echo "📦 Updates available!" 
60 | else 61 | echo "updates_available=false" >> $GITHUB_OUTPUT 62 | echo "✅ All dependencies are up to date" 63 | fi 64 | 65 | # Show the check results 66 | cat update-check.log 67 | 68 | - name: Apply updates 69 | if: steps.check.outputs.updates_available == 'true' 70 | id: update 71 | run: | 72 | # Ensure we're in the right directory 73 | cd ${{ github.workspace }} 74 | 75 | # Run the actual update 76 | node scripts/update-n8n-deps.js || { 77 | echo "❌ Error running update:" 78 | exit 1 79 | } 80 | 81 | # Check if files changed 82 | if git diff --quiet; then 83 | echo "files_changed=false" >> $GITHUB_OUTPUT 84 | else 85 | echo "files_changed=true" >> $GITHUB_OUTPUT 86 | fi 87 | 88 | - name: Create update branch 89 | if: steps.update.outputs.files_changed == 'true' && (github.event_name == 'schedule' || inputs.create_pr) 90 | id: branch 91 | run: | 92 | BRANCH_NAME="update-n8n-deps-$(date +%Y%m%d)" 93 | echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT 94 | 95 | git config user.name "github-actions[bot]" 96 | git config user.email "github-actions[bot]@users.noreply.github.com" 97 | 98 | git checkout -b $BRANCH_NAME 99 | git add package.json package-lock.json 100 | 101 | # Get update summary (file is written by the update script) 102 | UPDATE_SUMMARY=$(cat update-summary.txt 2>/dev/null || echo "Updated n8n dependencies") 103 | 104 | # Create commit message using heredoc 105 | COMMIT_MSG=$(cat <<'COMMIT_EOF' 106 | chore: update n8n dependencies 107 | 108 | ${UPDATE_SUMMARY} 109 | 110 | 🤖 Automated dependency update 111 | COMMIT_EOF 112 | ) 113 | # Replace placeholder with actual summary 114 | COMMIT_MSG="${COMMIT_MSG//\${UPDATE_SUMMARY}/$UPDATE_SUMMARY}" 115 | 116 | git commit -m "$COMMIT_MSG" 117 | git push origin $BRANCH_NAME 118 | 119 | # Save update summary as output for PR 120 | { 121 | echo 'UPDATE_SUMMARY<<EOF' 122 | if [ -f update-summary.txt ]; then 123 | cat update-summary.txt 124 | else 125 | echo "See commit for details" 126 | fi 127 | echo 'EOF' 128 | } >> $GITHUB_OUTPUT 129 | 130 | - name: Create Pull Request 131 | if: steps.branch.outputs.branch_name != '' 132 | uses: peter-evans/create-pull-request@v5 133 | with: 134 | token: ${{ secrets.GITHUB_TOKEN }} 135 | branch: ${{ steps.branch.outputs.branch_name }} 136 | title: 'chore: Update n8n dependencies' 137 | body: | 138 | ## 🔄 Automated n8n Dependency Update 139 | 140 | This PR updates n8n dependencies to their latest versions. 141 | 142 | ### 📦 Updates 143 | ``` 144 | ${{ steps.update.outputs.UPDATE_SUMMARY }} 145 | ``` 146 | 147 | ### ✅ Validation 148 | - [x] Dependencies updated 149 | - [x] Lock file updated 150 | - [x] Database rebuilt successfully 151 | - [x] All tests passed 152 | 153 | ### 🔍 Review Checklist 154 | - [ ] Review the [n8n release notes](https://docs.n8n.io/release-notes/) 155 | - [ ] Check for breaking changes 156 | - [ ] Test core functionality 157 | 158 | --- 159 | *This PR was automatically created by the n8n dependency update workflow.* 160 | labels: | 161 | dependencies 162 | automated 163 | assignees: ${{ github.repository_owner }} 164 | 165 | - name: Auto-merge PR (if enabled) 166 | if: steps.branch.outputs.branch_name != '' && inputs.auto_merge 167 | env: 168 | GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} 169 | run: | 170 | # Wait for PR to be created 171 | sleep 10 172 | 173 | # Find the PR 174 | PR_NUMBER=$(gh pr list --head ${{ steps.branch.outputs.branch_name }} --json number -q '.[0].number') 175 | 176 | if [ -n "$PR_NUMBER" ]; then 177 | echo "Auto-merging PR #$PR_NUMBER..." 
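            # gh's --merge flag produces a merge commit; --auto defers the actual merge until required checks pass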
178 | gh pr merge $PR_NUMBER --merge --auto 179 | fi 180 | 181 | # Direct commit option (for manual trigger) 182 | direct-update: 183 | if: github.event_name == 'workflow_dispatch' && !inputs.create_pr 184 | runs-on: ubuntu-latest 185 | 186 | permissions: 187 | contents: write 188 | 189 | steps: 190 | - name: Checkout repository 191 | uses: actions/checkout@v4 192 | with: 193 | token: ${{ secrets.GITHUB_TOKEN }} 194 | lfs: true 195 | 196 | - name: Setup Node.js 197 | uses: actions/setup-node@v4 198 | with: 199 | node-version: '20' 200 | cache: 'npm' 201 | 202 | - name: Update dependencies 203 | run: | 204 | node scripts/update-n8n-deps.js 205 | 206 | # Check if files changed 207 | if ! git diff --quiet; then 208 | git config user.name "github-actions[bot]" 209 | git config user.email "github-actions[bot]@users.noreply.github.com" 210 | 211 | git add package.json package-lock.json 212 | 213 | # Get update summary 214 | UPDATE_SUMMARY=$(cat update-summary.txt || echo "Updated n8n dependencies") 215 | 216 | # Create commit message using heredoc 217 | COMMIT_MSG=$(cat <<'COMMIT_EOF' 218 | chore: update n8n dependencies 219 | 220 | ${UPDATE_SUMMARY} 221 | 222 | 🤖 Automated dependency update 223 | COMMIT_EOF 224 | ) 225 | # Replace placeholder with actual summary 226 | COMMIT_MSG="${COMMIT_MSG//\${UPDATE_SUMMARY}/$UPDATE_SUMMARY}" 227 | 228 | git commit -m "$COMMIT_MSG" 229 | 230 | git push 231 | else 232 | echo "No updates needed" 233 | fi ``` -------------------------------------------------------------------------------- /src/utils/node-type-normalizer.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Universal Node Type Normalizer - FOR DATABASE OPERATIONS ONLY 3 | * 4 | * ⚠️ WARNING: Do NOT use before n8n API calls! 5 | * 6 | * This class converts node types to SHORT form (database format). 7 | * The n8n API requires FULL form (n8n-nodes-base.*). 8 | * 9 | * **Use this ONLY when:** 10 | * - Querying the node database 11 | * - Searching for node information 12 | * - Looking up node metadata 13 | * 14 | * **Do NOT use before:** 15 | * - Creating workflows (n8n_create_workflow) 16 | * - Updating workflows (n8n_update_workflow) 17 | * - Any n8n API calls 18 | * 19 | * **IMPORTANT:** The n8n-mcp database stores nodes in SHORT form: 20 | * - n8n-nodes-base → nodes-base 21 | * - @n8n/n8n-nodes-langchain → nodes-langchain 22 | * 23 | * But the n8n API requires FULL form: 24 | * - nodes-base → n8n-nodes-base 25 | * - nodes-langchain → @n8n/n8n-nodes-langchain 26 | * 27 | * @example Database Lookup (CORRECT usage) 28 | * const dbType = NodeTypeNormalizer.normalizeToFullForm('n8n-nodes-base.webhook') 29 | * // → 'nodes-base.webhook' 30 | * const node = await repository.getNode(dbType) 31 | * 32 | * @example API Call (INCORRECT - Do NOT do this!) 33 | * const workflow = { nodes: [{ type: 'n8n-nodes-base.webhook' }] } 34 | * const normalized = NodeTypeNormalizer.normalizeWorkflowNodeTypes(workflow) 35 | * // ❌ WRONG! normalized has SHORT form, API needs FULL form 36 | * await client.createWorkflow(normalized) // FAILS! 37 | * 38 | * @example API Call (CORRECT) 39 | * const workflow = { nodes: [{ type: 'n8n-nodes-base.webhook' }] } 40 | * // ✅ Send as-is to API (FULL form required) 41 | * await client.createWorkflow(workflow) // WORKS! 
42 | */ 43 | 44 | export interface NodeTypeNormalizationResult { 45 | original: string; 46 | normalized: string; 47 | wasNormalized: boolean; 48 | package: 'base' | 'langchain' | 'community' | 'unknown'; 49 | } 50 | 51 | export class NodeTypeNormalizer { 52 | /** 53 | * Normalize node type to canonical SHORT form (database format) 54 | * 55 | * This is the PRIMARY method to use throughout the codebase. 56 | * It converts any node type variation to the SHORT form that the database uses. 57 | * 58 | * **NOTE:** Method name says "ToFullForm" for backward compatibility, 59 | * but actually normalizes TO SHORT form to match database storage. 60 | * 61 | * @param type - Node type in any format 62 | * @returns Normalized node type in short form (database format) 63 | * 64 | * @example 65 | * normalizeToFullForm('n8n-nodes-base.webhook') 66 | * // → 'nodes-base.webhook' 67 | * 68 | * @example 69 | * normalizeToFullForm('nodes-base.webhook') 70 | * // → 'nodes-base.webhook' (unchanged) 71 | * 72 | * @example 73 | * normalizeToFullForm('@n8n/n8n-nodes-langchain.agent') 74 | * // → 'nodes-langchain.agent' 75 | */ 76 | static normalizeToFullForm(type: string): string { 77 | if (!type || typeof type !== 'string') { 78 | return type; 79 | } 80 | 81 | // Normalize full forms to short form (database format) 82 | if (type.startsWith('n8n-nodes-base.')) { 83 | return type.replace(/^n8n-nodes-base\./, 'nodes-base.'); 84 | } 85 | if (type.startsWith('@n8n/n8n-nodes-langchain.')) { 86 | return type.replace(/^@n8n\/n8n-nodes-langchain\./, 'nodes-langchain.'); 87 | } 88 | // Handle n8n-nodes-langchain without @n8n/ prefix 89 | if (type.startsWith('n8n-nodes-langchain.')) { 90 | return type.replace(/^n8n-nodes-langchain\./, 'nodes-langchain.'); 91 | } 92 | 93 | // Already in short form or community node - return unchanged 94 | return type; 95 | } 96 | 97 | /** 98 | * Normalize with detailed result including metadata 99 | * 100 | * Use this when you need to know if normalization occurred 101 | * or what package the node belongs to. 102 | * 103 | * @param type - Node type in any format 104 | * @returns Detailed normalization result 105 | * 106 | * @example 107 | * normalizeWithDetails('nodes-base.webhook') 108 | * // → { 109 | * // original: 'nodes-base.webhook', 110 | * // normalized: 'n8n-nodes-base.webhook', 111 | * // wasNormalized: true, 112 | * // package: 'base' 113 | * // } 114 | */ 115 | static normalizeWithDetails(type: string): NodeTypeNormalizationResult { 116 | const original = type; 117 | const normalized = this.normalizeToFullForm(type); 118 | 119 | return { 120 | original, 121 | normalized, 122 | wasNormalized: original !== normalized, 123 | package: this.detectPackage(normalized) 124 | }; 125 | } 126 | 127 | /** 128 | * Detect package type from node type 129 | * 130 | * @param type - Node type (in any form) 131 | * @returns Package identifier 132 | */ 133 | private static detectPackage(type: string): 'base' | 'langchain' | 'community' | 'unknown' { 134 | // Check both short and full forms 135 | if (type.startsWith('nodes-base.') || type.startsWith('n8n-nodes-base.')) return 'base'; 136 | if (type.startsWith('nodes-langchain.') || type.startsWith('@n8n/n8n-nodes-langchain.') || type.startsWith('n8n-nodes-langchain.')) return 'langchain'; 137 | if (type.includes('.')) return 'community'; 138 | return 'unknown'; 139 | } 140 | 141 | /** 142 | * Batch normalize multiple node types 143 | * 144 | * Use this when you need to normalize multiple types at once. 
145 | * 146 | * @param types - Array of node types 147 | * @returns Map of original → normalized types 148 | * 149 | * @example 150 | * normalizeBatch(['nodes-base.webhook', 'nodes-base.set']) 151 | * // → Map { 152 | * // 'nodes-base.webhook' => 'n8n-nodes-base.webhook', 153 | * // 'nodes-base.set' => 'n8n-nodes-base.set' 154 | * // } 155 | */ 156 | static normalizeBatch(types: string[]): Map<string, string> { 157 | const result = new Map<string, string>(); 158 | for (const type of types) { 159 | result.set(type, this.normalizeToFullForm(type)); 160 | } 161 | return result; 162 | } 163 | 164 | /** 165 | * Normalize all node types in a workflow 166 | * 167 | * This is the key method for fixing workflows before validation. 168 | * It normalizes all node types in place while preserving all other 169 | * workflow properties. 170 | * 171 | * @param workflow - Workflow object with nodes array 172 | * @returns Workflow with normalized node types 173 | * 174 | * @example 175 | * const workflow = { 176 | * nodes: [ 177 | * { type: 'nodes-base.webhook', id: '1', name: 'Webhook' }, 178 | * { type: 'nodes-base.set', id: '2', name: 'Set' } 179 | * ], 180 | * connections: {} 181 | * }; 182 | * const normalized = normalizeWorkflowNodeTypes(workflow); 183 | * // workflow.nodes[0].type → 'n8n-nodes-base.webhook' 184 | * // workflow.nodes[1].type → 'n8n-nodes-base.set' 185 | */ 186 | static normalizeWorkflowNodeTypes(workflow: any): any { 187 | if (!workflow?.nodes || !Array.isArray(workflow.nodes)) { 188 | return workflow; 189 | } 190 | 191 | return { 192 | ...workflow, 193 | nodes: workflow.nodes.map((node: any) => ({ 194 | ...node, 195 | type: this.normalizeToFullForm(node.type) 196 | })) 197 | }; 198 | } 199 | 200 | /** 201 | * Check if a node type is in full form (needs normalization) 202 | * 203 | * @param type - Node type to check 204 | * @returns True if in full form (will be normalized to short) 205 | */ 206 | static isFullForm(type: string): boolean { 207 | if (!type || typeof type !== 'string') { 208 | return false; 209 | } 210 | 211 | return ( 212 | type.startsWith('n8n-nodes-base.') || 213 | type.startsWith('@n8n/n8n-nodes-langchain.') || 214 | type.startsWith('n8n-nodes-langchain.') 215 | ); 216 | } 217 | 218 | /** 219 | * Check if a node type is in short form (database format) 220 | * 221 | * @param type - Node type to check 222 | * @returns True if in short form (already in database format) 223 | */ 224 | static isShortForm(type: string): boolean { 225 | if (!type || typeof type !== 'string') { 226 | return false; 227 | } 228 | 229 | return ( 230 | type.startsWith('nodes-base.') || 231 | type.startsWith('nodes-langchain.') 232 | ); 233 | } 234 | } 235 | ``` -------------------------------------------------------------------------------- /src/types/instance-context.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Instance Context for flexible configuration support 3 | * 4 | * Allows the n8n-mcp engine to accept instance-specific configuration 5 | * at runtime, enabling flexible deployment scenarios while maintaining 6 | * backward compatibility with environment-based configuration. 
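 *
 * @example
 * // A minimal usage sketch; the URL, API key, and instance id below are
 * // illustrative placeholders, not real credentials.
 * const context: InstanceContext = {
 *   n8nApiUrl: 'https://n8n.internal.example.com',
 *   n8nApiKey: 'live-key-abc123',
 *   instanceId: 'tenant-a'
 * };
 * const check = validateInstanceContext(context);
 * if (!check.valid) {
 *   console.error(check.errors);
 * }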
7 | */ 8 | 9 | export interface InstanceContext { 10 | /** 11 | * Instance-specific n8n API configuration 12 | * When provided, these override environment variables 13 | */ 14 | n8nApiUrl?: string; 15 | n8nApiKey?: string; 16 | n8nApiTimeout?: number; 17 | n8nApiMaxRetries?: number; 18 | 19 | /** 20 | * Instance identification 21 | * Used for session management and logging 22 | */ 23 | instanceId?: string; 24 | sessionId?: string; 25 | 26 | /** 27 | * Extensible metadata for future use 28 | * Allows passing additional configuration without interface changes 29 | */ 30 | metadata?: Record<string, any>; 31 | } 32 | 33 | /** 34 | * Validate URL format with enhanced checks 35 | */ 36 | function isValidUrl(url: string): boolean { 37 | try { 38 | const parsed = new URL(url); 39 | 40 | // Allow only http and https protocols 41 | if (parsed.protocol !== 'http:' && parsed.protocol !== 'https:') { 42 | return false; 43 | } 44 | 45 | // Check for reasonable hostname (not empty or invalid) 46 | if (!parsed.hostname || parsed.hostname.length === 0) { 47 | return false; 48 | } 49 | 50 | // Validate port if present 51 | if (parsed.port && (isNaN(Number(parsed.port)) || Number(parsed.port) < 1 || Number(parsed.port) > 65535)) { 52 | return false; 53 | } 54 | 55 | // Allow localhost, IP addresses, and domain names 56 | const hostname = parsed.hostname.toLowerCase(); 57 | 58 | // Allow localhost for development 59 | if (hostname === 'localhost' || hostname === '127.0.0.1' || hostname === '::1') { 60 | return true; 61 | } 62 | 63 | // Basic IPv4 address validation 64 | const ipv4Pattern = /^(\d{1,3}\.){3}\d{1,3}$/; 65 | if (ipv4Pattern.test(hostname)) { 66 | const parts = hostname.split('.'); 67 | return parts.every(part => { 68 | const num = parseInt(part, 10); 69 | return num >= 0 && num <= 255; 70 | }); 71 | } 72 | 73 | // Basic IPv6 pattern check (simplified) 74 | if (hostname.includes(':') || hostname.startsWith('[') && hostname.endsWith(']')) { 75 | // Basic IPv6 validation - just checking it's not obviously wrong 76 | return true; 77 | } 78 | 79 | // Domain name validation - allow subdomains and TLDs 80 | const domainPattern = /^([a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\.)*[a-zA-Z]([a-zA-Z0-9-]*[a-zA-Z0-9])?$/; 81 | return domainPattern.test(hostname); 82 | } catch { 83 | return false; 84 | } 85 | } 86 | 87 | /** 88 | * Validate API key format (basic check for non-empty string) 89 | */ 90 | function isValidApiKey(key: string): boolean { 91 | // API key should be non-empty and not contain obvious placeholder values 92 | return key.length > 0 && 93 | !key.toLowerCase().includes('your_api_key') && 94 | !key.toLowerCase().includes('placeholder') && 95 | !key.toLowerCase().includes('example'); 96 | } 97 | 98 | /** 99 | * Type guard to check if an object is an InstanceContext 100 | */ 101 | export function isInstanceContext(obj: any): obj is InstanceContext { 102 | if (!obj || typeof obj !== 'object') return false; 103 | 104 | // Check for known properties with validation 105 | const hasValidUrl = obj.n8nApiUrl === undefined || 106 | (typeof obj.n8nApiUrl === 'string' && isValidUrl(obj.n8nApiUrl)); 107 | 108 | const hasValidKey = obj.n8nApiKey === undefined || 109 | (typeof obj.n8nApiKey === 'string' && isValidApiKey(obj.n8nApiKey)); 110 | 111 | const hasValidTimeout = obj.n8nApiTimeout === undefined || 112 | (typeof obj.n8nApiTimeout === 'number' && obj.n8nApiTimeout > 0); 113 | 114 | const hasValidRetries = obj.n8nApiMaxRetries === undefined || 115 | (typeof obj.n8nApiMaxRetries === 'number' && 
obj.n8nApiMaxRetries >= 0); 116 | 117 | const hasValidInstanceId = obj.instanceId === undefined || typeof obj.instanceId === 'string'; 118 | const hasValidSessionId = obj.sessionId === undefined || typeof obj.sessionId === 'string'; 119 | const hasValidMetadata = obj.metadata === undefined || 120 | (typeof obj.metadata === 'object' && obj.metadata !== null); 121 | 122 | return hasValidUrl && hasValidKey && hasValidTimeout && hasValidRetries && 123 | hasValidInstanceId && hasValidSessionId && hasValidMetadata; 124 | } 125 | 126 | /** 127 | * Validate and sanitize InstanceContext 128 | * Provides field-specific error messages for better debugging 129 | */ 130 | export function validateInstanceContext(context: InstanceContext): { 131 | valid: boolean; 132 | errors?: string[] 133 | } { 134 | const errors: string[] = []; 135 | 136 | // Validate URL if provided (even empty string should be validated) 137 | if (context.n8nApiUrl !== undefined) { 138 | if (context.n8nApiUrl === '') { 139 | errors.push(`Invalid n8nApiUrl: empty string - URL is required when field is provided`); 140 | } else if (!isValidUrl(context.n8nApiUrl)) { 141 | // Provide specific reason for URL invalidity 142 | try { 143 | const parsed = new URL(context.n8nApiUrl); 144 | if (parsed.protocol !== 'http:' && parsed.protocol !== 'https:') { 145 | errors.push(`Invalid n8nApiUrl: URL must use HTTP or HTTPS protocol, got ${parsed.protocol}`); 146 | } 147 | } catch { 148 | errors.push(`Invalid n8nApiUrl: URL format is malformed or incomplete`); 149 | } 150 | } 151 | } 152 | 153 | // Validate API key if provided 154 | if (context.n8nApiKey !== undefined) { 155 | if (context.n8nApiKey === '') { 156 | errors.push(`Invalid n8nApiKey: empty string - API key is required when field is provided`); 157 | } else if (!isValidApiKey(context.n8nApiKey)) { 158 | // Provide specific reason for API key invalidity 159 | if (context.n8nApiKey.toLowerCase().includes('your_api_key')) { 160 | errors.push(`Invalid n8nApiKey: contains placeholder 'your_api_key' - Please provide actual API key`); 161 | } else if (context.n8nApiKey.toLowerCase().includes('placeholder')) { 162 | errors.push(`Invalid n8nApiKey: contains placeholder text - Please provide actual API key`); 163 | } else if (context.n8nApiKey.toLowerCase().includes('example')) { 164 | errors.push(`Invalid n8nApiKey: contains example text - Please provide actual API key`); 165 | } else { 166 | errors.push(`Invalid n8nApiKey: format validation failed - Ensure key is valid`); 167 | } 168 | } 169 | } 170 | 171 | // Validate timeout 172 | if (context.n8nApiTimeout !== undefined) { 173 | if (typeof context.n8nApiTimeout !== 'number') { 174 | errors.push(`Invalid n8nApiTimeout: ${context.n8nApiTimeout} - Must be a number, got ${typeof context.n8nApiTimeout}`); 175 | } else if (context.n8nApiTimeout <= 0) { 176 | errors.push(`Invalid n8nApiTimeout: ${context.n8nApiTimeout} - Must be positive (greater than 0)`); 177 | } else if (!isFinite(context.n8nApiTimeout)) { 178 | errors.push(`Invalid n8nApiTimeout: ${context.n8nApiTimeout} - Must be a finite number (not Infinity or NaN)`); 179 | } 180 | } 181 | 182 | // Validate retries 183 | if (context.n8nApiMaxRetries !== undefined) { 184 | if (typeof context.n8nApiMaxRetries !== 'number') { 185 | errors.push(`Invalid n8nApiMaxRetries: ${context.n8nApiMaxRetries} - Must be a number, got ${typeof context.n8nApiMaxRetries}`); 186 | } else if (context.n8nApiMaxRetries < 0) { 187 | errors.push(`Invalid n8nApiMaxRetries: ${context.n8nApiMaxRetries} - Must be 
non-negative (0 or greater)`); 188 | } else if (!isFinite(context.n8nApiMaxRetries)) { 189 | errors.push(`Invalid n8nApiMaxRetries: ${context.n8nApiMaxRetries} - Must be a finite number (not Infinity or NaN)`); 190 | } 191 | } 192 | 193 | return { 194 | valid: errors.length === 0, 195 | errors: errors.length > 0 ? errors : undefined 196 | }; 197 | } ``` -------------------------------------------------------------------------------- /.github/workflows/benchmark.yml: -------------------------------------------------------------------------------- ```yaml 1 | name: Performance Benchmarks 2 | 3 | on: 4 | push: 5 | branches: [main, feat/comprehensive-testing-suite] 6 | paths-ignore: 7 | - '**.md' 8 | - '**.txt' 9 | - 'docs/**' 10 | - 'examples/**' 11 | - '.github/FUNDING.yml' 12 | - '.github/ISSUE_TEMPLATE/**' 13 | - '.github/pull_request_template.md' 14 | - '.gitignore' 15 | - 'LICENSE*' 16 | - 'ATTRIBUTION.md' 17 | - 'SECURITY.md' 18 | - 'CODE_OF_CONDUCT.md' 19 | pull_request: 20 | branches: [main] 21 | paths-ignore: 22 | - '**.md' 23 | - '**.txt' 24 | - 'docs/**' 25 | - 'examples/**' 26 | - '.github/FUNDING.yml' 27 | - '.github/ISSUE_TEMPLATE/**' 28 | - '.github/pull_request_template.md' 29 | - '.gitignore' 30 | - 'LICENSE*' 31 | - 'ATTRIBUTION.md' 32 | - 'SECURITY.md' 33 | - 'CODE_OF_CONDUCT.md' 34 | workflow_dispatch: 35 | 36 | permissions: 37 | # For PR comments 38 | pull-requests: write 39 | # For pushing to gh-pages branch 40 | contents: write 41 | # For deployment to GitHub Pages 42 | pages: write 43 | id-token: write 44 | 45 | jobs: 46 | benchmark: 47 | runs-on: ubuntu-latest 48 | steps: 49 | - uses: actions/checkout@v4 50 | with: 51 | # Fetch all history for proper benchmark comparison 52 | fetch-depth: 0 53 | 54 | - name: Setup Node.js 55 | uses: actions/setup-node@v4 56 | with: 57 | node-version: 20 58 | cache: 'npm' 59 | 60 | - name: Install dependencies 61 | run: npm ci 62 | 63 | - name: Build project 64 | run: npm run build 65 | 66 | - name: Run benchmarks 67 | run: npm run benchmark:ci 68 | 69 | - name: Format benchmark results 70 | run: node scripts/format-benchmark-results.js 71 | 72 | - name: Upload benchmark artifacts 73 | uses: actions/upload-artifact@v4 74 | with: 75 | name: benchmark-results 76 | path: | 77 | benchmark-results.json 78 | benchmark-results-formatted.json 79 | benchmark-summary.json 80 | 81 | # Ensure gh-pages branch exists 82 | - name: Check and create gh-pages branch 83 | run: | 84 | git fetch origin gh-pages:gh-pages 2>/dev/null || { 85 | echo "gh-pages branch doesn't exist. Creating it..." 86 | git checkout --orphan gh-pages 87 | git rm -rf . 
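            # Seed the fresh orphan branch with a README so there is an initial commit to push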
88 | echo "# Benchmark Results" > README.md 89 | git add README.md 90 | git config user.name "github-actions[bot]" 91 | git config user.email "github-actions[bot]@users.noreply.github.com" 92 | git commit -m "Initial gh-pages commit" 93 | git push origin gh-pages 94 | git checkout ${{ github.ref_name }} 95 | } 96 | 97 | # Clean up workspace before benchmark action 98 | - name: Clean workspace 99 | run: | 100 | git add -A 101 | git stash || true 102 | 103 | # Store benchmark results and compare 104 | - name: Store benchmark result 105 | uses: benchmark-action/github-action-benchmark@v1 106 | continue-on-error: true 107 | id: benchmark 108 | with: 109 | name: n8n-mcp Benchmarks 110 | tool: 'customSmallerIsBetter' 111 | output-file-path: benchmark-results-formatted.json 112 | github-token: ${{ secrets.GITHUB_TOKEN }} 113 | auto-push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} 114 | # Where to store benchmark data 115 | benchmark-data-dir-path: 'benchmarks' 116 | # Alert when performance regresses by 10% 117 | alert-threshold: '110%' 118 | # Comment on PR when regression is detected 119 | comment-on-alert: true 120 | alert-comment-cc-users: '@czlonkowski' 121 | # Summary always 122 | summary-always: true 123 | # Max number of data points to retain 124 | max-items-in-chart: 50 125 | fail-on-alert: false 126 | 127 | # Comment on PR with benchmark results 128 | - name: Comment PR with results 129 | uses: actions/github-script@v7 130 | if: github.event_name == 'pull_request' 131 | continue-on-error: true 132 | with: 133 | github-token: ${{ secrets.GITHUB_TOKEN }} 134 | script: | 135 | try { 136 | const fs = require('fs'); 137 | const summary = JSON.parse(fs.readFileSync('benchmark-summary.json', 'utf8')); 138 | 139 | // Format results for PR comment 140 | let comment = '## 📊 Performance Benchmark Results\n\n'; 141 | comment += `🕐 Run at: ${new Date(summary.timestamp).toLocaleString()}\n\n`; 142 | comment += '| Benchmark | Time | Ops/sec | Range |\n'; 143 | comment += '|-----------|------|---------|-------|\n'; 144 | 145 | // Group benchmarks by category 146 | const categories = {}; 147 | for (const benchmark of summary.benchmarks) { 148 | const [category, ...nameParts] = benchmark.name.split(' - '); 149 | if (!categories[category]) categories[category] = []; 150 | categories[category].push({ 151 | ...benchmark, 152 | shortName: nameParts.join(' - ') 153 | }); 154 | } 155 | 156 | // Display by category 157 | for (const [category, benchmarks] of Object.entries(categories)) { 158 | comment += `\n### ${category}\n`; 159 | for (const benchmark of benchmarks) { 160 | comment += `| ${benchmark.shortName} | ${benchmark.time} | ${benchmark.opsPerSec} | ${benchmark.range} |\n`; 161 | } 162 | } 163 | 164 | // Add comparison link 165 | comment += '\n\n📈 [View historical benchmark trends](https://czlonkowski.github.io/n8n-mcp/benchmarks/)\n'; 166 | comment += '\n⚡ Performance regressions >10% will be flagged automatically.\n'; 167 | 168 | await github.rest.issues.createComment({ 169 | issue_number: context.issue.number, 170 | owner: context.repo.owner, 171 | repo: context.repo.repo, 172 | body: comment 173 | }); 174 | } catch (error) { 175 | console.error('Failed to create PR comment:', error.message); 176 | console.log('This is likely due to insufficient permissions for external PRs.'); 177 | console.log('Benchmark results have been saved to artifacts instead.'); 178 | } 179 | 180 | # Deploy benchmark results to GitHub Pages 181 | deploy: 182 | needs: benchmark 183 | if: github.ref 
== 'refs/heads/main' 184 | runs-on: ubuntu-latest 185 | environment: 186 | name: github-pages 187 | url: ${{ steps.deployment.outputs.page_url }} 188 | steps: 189 | - name: Checkout 190 | uses: actions/checkout@v4 191 | with: 192 | ref: gh-pages 193 | continue-on-error: true 194 | 195 | # If gh-pages checkout failed, create a minimal structure 196 | - name: Ensure gh-pages content exists 197 | run: | 198 | if [ ! -f "index.html" ]; then 199 | echo "Creating minimal gh-pages structure..." 200 | mkdir -p benchmarks 201 | echo '<!DOCTYPE html><html><head><title>n8n-mcp Benchmarks</title></head><body><h1>n8n-mcp Benchmarks</h1><p>Benchmark data will appear here after the first run.</p></body></html>' > index.html 202 | fi 203 | 204 | - name: Setup Pages 205 | uses: actions/configure-pages@v4 206 | 207 | - name: Upload Pages artifact 208 | uses: actions/upload-pages-artifact@v3 209 | with: 210 | path: '.' 211 | 212 | - name: Deploy to GitHub Pages 213 | id: deployment 214 | uses: actions/deploy-pages@v4 ``` -------------------------------------------------------------------------------- /tests/helpers/env-helpers.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Test Environment Helper Utilities 3 | * 4 | * Common utilities for working with test environment configuration 5 | */ 6 | 7 | import { getTestConfig, TestConfig } from '../setup/test-env'; 8 | import * as path from 'path'; 9 | import * as fs from 'fs'; 10 | 11 | /** 12 | * Create a test database path with unique suffix 13 | */ 14 | export function createTestDatabasePath(suffix?: string): string { 15 | const config = getTestConfig(); 16 | if (config.database.path === ':memory:') { 17 | return ':memory:'; 18 | } 19 | 20 | const timestamp = Date.now(); 21 | const randomSuffix = Math.random().toString(36).substring(7); 22 | const dbName = suffix 23 | ? 
`test-${suffix}-${timestamp}-${randomSuffix}.db` 24 | : `test-${timestamp}-${randomSuffix}.db`; 25 | 26 | return path.join(config.paths.data, dbName); 27 | } 28 | 29 | /** 30 | * Clean up test databases 31 | */ 32 | export async function cleanupTestDatabases(pattern?: RegExp): Promise<void> { 33 | const config = getTestConfig(); 34 | const dataPath = path.resolve(config.paths.data); 35 | 36 | if (!fs.existsSync(dataPath)) { 37 | return; 38 | } 39 | 40 | const files = fs.readdirSync(dataPath); 41 | const testDbPattern = pattern || /^test-.*\.db$/; 42 | 43 | for (const file of files) { 44 | if (testDbPattern.test(file)) { 45 | try { 46 | fs.unlinkSync(path.join(dataPath, file)); 47 | } catch (error) { 48 | console.error(`Failed to delete test database: ${file}`, error); 49 | } 50 | } 51 | } 52 | } 53 | 54 | /** 55 | * Override environment variables temporarily 56 | */ 57 | export function withEnvOverrides<T>( 58 | overrides: Partial<NodeJS.ProcessEnv>, 59 | fn: () => T 60 | ): T { 61 | const originalValues: Partial<NodeJS.ProcessEnv> = {}; 62 | 63 | // Save original values and apply overrides 64 | for (const [key, value] of Object.entries(overrides)) { 65 | originalValues[key] = process.env[key]; 66 | if (value === undefined) { 67 | delete process.env[key]; 68 | } else { 69 | process.env[key] = value; 70 | } 71 | } 72 | 73 | try { 74 | return fn(); 75 | } finally { 76 | // Restore original values 77 | for (const [key, value] of Object.entries(originalValues)) { 78 | if (value === undefined) { 79 | delete process.env[key]; 80 | } else { 81 | process.env[key] = value; 82 | } 83 | } 84 | } 85 | } 86 | 87 | /** 88 | * Async version of withEnvOverrides 89 | */ 90 | export async function withEnvOverridesAsync<T>( 91 | overrides: Partial<NodeJS.ProcessEnv>, 92 | fn: () => Promise<T> 93 | ): Promise<T> { 94 | const originalValues: Partial<NodeJS.ProcessEnv> = {}; 95 | 96 | // Save original values and apply overrides 97 | for (const [key, value] of Object.entries(overrides)) { 98 | originalValues[key] = process.env[key]; 99 | if (value === undefined) { 100 | delete process.env[key]; 101 | } else { 102 | process.env[key] = value; 103 | } 104 | } 105 | 106 | try { 107 | return await fn(); 108 | } finally { 109 | // Restore original values 110 | for (const [key, value] of Object.entries(originalValues)) { 111 | if (value === undefined) { 112 | delete process.env[key]; 113 | } else { 114 | process.env[key] = value; 115 | } 116 | } 117 | } 118 | } 119 | 120 | /** 121 | * Create a mock API server URL 122 | */ 123 | export function getMockApiUrl(endpoint?: string): string { 124 | const config = getTestConfig(); 125 | const baseUrl = config.api.url; 126 | return endpoint ? 
`${baseUrl}${endpoint}` : baseUrl; 127 | } 128 | 129 | /** 130 | * Get test fixture path 131 | */ 132 | export function getFixturePath(fixtureName: string): string { 133 | const config = getTestConfig(); 134 | return path.resolve(config.paths.fixtures, fixtureName); 135 | } 136 | 137 | /** 138 | * Load test fixture data 139 | */ 140 | export function loadFixture<T = any>(fixtureName: string): T { 141 | const fixturePath = getFixturePath(fixtureName); 142 | 143 | if (!fs.existsSync(fixturePath)) { 144 | throw new Error(`Fixture not found: ${fixturePath}`); 145 | } 146 | 147 | const content = fs.readFileSync(fixturePath, 'utf-8'); 148 | 149 | if (fixturePath.endsWith('.json')) { 150 | return JSON.parse(content); 151 | } 152 | 153 | return content as any; 154 | } 155 | 156 | /** 157 | * Save test snapshot 158 | */ 159 | export function saveSnapshot(name: string, data: any): void { 160 | const config = getTestConfig(); 161 | const snapshotDir = path.resolve(config.paths.snapshots); 162 | 163 | if (!fs.existsSync(snapshotDir)) { 164 | fs.mkdirSync(snapshotDir, { recursive: true }); 165 | } 166 | 167 | const snapshotPath = path.join(snapshotDir, `${name}.snap`); 168 | const content = typeof data === 'string' ? data : JSON.stringify(data, null, 2); 169 | 170 | fs.writeFileSync(snapshotPath, content); 171 | } 172 | 173 | /** 174 | * Performance measurement helper 175 | */ 176 | export class PerformanceMeasure { 177 | private startTime: number; 178 | private marks: Map<string, number> = new Map(); 179 | 180 | constructor(private name: string) { 181 | this.startTime = performance.now(); 182 | } 183 | 184 | mark(label: string): void { 185 | this.marks.set(label, performance.now()); 186 | } 187 | 188 | end(): { total: number; marks: Record<string, number> } { 189 | const endTime = performance.now(); 190 | const total = endTime - this.startTime; 191 | 192 | const markTimes: Record<string, number> = {}; 193 | for (const [label, time] of this.marks) { 194 | markTimes[label] = time - this.startTime; 195 | } 196 | 197 | return { total, marks: markTimes }; 198 | } 199 | 200 | assertThreshold(threshold: keyof TestConfig['performance']['thresholds']): void { 201 | const config = getTestConfig(); 202 | const { total } = this.end(); 203 | const maxTime = config.performance.thresholds[threshold]; 204 | 205 | if (total > maxTime) { 206 | throw new Error( 207 | `Performance threshold exceeded for ${this.name}: ` + 208 | `${total.toFixed(2)}ms > ${maxTime}ms` 209 | ); 210 | } 211 | } 212 | } 213 | 214 | /** 215 | * Create a performance measure 216 | */ 217 | export function measurePerformance(name: string): PerformanceMeasure { 218 | return new PerformanceMeasure(name); 219 | } 220 | 221 | /** 222 | * Wait for a condition with timeout 223 | */ 224 | export async function waitForCondition( 225 | condition: () => boolean | Promise<boolean>, 226 | options: { 227 | timeout?: number; 228 | interval?: number; 229 | message?: string; 230 | } = {} 231 | ): Promise<void> { 232 | const { 233 | timeout = 5000, 234 | interval = 100, 235 | message = 'Condition not met' 236 | } = options; 237 | 238 | const startTime = Date.now(); 239 | 240 | while (Date.now() - startTime < timeout) { 241 | const result = await condition(); 242 | if (result) { 243 | return; 244 | } 245 | await new Promise(resolve => setTimeout(resolve, interval)); 246 | } 247 | 248 | throw new Error(`${message} (timeout: ${timeout}ms)`); 249 | } 250 | 251 | /** 252 | * Create a test logger that respects configuration 253 | */ 254 | export function 
createTestLogger(namespace: string) { 255 | const config = getTestConfig(); 256 | 257 | return { 258 | debug: (...args: any[]) => { 259 | if (config.logging.debug || config.logging.verbose) { 260 | console.debug(`[${namespace}]`, ...args); 261 | } 262 | }, 263 | info: (...args: any[]) => { 264 | if (config.logging.level !== 'error') { 265 | console.info(`[${namespace}]`, ...args); 266 | } 267 | }, 268 | warn: (...args: any[]) => { 269 | if (config.logging.level !== 'error') { 270 | console.warn(`[${namespace}]`, ...args); 271 | } 272 | }, 273 | error: (...args: any[]) => { 274 | console.error(`[${namespace}]`, ...args); 275 | } 276 | }; 277 | } 278 | 279 | /** 280 | * Check if running in CI environment 281 | */ 282 | export function isCI(): boolean { 283 | return process.env.CI === 'true' || 284 | process.env.CONTINUOUS_INTEGRATION === 'true' || 285 | process.env.GITHUB_ACTIONS === 'true' || 286 | process.env.GITLAB_CI === 'true' || 287 | process.env.CIRCLECI === 'true'; 288 | } 289 | 290 | /** 291 | * Get appropriate test timeout based on environment 292 | */ 293 | export function getAdaptiveTimeout(baseTimeout: number): number { 294 | const multiplier = isCI() ? 2 : 1; // Double timeouts in CI 295 | return baseTimeout * multiplier; 296 | } ``` -------------------------------------------------------------------------------- /tests/integration/database/node-fts5-search.test.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Integration tests for node FTS5 search functionality 3 | * Ensures the production search failures (Issue #296) are prevented 4 | */ 5 | import { describe, it, expect, beforeAll, afterAll } from 'vitest'; 6 | import { createDatabaseAdapter } from '../../../src/database/database-adapter'; 7 | import { NodeRepository } from '../../../src/database/node-repository'; 8 | import * as fs from 'fs'; 9 | import * as path from 'path'; 10 | 11 | describe('Node FTS5 Search Integration Tests', () => { 12 | let db: any; 13 | let repository: NodeRepository; 14 | 15 | beforeAll(async () => { 16 | // Use test database 17 | const testDbPath = './data/nodes.db'; 18 | db = await createDatabaseAdapter(testDbPath); 19 | repository = new NodeRepository(db); 20 | }); 21 | 22 | afterAll(() => { 23 | if (db) { 24 | db.close(); 25 | } 26 | }); 27 | 28 | describe('FTS5 Table Existence', () => { 29 | it('should have nodes_fts table in schema', () => { 30 | const schemaPath = path.join(__dirname, '../../../src/database/schema.sql'); 31 | const schema = fs.readFileSync(schemaPath, 'utf-8'); 32 | 33 | expect(schema).toContain('CREATE VIRTUAL TABLE IF NOT EXISTS nodes_fts USING fts5'); 34 | expect(schema).toContain('CREATE TRIGGER IF NOT EXISTS nodes_fts_insert'); 35 | expect(schema).toContain('CREATE TRIGGER IF NOT EXISTS nodes_fts_update'); 36 | expect(schema).toContain('CREATE TRIGGER IF NOT EXISTS nodes_fts_delete'); 37 | }); 38 | 39 | it('should have nodes_fts table in database', () => { 40 | const result = db.prepare(` 41 | SELECT name FROM sqlite_master 42 | WHERE type='table' AND name='nodes_fts' 43 | `).get(); 44 | 45 | expect(result).toBeDefined(); 46 | expect(result.name).toBe('nodes_fts'); 47 | }); 48 | 49 | it('should have FTS5 triggers in database', () => { 50 | const triggers = db.prepare(` 51 | SELECT name FROM sqlite_master 52 | WHERE type='trigger' AND name LIKE 'nodes_fts_%' 53 | `).all(); 54 | 55 | expect(triggers).toHaveLength(3); 56 | const triggerNames = triggers.map((t: any) => t.name); 57 | 
expect(triggerNames).toContain('nodes_fts_insert'); 58 | expect(triggerNames).toContain('nodes_fts_update'); 59 | expect(triggerNames).toContain('nodes_fts_delete'); 60 | }); 61 | }); 62 | 63 | describe('FTS5 Index Population', () => { 64 | it('should have nodes_fts count matching nodes count', () => { 65 | const nodesCount = db.prepare('SELECT COUNT(*) as count FROM nodes').get(); 66 | const ftsCount = db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get(); 67 | 68 | expect(nodesCount.count).toBeGreaterThan(500); // Should have both packages 69 | expect(ftsCount.count).toBe(nodesCount.count); 70 | }); 71 | 72 | it('should not have empty FTS5 index', () => { 73 | const ftsCount = db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get(); 74 | 75 | expect(ftsCount.count).toBeGreaterThan(0); 76 | }); 77 | }); 78 | 79 | describe('Critical Node Searches (Production Failure Cases)', () => { 80 | it('should find webhook node via FTS5', () => { 81 | const results = db.prepare(` 82 | SELECT node_type FROM nodes_fts 83 | WHERE nodes_fts MATCH 'webhook' 84 | `).all(); 85 | 86 | expect(results.length).toBeGreaterThan(0); 87 | const nodeTypes = results.map((r: any) => r.node_type); 88 | expect(nodeTypes).toContain('nodes-base.webhook'); 89 | }); 90 | 91 | it('should find merge node via FTS5', () => { 92 | const results = db.prepare(` 93 | SELECT node_type FROM nodes_fts 94 | WHERE nodes_fts MATCH 'merge' 95 | `).all(); 96 | 97 | expect(results.length).toBeGreaterThan(0); 98 | const nodeTypes = results.map((r: any) => r.node_type); 99 | expect(nodeTypes).toContain('nodes-base.merge'); 100 | }); 101 | 102 | it('should find split batch node via FTS5', () => { 103 | const results = db.prepare(` 104 | SELECT node_type FROM nodes_fts 105 | WHERE nodes_fts MATCH 'split OR batch' 106 | `).all(); 107 | 108 | expect(results.length).toBeGreaterThan(0); 109 | const nodeTypes = results.map((r: any) => r.node_type); 110 | expect(nodeTypes).toContain('nodes-base.splitInBatches'); 111 | }); 112 | 113 | it('should find code node via FTS5', () => { 114 | const results = db.prepare(` 115 | SELECT node_type FROM nodes_fts 116 | WHERE nodes_fts MATCH 'code' 117 | `).all(); 118 | 119 | expect(results.length).toBeGreaterThan(0); 120 | const nodeTypes = results.map((r: any) => r.node_type); 121 | expect(nodeTypes).toContain('nodes-base.code'); 122 | }); 123 | 124 | it('should find http request node via FTS5', () => { 125 | const results = db.prepare(` 126 | SELECT node_type FROM nodes_fts 127 | WHERE nodes_fts MATCH 'http OR request' 128 | `).all(); 129 | 130 | expect(results.length).toBeGreaterThan(0); 131 | const nodeTypes = results.map((r: any) => r.node_type); 132 | expect(nodeTypes).toContain('nodes-base.httpRequest'); 133 | }); 134 | }); 135 | 136 | describe('FTS5 Search Quality', () => { 137 | it('should rank exact matches higher', () => { 138 | const results = db.prepare(` 139 | SELECT node_type, rank FROM nodes_fts 140 | WHERE nodes_fts MATCH 'webhook' 141 | ORDER BY rank 142 | LIMIT 10 143 | `).all(); 144 | 145 | expect(results.length).toBeGreaterThan(0); 146 | // Exact match should be in top results 147 | const topResults = results.slice(0, 3).map((r: any) => r.node_type); 148 | expect(topResults).toContain('nodes-base.webhook'); 149 | }); 150 | 151 | it('should support phrase searches', () => { 152 | const results = db.prepare(` 153 | SELECT node_type FROM nodes_fts 154 | WHERE nodes_fts MATCH '"http request"' 155 | `).all(); 156 | 157 | expect(results.length).toBeGreaterThan(0); 158 | }); 159 | 160 | 
it('should support boolean operators', () => { 161 | const andResults = db.prepare(` 162 | SELECT node_type FROM nodes_fts 163 | WHERE nodes_fts MATCH 'google AND sheets' 164 | `).all(); 165 | 166 | const orResults = db.prepare(` 167 | SELECT node_type FROM nodes_fts 168 | WHERE nodes_fts MATCH 'google OR sheets' 169 | `).all(); 170 | 171 | expect(andResults.length).toBeGreaterThan(0); 172 | expect(orResults.length).toBeGreaterThanOrEqual(andResults.length); 173 | }); 174 | }); 175 | 176 | describe('FTS5 Index Synchronization', () => { 177 | it('should keep FTS5 in sync after node updates', () => { 178 | // This test ensures triggers work properly 179 | const beforeCount = db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get(); 180 | 181 | // Insert a test node 182 | db.prepare(` 183 | INSERT INTO nodes ( 184 | node_type, package_name, display_name, description, 185 | category, development_style, is_ai_tool, is_trigger, 186 | is_webhook, is_versioned, version, properties_schema, 187 | operations, credentials_required 188 | ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 189 | `).run( 190 | 'test.node', 191 | 'test-package', 192 | 'Test Node', 193 | 'A test node for FTS5 synchronization', 194 | 'Test', 195 | 'programmatic', 196 | 0, 0, 0, 0, 197 | '1.0', 198 | '[]', '[]', '[]' 199 | ); 200 | 201 | const afterInsert = db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get(); 202 | expect(afterInsert.count).toBe(beforeCount.count + 1); 203 | 204 | // Verify the new node is searchable 205 | const searchResults = db.prepare(` 206 | SELECT node_type FROM nodes_fts 207 | WHERE nodes_fts MATCH 'test synchronization' 208 | `).all(); 209 | expect(searchResults.length).toBeGreaterThan(0); 210 | 211 | // Clean up 212 | db.prepare('DELETE FROM nodes WHERE node_type = ?').run('test.node'); 213 | 214 | const afterDelete = db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get(); 215 | expect(afterDelete.count).toBe(beforeCount.count); 216 | }); 217 | }); 218 | }); 219 | ``` -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- ```json 1 | { 2 | "name": "n8n-mcp", 3 | "version": "2.19.6", 4 | "description": "Integration between n8n workflow automation and Model Context Protocol (MCP)", 5 | "main": "dist/index.js", 6 | "types": "dist/index.d.ts", 7 | "exports": { 8 | ".": { 9 | "types": "./dist/index.d.ts", 10 | "require": "./dist/index.js", 11 | "import": "./dist/index.js" 12 | } 13 | }, 14 | "bin": { 15 | "n8n-mcp": "./dist/mcp/index.js" 16 | }, 17 | "scripts": { 18 | "build": "tsc -p tsconfig.build.json", 19 | "rebuild": "node dist/scripts/rebuild.js", 20 | "rebuild:optimized": "node dist/scripts/rebuild-optimized.js", 21 | "validate": "node dist/scripts/validate.js", 22 | "test-nodes": "node dist/scripts/test-nodes.js", 23 | "start": "node dist/mcp/index.js", 24 | "start:http": "MCP_MODE=http node dist/mcp/index.js", 25 | "start:http:fixed": "MCP_MODE=http USE_FIXED_HTTP=true node dist/mcp/index.js", 26 | "start:n8n": "N8N_MODE=true MCP_MODE=http node dist/mcp/index.js", 27 | "http": "npm run build && npm run start:http:fixed", 28 | "dev": "npm run build && npm run rebuild && npm run validate", 29 | "dev:http": "MCP_MODE=http nodemon --watch src --ext ts --exec 'npm run build && npm run start:http'", 30 | "test:single-session": "./scripts/test-single-session.sh", 31 | "test:mcp-endpoint": "node scripts/test-mcp-endpoint.js", 32 | "test:mcp-endpoint:curl": 
"./scripts/test-mcp-endpoint.sh", 33 | "test:mcp-stdio": "npm run build && node scripts/test-mcp-stdio.js", 34 | "test": "vitest", 35 | "test:ui": "vitest --ui", 36 | "test:run": "vitest run", 37 | "test:coverage": "vitest run --coverage", 38 | "test:ci": "vitest run --coverage --coverage.thresholds.lines=0 --coverage.thresholds.functions=0 --coverage.thresholds.branches=0 --coverage.thresholds.statements=0 --reporter=default --reporter=junit", 39 | "test:watch": "vitest watch", 40 | "test:unit": "vitest run tests/unit", 41 | "test:integration": "vitest run --config vitest.config.integration.ts", 42 | "test:integration:n8n": "vitest run tests/integration/n8n-api", 43 | "test:cleanup:orphans": "tsx tests/integration/n8n-api/scripts/cleanup-orphans.ts", 44 | "test:e2e": "vitest run tests/e2e", 45 | "lint": "tsc --noEmit", 46 | "typecheck": "tsc --noEmit", 47 | "update:n8n": "node scripts/update-n8n-deps.js", 48 | "update:n8n:check": "node scripts/update-n8n-deps.js --dry-run", 49 | "fetch:templates": "node dist/scripts/fetch-templates.js", 50 | "fetch:templates:update": "node dist/scripts/fetch-templates.js --update", 51 | "fetch:templates:extract": "node dist/scripts/fetch-templates.js --extract-only", 52 | "fetch:templates:robust": "node dist/scripts/fetch-templates-robust.js", 53 | "prebuild:fts5": "npx tsx scripts/prebuild-fts5.ts", 54 | "test:templates": "node dist/scripts/test-templates.js", 55 | "test:protocol-negotiation": "npx tsx src/scripts/test-protocol-negotiation.ts", 56 | "test:workflow-validation": "node dist/scripts/test-workflow-validation.js", 57 | "test:template-validation": "node dist/scripts/test-template-validation.js", 58 | "test:essentials": "node dist/scripts/test-essentials.js", 59 | "test:enhanced-validation": "node dist/scripts/test-enhanced-validation.js", 60 | "test:ai-workflow-validation": "node dist/scripts/test-ai-workflow-validation.js", 61 | "test:mcp-tools": "node dist/scripts/test-mcp-tools.js", 62 | "test:n8n-manager": "node dist/scripts/test-n8n-manager-integration.js", 63 | "test:n8n-validate-workflow": "node dist/scripts/test-n8n-validate-workflow.js", 64 | "test:typeversion-validation": "node dist/scripts/test-typeversion-validation.js", 65 | "test:error-handling": "node dist/scripts/test-error-handling-validation.js", 66 | "test:workflow-diff": "node dist/scripts/test-workflow-diff.js", 67 | "test:transactional-diff": "node dist/scripts/test-transactional-diff.js", 68 | "test:tools-documentation": "node dist/scripts/test-tools-documentation.js", 69 | "test:url-configuration": "npm run build && ts-node scripts/test-url-configuration.ts", 70 | "test:search-improvements": "node dist/scripts/test-search-improvements.js", 71 | "test:fts5-search": "node dist/scripts/test-fts5-search.js", 72 | "migrate:fts5": "node dist/scripts/migrate-nodes-fts.js", 73 | "test:mcp:update-partial": "node dist/scripts/test-mcp-n8n-update-partial.js", 74 | "test:update-partial:debug": "node dist/scripts/test-update-partial-debug.js", 75 | "test:issue-45-fix": "node dist/scripts/test-issue-45-fix.js", 76 | "test:auth-logging": "tsx scripts/test-auth-logging.ts", 77 | "test:docker": "./scripts/test-docker-config.sh all", 78 | "test:docker:unit": "./scripts/test-docker-config.sh unit", 79 | "test:docker:integration": "./scripts/test-docker-config.sh integration", 80 | "test:docker:security": "./scripts/test-docker-config.sh security", 81 | "sanitize:templates": "node dist/scripts/sanitize-templates.js", 82 | "db:rebuild": "node dist/scripts/rebuild-database.js", 83 | 
"benchmark": "vitest bench --config vitest.config.benchmark.ts", 84 | "benchmark:watch": "vitest bench --watch --config vitest.config.benchmark.ts", 85 | "benchmark:ui": "vitest bench --ui --config vitest.config.benchmark.ts", 86 | "benchmark:ci": "CI=true node scripts/run-benchmarks-ci.js", 87 | "db:init": "node -e \"new (require('./dist/services/sqlite-storage-service').SQLiteStorageService)(); console.log('Database initialized')\"", 88 | "docs:rebuild": "ts-node src/scripts/rebuild-database.ts", 89 | "sync:runtime-version": "node scripts/sync-runtime-version.js", 90 | "update:readme-version": "node scripts/update-readme-version.js", 91 | "prepare:publish": "./scripts/publish-npm.sh", 92 | "update:all": "./scripts/update-and-publish-prep.sh", 93 | "test:release-automation": "node scripts/test-release-automation.js", 94 | "prepare:release": "node scripts/prepare-release.js" 95 | }, 96 | "repository": { 97 | "type": "git", 98 | "url": "git+https://github.com/czlonkowski/n8n-mcp.git" 99 | }, 100 | "keywords": [ 101 | "n8n", 102 | "mcp", 103 | "model-context-protocol", 104 | "ai", 105 | "workflow", 106 | "automation" 107 | ], 108 | "author": "Romuald Czlonkowski @ www.aiadvisors.pl/en", 109 | "license": "MIT", 110 | "bugs": { 111 | "url": "https://github.com/czlonkowski/n8n-mcp/issues" 112 | }, 113 | "homepage": "https://github.com/czlonkowski/n8n-mcp#readme", 114 | "files": [ 115 | "dist/**/*", 116 | "data/nodes.db", 117 | ".env.example", 118 | "README.md", 119 | "LICENSE", 120 | "package.runtime.json" 121 | ], 122 | "devDependencies": { 123 | "@faker-js/faker": "^9.9.0", 124 | "@testing-library/jest-dom": "^6.6.4", 125 | "@types/better-sqlite3": "^7.6.13", 126 | "@types/express": "^5.0.3", 127 | "@types/node": "^22.15.30", 128 | "@types/ws": "^8.18.1", 129 | "@vitest/coverage-v8": "^3.2.4", 130 | "@vitest/runner": "^3.2.4", 131 | "@vitest/ui": "^3.2.4", 132 | "axios": "^1.11.0", 133 | "axios-mock-adapter": "^2.1.0", 134 | "fishery": "^2.3.1", 135 | "msw": "^2.10.4", 136 | "nodemon": "^3.1.10", 137 | "ts-node": "^10.9.2", 138 | "typescript": "^5.8.3", 139 | "vitest": "^3.2.4" 140 | }, 141 | "dependencies": { 142 | "@modelcontextprotocol/sdk": "^1.13.2", 143 | "@n8n/n8n-nodes-langchain": "^1.114.1", 144 | "@supabase/supabase-js": "^2.57.4", 145 | "dotenv": "^16.5.0", 146 | "express": "^5.1.0", 147 | "express-rate-limit": "^7.1.5", 148 | "lru-cache": "^11.2.1", 149 | "n8n": "^1.115.2", 150 | "n8n-core": "^1.114.0", 151 | "n8n-workflow": "^1.112.0", 152 | "openai": "^4.77.0", 153 | "sql.js": "^1.13.0", 154 | "uuid": "^10.0.0", 155 | "zod": "^3.24.1" 156 | }, 157 | "optionalDependencies": { 158 | "@rollup/rollup-darwin-arm64": "^4.50.0", 159 | "@rollup/rollup-linux-x64-gnu": "^4.50.0", 160 | "better-sqlite3": "^11.10.0" 161 | }, 162 | "overrides": { 163 | "pyodide": "0.26.4" 164 | } 165 | } 166 | ``` -------------------------------------------------------------------------------- /scripts/extract-from-docker.js: -------------------------------------------------------------------------------- ```javascript 1 | #!/usr/bin/env node 2 | const dotenv = require('dotenv'); 3 | const { NodeDocumentationService } = require('../dist/services/node-documentation-service'); 4 | const { NodeSourceExtractor } = require('../dist/utils/node-source-extractor'); 5 | const { logger } = require('../dist/utils/logger'); 6 | const fs = require('fs').promises; 7 | const path = require('path'); 8 | 9 | // Load environment variables 10 | dotenv.config(); 11 | 12 | async function extractNodesFromDocker() { 13 | 
logger.info('🐳 Starting Docker-based node extraction...'); 14 | 15 | // Add Docker volume paths to environment for NodeSourceExtractor 16 | const dockerVolumePaths = [ 17 | process.env.N8N_MODULES_PATH || '/n8n-modules', 18 | process.env.N8N_CUSTOM_PATH || '/n8n-custom', 19 | ]; 20 | 21 | logger.info(`Docker volume paths: ${dockerVolumePaths.join(', ')}`); 22 | 23 | // Check if volumes are mounted 24 | for (const volumePath of dockerVolumePaths) { 25 | try { 26 | await fs.access(volumePath); 27 | logger.info(`✅ Volume mounted: ${volumePath}`); 28 | 29 | // List what's in the volume 30 | const entries = await fs.readdir(volumePath); 31 | logger.info(`Contents of ${volumePath}: ${entries.slice(0, 10).join(', ')}${entries.length > 10 ? '...' : ''}`); 32 | } catch (error) { 33 | logger.warn(`❌ Volume not accessible: ${volumePath}`); 34 | } 35 | } 36 | 37 | // Initialize services 38 | const docService = new NodeDocumentationService(); 39 | const extractor = new NodeSourceExtractor(); 40 | 41 | // Extend the extractor's search paths with Docker volumes 42 | extractor.n8nBasePaths.unshift(...dockerVolumePaths); 43 | 44 | // Clear existing nodes to ensure we only have latest versions 45 | logger.info('🧹 Clearing existing nodes...'); 46 | const db = docService.db; 47 | db.prepare('DELETE FROM nodes').run(); 48 | 49 | logger.info('🔍 Searching for n8n nodes in Docker volumes...'); 50 | 51 | // Known n8n packages to extract 52 | const n8nPackages = [ 53 | 'n8n-nodes-base', 54 | '@n8n/n8n-nodes-langchain', 55 | 'n8n-nodes-extras', 56 | ]; 57 | 58 | let totalExtracted = 0; 59 | let ifNodeVersion = null; 60 | 61 | for (const packageName of n8nPackages) { 62 | logger.info(`\n📦 Processing package: ${packageName}`); 63 | 64 | try { 65 | // Find package in Docker volumes 66 | let packagePath = null; 67 | 68 | for (const volumePath of dockerVolumePaths) { 69 | const possiblePaths = [ 70 | path.join(volumePath, packageName), 71 | path.join(volumePath, '.pnpm', `${packageName}@*`, 'node_modules', packageName), 72 | ]; 73 | 74 | for (const testPath of possiblePaths) { 75 | try { 76 | // Use glob pattern to find pnpm packages 77 | if (testPath.includes('*')) { 78 | const baseDir = path.dirname(testPath.split('*')[0]); 79 | const entries = await fs.readdir(baseDir); 80 | 81 | for (const entry of entries) { 82 | if (entry.includes(packageName.replace('/', '+'))) { 83 | const fullPath = path.join(baseDir, entry, 'node_modules', packageName); 84 | try { 85 | await fs.access(fullPath); 86 | packagePath = fullPath; 87 | break; 88 | } catch {} 89 | } 90 | } 91 | } else { 92 | await fs.access(testPath); 93 | packagePath = testPath; 94 | break; 95 | } 96 | } catch {} 97 | } 98 | 99 | if (packagePath) break; 100 | } 101 | 102 | if (!packagePath) { 103 | logger.warn(`Package ${packageName} not found in Docker volumes`); 104 | continue; 105 | } 106 | 107 | logger.info(`Found package at: ${packagePath}`); 108 | 109 | // Check package version 110 | try { 111 | const packageJsonPath = path.join(packagePath, 'package.json'); 112 | const packageJson = JSON.parse(await fs.readFile(packageJsonPath, 'utf-8')); 113 | logger.info(`Package version: ${packageJson.version}`); 114 | } catch {} 115 | 116 | // Find nodes directory 117 | const nodesPath = path.join(packagePath, 'dist', 'nodes'); 118 | 119 | try { 120 | await fs.access(nodesPath); 121 | logger.info(`Scanning nodes directory: ${nodesPath}`); 122 | 123 | // Extract all nodes from this package 124 | const nodeEntries = await scanForNodes(nodesPath); 125 | logger.info(`Found 
${nodeEntries.length} nodes in ${packageName}`); 126 | 127 | for (const nodeEntry of nodeEntries) { 128 | try { 129 | const nodeName = nodeEntry.name.replace('.node.js', ''); 130 | const nodeType = `${packageName}.${nodeName}`; 131 | 132 | logger.info(`Extracting: ${nodeType}`); 133 | 134 | // Extract source info 135 | const sourceInfo = await extractor.extractNodeSource(nodeType); 136 | 137 | // Check if this is the If node 138 | if (nodeName === 'If') { 139 | // Look for version in the source code 140 | const versionMatch = sourceInfo.sourceCode.match(/version:\s*(\d+)/); 141 | if (versionMatch) { 142 | ifNodeVersion = versionMatch[1]; 143 | logger.info(`📍 Found If node version: ${ifNodeVersion}`); 144 | } 145 | } 146 | 147 | // Store in database 148 | await docService.storeNode({ 149 | nodeType: nodeType, 150 | name: nodeName, 151 | displayName: nodeName, 152 | description: `${nodeName} node from ${packageName}`, 153 | sourceCode: sourceInfo.sourceCode, 154 | credentialCode: sourceInfo.credentialCode, 155 | packageName: packageName, 156 | version: ifNodeVersion || '1', 157 | hasCredentials: !!sourceInfo.credentialCode, 158 | isTrigger: sourceInfo.sourceCode.includes('trigger: true') || nodeName.toLowerCase().includes('trigger'), 159 | isWebhook: sourceInfo.sourceCode.includes('webhook: true') || nodeName.toLowerCase().includes('webhook'), 160 | }); 161 | 162 | totalExtracted++; 163 | } catch (error) { 164 | logger.error(`Failed to extract ${nodeEntry.name}: ${error}`); 165 | } 166 | } 167 | } catch (error) { 168 | logger.error(`Failed to scan nodes directory: ${error}`); 169 | } 170 | } catch (error) { 171 | logger.error(`Failed to process package ${packageName}: ${error}`); 172 | } 173 | } 174 | 175 | logger.info(`\n✅ Extraction complete!`); 176 | logger.info(`📊 Total nodes extracted: ${totalExtracted}`); 177 | 178 | if (ifNodeVersion) { 179 | logger.info(`📍 If node version: ${ifNodeVersion}`); 180 | if (ifNodeVersion === '2' || ifNodeVersion === '2.2') { 181 | logger.info('✅ Successfully extracted latest If node (v2+)!'); 182 | } else { 183 | logger.warn(`⚠️ If node version is ${ifNodeVersion}, expected v2 or higher`); 184 | } 185 | } 186 | 187 | // Close database 188 | docService.close(); 189 | } 190 | 191 | async function scanForNodes(dirPath) { 192 | const nodes = []; 193 | 194 | async function scan(currentPath) { 195 | try { 196 | const entries = await fs.readdir(currentPath, { withFileTypes: true }); 197 | 198 | for (const entry of entries) { 199 | const fullPath = path.join(currentPath, entry.name); 200 | 201 | if (entry.isFile() && entry.name.endsWith('.node.js')) { 202 | nodes.push({ name: entry.name, path: fullPath }); 203 | } else if (entry.isDirectory() && entry.name !== 'node_modules') { 204 | await scan(fullPath); 205 | } 206 | } 207 | } catch (error) { 208 | logger.debug(`Failed to scan directory ${currentPath}: ${error}`); 209 | } 210 | } 211 | 212 | await scan(dirPath); 213 | return nodes; 214 | } 215 | 216 | // Run extraction 217 | extractNodesFromDocker().catch(error => { 218 | logger.error('Extraction failed:', error); 219 | process.exit(1); 220 | }); ``` -------------------------------------------------------------------------------- /docs/MCP_ESSENTIALS_README.md: -------------------------------------------------------------------------------- ```markdown 1 | # n8n MCP Essentials Tools - User Guide 2 | 3 | ## Overview 4 | 5 | The n8n MCP has been enhanced with new tools that dramatically improve the AI agent experience when building n8n workflows. 
The key improvement is the `get_node_essentials` tool which reduces response sizes by 95% while providing all the information needed for basic configuration. 6 | 7 | ## New Tools 8 | 9 | ### 1. `get_node_essentials` 10 | 11 | **Purpose**: Get only the 10-20 most important properties for a node instead of 200+ 12 | 13 | **When to use**: 14 | - Starting to configure a new node 15 | - Need quick access to common properties 16 | - Want working examples 17 | - Building basic workflows 18 | 19 | **Example usage**: 20 | ```json 21 | { 22 | "name": "get_node_essentials", 23 | "arguments": { 24 | "nodeType": "nodes-base.httpRequest" 25 | } 26 | } 27 | ``` 28 | 29 | **Response structure**: 30 | ```json 31 | { 32 | "nodeType": "nodes-base.httpRequest", 33 | "displayName": "HTTP Request", 34 | "description": "Makes HTTP requests and returns the response data", 35 | "requiredProperties": [ 36 | { 37 | "name": "url", 38 | "displayName": "URL", 39 | "type": "string", 40 | "description": "The URL to make the request to", 41 | "placeholder": "https://api.example.com/endpoint" 42 | } 43 | ], 44 | "commonProperties": [ 45 | { 46 | "name": "method", 47 | "type": "options", 48 | "options": [ 49 | { "value": "GET", "label": "GET" }, 50 | { "value": "POST", "label": "POST" } 51 | ], 52 | "default": "GET" 53 | } 54 | // ... 4-5 more common properties 55 | ], 56 | "examples": { 57 | "minimal": { 58 | "url": "https://api.example.com/data" 59 | }, 60 | "common": { 61 | "method": "POST", 62 | "url": "https://api.example.com/users", 63 | "sendBody": true, 64 | "contentType": "json", 65 | "jsonBody": "{ \"name\": \"John\" }" 66 | } 67 | }, 68 | "metadata": { 69 | "totalProperties": 245, 70 | "isAITool": false, 71 | "isTrigger": false 72 | } 73 | } 74 | ``` 75 | 76 | **Benefits**: 77 | - 95% smaller response (5KB vs 100KB+) 78 | - Only shows properties you actually need 79 | - Includes working examples 80 | - No duplicate or confusing properties 81 | - Clear indication of what's required 82 | 83 | ### 2. `search_node_properties` 84 | 85 | **Purpose**: Find specific properties within a node without downloading everything 86 | 87 | **When to use**: 88 | - Looking for authentication options 89 | - Finding specific configuration like headers or body 90 | - Exploring what options are available 91 | - Need to configure advanced features 92 | 93 | **Example usage**: 94 | ```json 95 | { 96 | "name": "search_node_properties", 97 | "arguments": { 98 | "nodeType": "nodes-base.httpRequest", 99 | "query": "auth" 100 | } 101 | } 102 | ``` 103 | 104 | **Response structure**: 105 | ```json 106 | { 107 | "nodeType": "nodes-base.httpRequest", 108 | "query": "auth", 109 | "matches": [ 110 | { 111 | "name": "authentication", 112 | "displayName": "Authentication", 113 | "type": "options", 114 | "description": "Method of authentication to use", 115 | "path": "authentication", 116 | "options": [ 117 | { "value": "none", "label": "None" }, 118 | { "value": "basicAuth", "label": "Basic Auth" } 119 | ] 120 | }, 121 | { 122 | "name": "genericAuthType", 123 | "path": "genericAuthType", 124 | "showWhen": { "authentication": "genericCredentialType" } 125 | } 126 | ], 127 | "totalMatches": 5, 128 | "searchedIn": "245 properties" 129 | } 130 | ``` 131 | 132 | ## Recommended Workflow 133 | 134 | ### For Basic Configuration: 135 | 136 | 1. **Start with essentials**: 137 | ``` 138 | get_node_essentials("nodes-base.httpRequest") 139 | ``` 140 | 141 | 2. 
**Use the provided examples**: 142 | - Start with `minimal` example 143 | - Upgrade to `common` for typical use cases 144 | - Modify based on your needs 145 | 146 | 3. **Search for specific features** (if needed): 147 | ``` 148 | search_node_properties("nodes-base.httpRequest", "header") 149 | ``` 150 | 151 | ### For Complex Configuration: 152 | 153 | 1. **Get documentation first**: 154 | ``` 155 | get_node_documentation("nodes-base.httpRequest") 156 | ``` 157 | 158 | 2. **Get essentials for the basics**: 159 | ``` 160 | get_node_essentials("nodes-base.httpRequest") 161 | ``` 162 | 163 | 3. **Search for advanced properties**: 164 | ``` 165 | search_node_properties("nodes-base.httpRequest", "proxy") 166 | ``` 167 | 168 | 4. **Only use get_node_info if absolutely necessary**: 169 | ``` 170 | get_node_info("nodes-base.httpRequest") // Last resort - 100KB+ response 171 | ``` 172 | 173 | ## Common Patterns 174 | 175 | ### Making API Calls: 176 | ```javascript 177 | // Start with essentials 178 | const essentials = get_node_essentials("nodes-base.httpRequest"); 179 | 180 | // Use the POST example 181 | const config = essentials.examples.common; 182 | 183 | // Modify for your needs 184 | config.url = "https://api.myservice.com/endpoint"; 185 | config.jsonBody = JSON.stringify({ my: "data" }); 186 | ``` 187 | 188 | ### Setting up Webhooks: 189 | ```javascript 190 | // Get webhook essentials 191 | const essentials = get_node_essentials("nodes-base.webhook"); 192 | 193 | // Start with minimal 194 | const config = essentials.examples.minimal; 195 | config.path = "my-webhook-endpoint"; 196 | ``` 197 | 198 | ### Database Operations: 199 | ```javascript 200 | // Get database essentials 201 | const essentials = get_node_essentials("nodes-base.postgres"); 202 | 203 | // Check available operations 204 | const operations = essentials.operations; 205 | 206 | // Use appropriate example 207 | const config = essentials.examples.common; 208 | ``` 209 | 210 | ## Tips for AI Agents 211 | 212 | 1. **Always start with get_node_essentials** - It has everything needed for 90% of use cases 213 | 214 | 2. **Use examples as templates** - They're tested, working configurations 215 | 216 | 3. **Search before diving deep** - Use search_node_properties to find specific options 217 | 218 | 4. **Check metadata** - Know if you need credentials, if it's a trigger, etc. 219 | 220 | 5. **Progressive disclosure** - Start simple, add complexity only when needed 221 | 222 | ## Supported Nodes 223 | 224 | The essentials tool has optimized configurations for 20+ commonly used nodes: 225 | 226 | - **Core**: httpRequest, webhook, code, set, if, merge, splitInBatches 227 | - **Databases**: postgres, mysql, mongodb, redis 228 | - **Communication**: slack, email, discord 229 | - **Files**: ftp, ssh, googleSheets 230 | - **AI**: openAi, agent 231 | - **Utilities**: executeCommand, function 232 | 233 | For other nodes, the tool automatically extracts the most important properties. 
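Below is a minimal sketch of that fallback path for a node without a curated profile. It is illustrative only — the node type and search term are arbitrary examples, and the calls mirror the MCP tool invocations shown earlier in this guide rather than a specific client API:

```javascript
// Essentials still work: the most important properties are auto-extracted
const essentials = get_node_essentials("nodes-base.notion");

// Begin from the minimal example, then layer on what you need
const config = { ...essentials.examples.minimal };

// If an option is missing from essentials, search for it by keyword
const matches = search_node_properties("nodes-base.notion", "filter");

// Only fall back to the full schema when the search finds nothing
if (matches.totalMatches === 0) {
  const full = get_node_info("nodes-base.notion"); // Last resort - 100KB+ response
}
```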
234 | 235 | ## Performance Metrics 236 | 237 | Based on testing with top 10 nodes: 238 | 239 | - **Average size reduction**: 94.3% 240 | - **Response time improvement**: 78% 241 | - **Properties shown**: 10-20 (vs 200+) 242 | - **Usability improvement**: Dramatic 243 | 244 | ## Migration Guide 245 | 246 | If you're currently using `get_node_info`, here's how to migrate: 247 | 248 | ### Before: 249 | ```javascript 250 | const node = get_node_info("nodes-base.httpRequest"); 251 | // Parse through 200+ properties 252 | // Figure out what's required 253 | // Deal with duplicates and conditionals 254 | ``` 255 | 256 | ### After: 257 | ```javascript 258 | const essentials = get_node_essentials("nodes-base.httpRequest"); 259 | // Use essentials.requiredProperties 260 | // Use essentials.commonProperties 261 | // Start with essentials.examples.common 262 | ``` 263 | 264 | ## Troubleshooting 265 | 266 | **Q: The tool says node not found** 267 | A: Use the full node type with prefix: `nodes-base.httpRequest` not just `httpRequest` 268 | 269 | **Q: I need a property that's not in essentials** 270 | A: Use `search_node_properties` to find it, or `get_node_info` as last resort 271 | 272 | **Q: The examples don't cover my use case** 273 | A: Start with the closest example and modify. Use search to find additional properties. 274 | 275 | **Q: How do I know what properties are available?** 276 | A: Check `metadata.totalProperties` to see how many are available, then search for what you need 277 | 278 | ## Future Improvements 279 | 280 | Planned enhancements: 281 | - Task-based configurations (e.g., "post_json_with_auth") 282 | - Configuration validation 283 | - Property dependency resolution 284 | - More node coverage 285 | 286 | ## Summary 287 | 288 | The new essentials tools make n8n workflow building with AI agents actually practical. Instead of overwhelming agents with hundreds of properties, we provide just what's needed, when it's needed. This results in faster, more accurate workflow creation with fewer errors. ``` -------------------------------------------------------------------------------- /src/scripts/extract-from-docker.ts: -------------------------------------------------------------------------------- ```typescript 1 | #!/usr/bin/env node 2 | import * as dotenv from 'dotenv'; 3 | import { NodeDocumentationService } from '../services/node-documentation-service'; 4 | import { NodeSourceExtractor } from '../utils/node-source-extractor'; 5 | import { logger } from '../utils/logger'; 6 | import * as fs from 'fs/promises'; 7 | import * as path from 'path'; 8 | 9 | // Load environment variables 10 | dotenv.config(); 11 | 12 | async function extractNodesFromDocker() { 13 | logger.info('🐳 Starting Docker-based node extraction...'); 14 | 15 | // Add Docker volume paths to environment for NodeSourceExtractor 16 | const dockerVolumePaths = [ 17 | process.env.N8N_MODULES_PATH || '/n8n-modules', 18 | process.env.N8N_CUSTOM_PATH || '/n8n-custom', 19 | ]; 20 | 21 | logger.info(`Docker volume paths: ${dockerVolumePaths.join(', ')}`); 22 | 23 | // Check if volumes are mounted 24 | for (const volumePath of dockerVolumePaths) { 25 | try { 26 | await fs.access(volumePath); 27 | logger.info(`✅ Volume mounted: ${volumePath}`); 28 | 29 | // List what's in the volume 30 | const entries = await fs.readdir(volumePath); 31 | logger.info(`Contents of ${volumePath}: ${entries.slice(0, 10).join(', ')}${entries.length > 10 ? '...' 
: ''}`); 32 | } catch (error) { 33 | logger.warn(`❌ Volume not accessible: ${volumePath}`); 34 | } 35 | } 36 | 37 | // Initialize services 38 | const docService = new NodeDocumentationService(); 39 | const extractor = new NodeSourceExtractor(); 40 | 41 | // Extend the extractor's search paths with Docker volumes 42 | (extractor as any).n8nBasePaths.unshift(...dockerVolumePaths); 43 | 44 | // Clear existing nodes to ensure we only have latest versions 45 | logger.info('🧹 Clearing existing nodes...'); 46 | const db = (docService as any).db; 47 | db.prepare('DELETE FROM nodes').run(); 48 | 49 | logger.info('🔍 Searching for n8n nodes in Docker volumes...'); 50 | 51 | // Known n8n packages to extract 52 | const n8nPackages = [ 53 | 'n8n-nodes-base', 54 | '@n8n/n8n-nodes-langchain', 55 | 'n8n-nodes-extras', 56 | ]; 57 | 58 | let totalExtracted = 0; 59 | let ifNodeVersion = null; 60 | 61 | for (const packageName of n8nPackages) { 62 | logger.info(`\n📦 Processing package: ${packageName}`); 63 | 64 | try { 65 | // Find package in Docker volumes 66 | let packagePath = null; 67 | 68 | for (const volumePath of dockerVolumePaths) { 69 | const possiblePaths = [ 70 | path.join(volumePath, packageName), 71 | path.join(volumePath, '.pnpm', `${packageName}@*`, 'node_modules', packageName), 72 | ]; 73 | 74 | for (const testPath of possiblePaths) { 75 | try { 76 | // Use glob pattern to find pnpm packages 77 | if (testPath.includes('*')) { 78 | const baseDir = path.dirname(testPath.split('*')[0]); 79 | const entries = await fs.readdir(baseDir); 80 | 81 | for (const entry of entries) { 82 | if (entry.includes(packageName.replace('/', '+'))) { 83 | const fullPath = path.join(baseDir, entry, 'node_modules', packageName); 84 | try { 85 | await fs.access(fullPath); 86 | packagePath = fullPath; 87 | break; 88 | } catch {} 89 | } 90 | } 91 | } else { 92 | await fs.access(testPath); 93 | packagePath = testPath; 94 | break; 95 | } 96 | } catch {} 97 | } 98 | 99 | if (packagePath) break; 100 | } 101 | 102 | if (!packagePath) { 103 | logger.warn(`Package ${packageName} not found in Docker volumes`); 104 | continue; 105 | } 106 | 107 | logger.info(`Found package at: ${packagePath}`); 108 | 109 | // Check package version 110 | try { 111 | const packageJsonPath = path.join(packagePath, 'package.json'); 112 | const packageJson = JSON.parse(await fs.readFile(packageJsonPath, 'utf-8')); 113 | logger.info(`Package version: ${packageJson.version}`); 114 | } catch {} 115 | 116 | // Find nodes directory 117 | const nodesPath = path.join(packagePath, 'dist', 'nodes'); 118 | 119 | try { 120 | await fs.access(nodesPath); 121 | logger.info(`Scanning nodes directory: ${nodesPath}`); 122 | 123 | // Extract all nodes from this package 124 | const nodeEntries = await scanForNodes(nodesPath); 125 | logger.info(`Found ${nodeEntries.length} nodes in ${packageName}`); 126 | 127 | for (const nodeEntry of nodeEntries) { 128 | try { 129 | const nodeName = nodeEntry.name.replace('.node.js', ''); 130 | const nodeType = `${packageName}.${nodeName}`; 131 | 132 | logger.info(`Extracting: ${nodeType}`); 133 | 134 | // Extract source info 135 | const sourceInfo = await extractor.extractNodeSource(nodeType); 136 | 137 | // Check if this is the If node 138 | if (nodeName === 'If') { 139 | // Look for version in the source code 140 | const versionMatch = sourceInfo.sourceCode.match(/version:\s*(\d+)/); 141 | if (versionMatch) { 142 | ifNodeVersion = versionMatch[1]; 143 | logger.info(`📍 Found If node version: ${ifNodeVersion}`); 144 | } 145 | } 146 | 147 
| // Store in database 148 | await docService.storeNode({ 149 | nodeType: nodeType, 150 | name: nodeName, 151 | displayName: nodeName, 152 | description: `${nodeName} node from ${packageName}`, 153 | sourceCode: sourceInfo.sourceCode, 154 | credentialCode: sourceInfo.credentialCode, 155 | packageName: packageName, 156 | version: ifNodeVersion || '1', 157 | hasCredentials: !!sourceInfo.credentialCode, 158 | isTrigger: sourceInfo.sourceCode.includes('trigger: true') || nodeName.toLowerCase().includes('trigger'), 159 | isWebhook: sourceInfo.sourceCode.includes('webhook: true') || nodeName.toLowerCase().includes('webhook'), 160 | }); 161 | 162 | totalExtracted++; 163 | } catch (error) { 164 | logger.error(`Failed to extract ${nodeEntry.name}: ${error}`); 165 | } 166 | } 167 | } catch (error) { 168 | logger.error(`Failed to scan nodes directory: ${error}`); 169 | } 170 | } catch (error) { 171 | logger.error(`Failed to process package ${packageName}: ${error}`); 172 | } 173 | } 174 | 175 | logger.info(`\n✅ Extraction complete!`); 176 | logger.info(`📊 Total nodes extracted: ${totalExtracted}`); 177 | 178 | if (ifNodeVersion) { 179 | logger.info(`📍 If node version: ${ifNodeVersion}`); 180 | if (ifNodeVersion === '2' || ifNodeVersion === '2.2') { 181 | logger.info('✅ Successfully extracted latest If node (v2+)!'); 182 | } else { 183 | logger.warn(`⚠️ If node version is ${ifNodeVersion}, expected v2 or higher`); 184 | } 185 | } 186 | 187 | // Close database 188 | await docService.close(); 189 | } 190 | 191 | async function scanForNodes(dirPath: string): Promise<{ name: string; path: string }[]> { 192 | const nodes: { name: string; path: string }[] = []; 193 | 194 | async function scan(currentPath: string) { 195 | try { 196 | const entries = await fs.readdir(currentPath, { withFileTypes: true }); 197 | 198 | for (const entry of entries) { 199 | const fullPath = path.join(currentPath, entry.name); 200 | 201 | if (entry.isFile() && entry.name.endsWith('.node.js')) { 202 | nodes.push({ name: entry.name, path: fullPath }); 203 | } else if (entry.isDirectory() && entry.name !== 'node_modules') { 204 | await scan(fullPath); 205 | } 206 | } 207 | } catch (error) { 208 | logger.debug(`Failed to scan directory ${currentPath}: ${error}`); 209 | } 210 | } 211 | 212 | await scan(dirPath); 213 | return nodes; 214 | } 215 | 216 | // Run extraction 217 | extractNodesFromDocker().catch(error => { 218 | logger.error('Extraction failed:', error); 219 | process.exit(1); 220 | }); ``` -------------------------------------------------------------------------------- /src/scripts/test-autofix-workflow.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Test script for n8n_autofix_workflow functionality 3 | * 4 | * Tests the automatic fixing of common workflow validation errors: 5 | * 1. Expression format errors (missing = prefix) 6 | * 2. TypeVersion corrections 7 | * 3. 
Error output configuration issues 8 | */ 9 | 10 | import { WorkflowAutoFixer } from '../services/workflow-auto-fixer'; 11 | import { WorkflowValidator } from '../services/workflow-validator'; 12 | import { EnhancedConfigValidator } from '../services/enhanced-config-validator'; 13 | import { ExpressionFormatValidator } from '../services/expression-format-validator'; 14 | import { NodeRepository } from '../database/node-repository'; 15 | import { Logger } from '../utils/logger'; 16 | import { createDatabaseAdapter } from '../database/database-adapter'; 17 | import * as path from 'path'; 18 | 19 | const logger = new Logger({ prefix: '[TestAutofix]' }); 20 | 21 | async function testAutofix() { 22 | // Initialize database and repository 23 | const dbPath = path.join(__dirname, '../../data/nodes.db'); 24 | const dbAdapter = await createDatabaseAdapter(dbPath); 25 | const repository = new NodeRepository(dbAdapter); 26 | 27 | // Test workflow with various issues 28 | const testWorkflow = { 29 | id: 'test_workflow_1', 30 | name: 'Test Workflow for Autofix', 31 | nodes: [ 32 | { 33 | id: 'webhook_1', 34 | name: 'Webhook', 35 | type: 'n8n-nodes-base.webhook', 36 | typeVersion: 1.1, 37 | position: [250, 300], 38 | parameters: { 39 | httpMethod: 'GET', 40 | path: 'test-webhook', 41 | responseMode: 'onReceived', 42 | responseData: 'firstEntryJson' 43 | } 44 | }, 45 | { 46 | id: 'http_1', 47 | name: 'HTTP Request', 48 | type: 'n8n-nodes-base.httpRequest', 49 | typeVersion: 5.0, // Invalid - max is 4.2 50 | position: [450, 300], 51 | parameters: { 52 | method: 'GET', 53 | url: '{{ $json.webhookUrl }}', // Missing = prefix 54 | sendHeaders: true, 55 | headerParameters: { 56 | parameters: [ 57 | { 58 | name: 'Authorization', 59 | value: '{{ $json.token }}' // Missing = prefix 60 | } 61 | ] 62 | } 63 | }, 64 | onError: 'continueErrorOutput' // Has onError but no error connections 65 | }, 66 | { 67 | id: 'set_1', 68 | name: 'Set', 69 | type: 'n8n-nodes-base.set', 70 | typeVersion: 3.5, // Invalid version 71 | position: [650, 300], 72 | parameters: { 73 | mode: 'manual', 74 | duplicateItem: false, 75 | values: { 76 | values: [ 77 | { 78 | name: 'status', 79 | value: '{{ $json.success }}' // Missing = prefix 80 | } 81 | ] 82 | } 83 | } 84 | } 85 | ], 86 | connections: { 87 | 'Webhook': { 88 | main: [ 89 | [ 90 | { 91 | node: 'HTTP Request', 92 | type: 'main', 93 | index: 0 94 | } 95 | ] 96 | ] 97 | }, 98 | 'HTTP Request': { 99 | main: [ 100 | [ 101 | { 102 | node: 'Set', 103 | type: 'main', 104 | index: 0 105 | } 106 | ] 107 | // Missing error output connection for onError: 'continueErrorOutput' 108 | ] 109 | } 110 | } 111 | }; 112 | 113 | logger.info('=== Testing Workflow Auto-Fixer ===\n'); 114 | 115 | // Step 1: Validate the workflow to identify issues 116 | logger.info('Step 1: Validating workflow to identify issues...'); 117 | const validator = new WorkflowValidator(repository, EnhancedConfigValidator); 118 | const validationResult = await validator.validateWorkflow(testWorkflow as any, { 119 | validateNodes: true, 120 | validateConnections: true, 121 | validateExpressions: true, 122 | profile: 'ai-friendly' 123 | }); 124 | 125 | logger.info(`Found ${validationResult.errors.length} errors and ${validationResult.warnings.length} warnings`); 126 | 127 | // Step 2: Check for expression format issues 128 | logger.info('\nStep 2: Checking for expression format issues...'); 129 | const allFormatIssues: any[] = []; 130 | for (const node of testWorkflow.nodes) { 131 | const formatContext = { 132 | nodeType: 
node.type, 133 | nodeName: node.name, 134 | nodeId: node.id 135 | }; 136 | 137 | const nodeFormatIssues = ExpressionFormatValidator.validateNodeParameters( 138 | node.parameters, 139 | formatContext 140 | ); 141 | 142 | // Add node information to each format issue 143 | const enrichedIssues = nodeFormatIssues.map(issue => ({ 144 | ...issue, 145 | nodeName: node.name, 146 | nodeId: node.id 147 | })); 148 | 149 | allFormatIssues.push(...enrichedIssues); 150 | } 151 | 152 | logger.info(`Found ${allFormatIssues.length} expression format issues`); 153 | 154 | // Debug: Show the actual format issues 155 | if (allFormatIssues.length > 0) { 156 | logger.info('\nExpression format issues found:'); 157 | for (const issue of allFormatIssues) { 158 | logger.info(` - ${issue.fieldPath}: ${issue.issueType} (${issue.severity})`); 159 | logger.info(` Current: ${JSON.stringify(issue.currentValue)}`); 160 | logger.info(` Fixed: ${JSON.stringify(issue.correctedValue)}`); 161 | } 162 | } 163 | 164 | // Step 3: Generate fixes in preview mode 165 | logger.info('\nStep 3: Generating fixes (preview mode)...'); 166 | const autoFixer = new WorkflowAutoFixer(); 167 | const previewResult = autoFixer.generateFixes( 168 | testWorkflow as any, 169 | validationResult, 170 | allFormatIssues, 171 | { 172 | applyFixes: false, // Preview mode 173 | confidenceThreshold: 'medium' 174 | } 175 | ); 176 | 177 | logger.info(`\nGenerated ${previewResult.fixes.length} fixes:`); 178 | logger.info(`Summary: ${previewResult.summary}`); 179 | logger.info('\nFixes by type:'); 180 | for (const [type, count] of Object.entries(previewResult.stats.byType)) { 181 | if (count > 0) { 182 | logger.info(` - ${type}: ${count}`); 183 | } 184 | } 185 | 186 | logger.info('\nFixes by confidence:'); 187 | for (const [confidence, count] of Object.entries(previewResult.stats.byConfidence)) { 188 | if (count > 0) { 189 | logger.info(` - ${confidence}: ${count}`); 190 | } 191 | } 192 | 193 | // Step 4: Display individual fixes 194 | logger.info('\nDetailed fixes:'); 195 | for (const fix of previewResult.fixes) { 196 | logger.info(`\n[${fix.confidence.toUpperCase()}] ${fix.node}.${fix.field} (${fix.type})`); 197 | logger.info(` Before: ${JSON.stringify(fix.before)}`); 198 | logger.info(` After: ${JSON.stringify(fix.after)}`); 199 | logger.info(` Description: ${fix.description}`); 200 | } 201 | 202 | // Step 5: Display generated operations 203 | logger.info('\n\nGenerated diff operations:'); 204 | for (const op of previewResult.operations) { 205 | logger.info(`\nOperation: ${op.type}`); 206 | logger.info(` Details: ${JSON.stringify(op, null, 2)}`); 207 | } 208 | 209 | // Step 6: Test with different confidence thresholds 210 | logger.info('\n\n=== Testing Different Confidence Thresholds ==='); 211 | 212 | for (const threshold of ['high', 'medium', 'low'] as const) { 213 | const result = autoFixer.generateFixes( 214 | testWorkflow as any, 215 | validationResult, 216 | allFormatIssues, 217 | { 218 | applyFixes: false, 219 | confidenceThreshold: threshold 220 | } 221 | ); 222 | logger.info(`\nThreshold "${threshold}": ${result.fixes.length} fixes`); 223 | } 224 | 225 | // Step 7: Test with specific fix types 226 | logger.info('\n\n=== Testing Specific Fix Types ==='); 227 | 228 | const fixTypes = ['expression-format', 'typeversion-correction', 'error-output-config'] as const; 229 | for (const fixType of fixTypes) { 230 | const result = autoFixer.generateFixes( 231 | testWorkflow as any, 232 | validationResult, 233 | allFormatIssues, 234 | { 235 | applyFixes: 
false, 236 | fixTypes: [fixType] 237 | } 238 | ); 239 | logger.info(`\nFix type "${fixType}": ${result.fixes.length} fixes`); 240 | } 241 | 242 | logger.info('\n\n✅ Autofix test completed successfully!'); 243 | 244 | await dbAdapter.close(); 245 | } 246 | 247 | // Run the test 248 | testAutofix().catch(error => { 249 | logger.error('Test failed:', error); 250 | process.exit(1); 251 | }); ``` -------------------------------------------------------------------------------- /src/parsers/property-extractor.ts: -------------------------------------------------------------------------------- ```typescript 1 | import type { NodeClass } from '../types/node-types'; 2 | 3 | export class PropertyExtractor { 4 | /** 5 | * Extract properties with proper handling of n8n's complex structures 6 | */ 7 | extractProperties(nodeClass: NodeClass): any[] { 8 | const properties: any[] = []; 9 | 10 | // First try to get instance-level properties 11 | let instance: any; 12 | try { 13 | instance = typeof nodeClass === 'function' ? new nodeClass() : nodeClass; 14 | } catch (e) { 15 | // Failed to instantiate 16 | } 17 | 18 | // Handle versioned nodes - check instance for nodeVersions 19 | if (instance?.nodeVersions) { 20 | const versions = Object.keys(instance.nodeVersions).map(Number); 21 | if (versions.length > 0) { 22 | const latestVersion = Math.max(...versions); 23 | if (!isNaN(latestVersion)) { 24 | const versionedNode = instance.nodeVersions[latestVersion]; 25 | 26 | if (versionedNode?.description?.properties) { 27 | return this.normalizeProperties(versionedNode.description.properties); 28 | } 29 | } 30 | } 31 | } 32 | 33 | // Check for description with properties 34 | const description = instance?.description || instance?.baseDescription || 35 | this.getNodeDescription(nodeClass); 36 | 37 | if (description?.properties) { 38 | return this.normalizeProperties(description.properties); 39 | } 40 | 41 | return properties; 42 | } 43 | 44 | private getNodeDescription(nodeClass: NodeClass): any { 45 | // Try to get description from the class first 46 | let description: any; 47 | 48 | if (typeof nodeClass === 'function') { 49 | // Try to instantiate to get description 50 | try { 51 | const instance = new nodeClass(); 52 | // Strategic any assertion for instance properties 53 | const inst = instance as any; 54 | description = inst.description || inst.baseDescription || {}; 55 | } catch (e) { 56 | // Some nodes might require parameters to instantiate 57 | // Strategic any assertion for class-level properties 58 | const nodeClassAny = nodeClass as any; 59 | description = nodeClassAny.description || {}; 60 | } 61 | } else { 62 | // Strategic any assertion for instance properties 63 | const inst = nodeClass as any; 64 | description = inst.description || {}; 65 | } 66 | 67 | return description; 68 | } 69 | 70 | /** 71 | * Extract operations from both declarative and programmatic nodes 72 | */ 73 | extractOperations(nodeClass: NodeClass): any[] { 74 | const operations: any[] = []; 75 | 76 | // First try to get instance-level data 77 | let instance: any; 78 | try { 79 | instance = typeof nodeClass === 'function' ? 
new nodeClass() : nodeClass; 80 | } catch (e) { 81 | // Failed to instantiate 82 | } 83 | 84 | // Handle versioned nodes 85 | if (instance?.nodeVersions) { 86 | const versions = Object.keys(instance.nodeVersions).map(Number); 87 | if (versions.length > 0) { 88 | const latestVersion = Math.max(...versions); 89 | if (!isNaN(latestVersion)) { 90 | const versionedNode = instance.nodeVersions[latestVersion]; 91 | 92 | if (versionedNode?.description) { 93 | return this.extractOperationsFromDescription(versionedNode.description); 94 | } 95 | } 96 | } 97 | } 98 | 99 | // Get description 100 | const description = instance?.description || instance?.baseDescription || 101 | this.getNodeDescription(nodeClass); 102 | 103 | return this.extractOperationsFromDescription(description); 104 | } 105 | 106 | private extractOperationsFromDescription(description: any): any[] { 107 | const operations: any[] = []; 108 | 109 | if (!description) return operations; 110 | 111 | // Declarative nodes (with routing) 112 | if (description.routing) { 113 | const routing = description.routing; 114 | 115 | // Extract from request.resource and request.operation 116 | if (routing.request?.resource) { 117 | const resources = routing.request.resource.options || []; 118 | const operationOptions = routing.request.operation?.options || {}; 119 | 120 | resources.forEach((resource: any) => { 121 | const resourceOps = operationOptions[resource.value] || []; 122 | resourceOps.forEach((op: any) => { 123 | operations.push({ 124 | resource: resource.value, 125 | operation: op.value, 126 | name: `${resource.name} - ${op.name}`, 127 | action: op.action 128 | }); 129 | }); 130 | }); 131 | } 132 | } 133 | 134 | // Programmatic nodes - look for operation property in properties 135 | if (description.properties && Array.isArray(description.properties)) { 136 | const operationProp = description.properties.find( 137 | (p: any) => p.name === 'operation' || p.name === 'action' 138 | ); 139 | 140 | if (operationProp?.options) { 141 | operationProp.options.forEach((op: any) => { 142 | operations.push({ 143 | operation: op.value, 144 | name: op.name, 145 | description: op.description 146 | }); 147 | }); 148 | } 149 | } 150 | 151 | return operations; 152 | } 153 | 154 | /** 155 | * Deep search for AI tool capability 156 | */ 157 | detectAIToolCapability(nodeClass: NodeClass): boolean { 158 | const description = this.getNodeDescription(nodeClass); 159 | 160 | // Direct property check 161 | if (description?.usableAsTool === true) return true; 162 | 163 | // Check in actions for declarative nodes 164 | if (description?.actions?.some((a: any) => a.usableAsTool === true)) return true; 165 | 166 | // Check versioned nodes 167 | // Strategic any assertion for nodeVersions property 168 | const nodeClassAny = nodeClass as any; 169 | if (nodeClassAny.nodeVersions) { 170 | for (const version of Object.values(nodeClassAny.nodeVersions)) { 171 | if ((version as any).description?.usableAsTool === true) return true; 172 | } 173 | } 174 | 175 | // Check for specific AI-related properties 176 | const aiIndicators = ['openai', 'anthropic', 'huggingface', 'cohere', 'ai']; 177 | const nodeName = description?.name?.toLowerCase() || ''; 178 | 179 | return aiIndicators.some(indicator => nodeName.includes(indicator)); 180 | } 181 | 182 | /** 183 | * Extract credential requirements with proper structure 184 | */ 185 | extractCredentials(nodeClass: NodeClass): any[] { 186 | const credentials: any[] = []; 187 | 188 | // First try to get instance-level data 189 | let instance: 
any; 190 | try { 191 | instance = typeof nodeClass === 'function' ? new nodeClass() : nodeClass; 192 | } catch (e) { 193 | // Failed to instantiate 194 | } 195 | 196 | // Handle versioned nodes 197 | if (instance?.nodeVersions) { 198 | const versions = Object.keys(instance.nodeVersions).map(Number); 199 | if (versions.length > 0) { 200 | const latestVersion = Math.max(...versions); 201 | if (!isNaN(latestVersion)) { 202 | const versionedNode = instance.nodeVersions[latestVersion]; 203 | 204 | if (versionedNode?.description?.credentials) { 205 | return versionedNode.description.credentials; 206 | } 207 | } 208 | } 209 | } 210 | 211 | // Check for description with credentials 212 | const description = instance?.description || instance?.baseDescription || 213 | this.getNodeDescription(nodeClass); 214 | 215 | if (description?.credentials) { 216 | return description.credentials; 217 | } 218 | 219 | return credentials; 220 | } 221 | 222 | private normalizeProperties(properties: any[]): any[] { 223 | // Ensure all properties have consistent structure 224 | return properties.map(prop => ({ 225 | displayName: prop.displayName, 226 | name: prop.name, 227 | type: prop.type, 228 | default: prop.default, 229 | description: prop.description, 230 | options: prop.options, 231 | required: prop.required, 232 | displayOptions: prop.displayOptions, 233 | typeOptions: prop.typeOptions, 234 | modes: prop.modes, // For resourceLocator type properties - modes are at top level 235 | noDataExpression: prop.noDataExpression 236 | })); 237 | } 238 | } ``` -------------------------------------------------------------------------------- /docs/local/DEEP_DIVE_ANALYSIS_README.md: -------------------------------------------------------------------------------- ```markdown 1 | # N8N-MCP Deep Dive Analysis - October 2, 2025 2 | 3 | ## Overview 4 | 5 | This directory contains a comprehensive deep-dive analysis of n8n-mcp usage data from September 26 - October 2, 2025. 6 | 7 | **Data Volume Analyzed:** 8 | - 212,375 telemetry events 9 | - 5,751 workflow creations 10 | - 2,119 unique users 11 | - 6 days of usage data 12 | 13 | ## Report Structure 14 | 15 | 16 | ###: `DEEP_DIVE_ANALYSIS_2025-10-02.md` (Main Report) 17 | 18 | **Sections Covered:** 19 | 1. **Executive Summary** - Key findings and recommendations 20 | 2. **Tool Performance Analysis** - Success rates, performance metrics, critical findings 21 | 3. **Validation Catastrophe** - The node type prefix disaster analysis 22 | 4. **Usage Patterns & User Segmentation** - User distribution, daily trends 23 | 5. **Tool Sequence Analysis** - How AI agents use tools together 24 | 6. **Workflow Creation Patterns** - Complexity distribution, popular nodes 25 | 7. **Platform & Version Distribution** - OS, architecture, version adoption 26 | 8. **Error Patterns & Root Causes** - TypeErrors, validation errors, discovery failures 27 | 9. **P0-P1 Refactoring Recommendations** - Detailed implementation guides 28 | 29 | **Sections Covered:** 30 | - Remaining P1 and P2 recommendations 31 | - Architectural refactoring suggestions 32 | - Telemetry enhancements 33 | - CHANGELOG integration 34 | - Final recommendations summary 35 | 36 | ## Key Findings Summary 37 | 38 | ### Critical Issues (P0 - Fix Immediately) 39 | 40 | 1. **Node Type Prefix Validation Catastrophe** 41 | - 5,000+ validation errors from single root cause 42 | - `nodes-base.X` vs `n8n-nodes-base.X` confusion 43 | - **Solution**: Auto-normalize prefixes (2-4 hours effort) 44 | 45 | 2. 
**TypeError in Node Information Tools** 46 | - 10-18% failure rate in get_node_essentials/info 47 | - 1,000+ failures affecting hundreds of users 48 | - **Solution**: Complete null-safety audit (1 day effort) 49 | 50 | 3. **Task Discovery Failures** 51 | - `get_node_for_task` failing 28% of the time 52 | - Worst-performing tool in entire system 53 | - **Solution**: Expand task library + fuzzy matching (3 days effort) 54 | 55 | ### Performance Metrics 56 | 57 | **Excellent Reliability (96-100% success):** 58 | - n8n_update_partial_workflow: 98.7% 59 | - search_nodes: 99.8% 60 | - n8n_create_workflow: 96.1% 61 | - All workflow management tools: 100% 62 | 63 | **User Distribution:** 64 | - Power Users (12): 2,112 events/user, 33 workflows 65 | - Heavy Users (47): 673 events/user, 18 workflows 66 | - Regular Users (516): 199 events/user, 7 workflows (CORE AUDIENCE) 67 | - Active Users (919): 52 events/user, 2 workflows 68 | - Casual Users (625): 8 events/user, 1 workflow 69 | 70 | ### Usage Insights 71 | 72 | **Most Used Tools:** 73 | 1. n8n_update_partial_workflow: 10,177 calls (iterative refinement) 74 | 2. search_nodes: 8,839 calls (node discovery) 75 | 3. n8n_create_workflow: 6,046 calls (workflow creation) 76 | 77 | **Most Common Tool Sequences:** 78 | 1. update → update → update (549x) - Iterative refinement pattern 79 | 2. create → update (297x) - Create then refine 80 | 3. update → get_workflow (265x) - Update then verify 81 | 82 | **Most Popular Nodes:** 83 | 1. code (53% of workflows) - AI agents love programmatic control 84 | 2. httpRequest (47%) - Integration-heavy usage 85 | 3. webhook (32%) - Event-driven automation 86 | 87 | ## SQL Analytical Views Created 88 | 89 | 15 comprehensive views were created in Supabase for ongoing analysis: 90 | 91 | 1. `vw_tool_performance` - Performance metrics per tool 92 | 2. `vw_error_analysis` - Error patterns and frequencies 93 | 3. `vw_validation_analysis` - Validation failure details 94 | 4. `vw_tool_sequences` - Tool-to-tool transition patterns 95 | 5. `vw_workflow_creation_patterns` - Workflow characteristics 96 | 6. `vw_node_usage_analysis` - Node popularity and complexity 97 | 7. `vw_node_cooccurrence` - Which nodes are used together 98 | 8. `vw_user_activity` - Per-user activity metrics 99 | 9. `vw_session_analysis` - Platform/version distribution 100 | 10. `vw_workflow_validation_failures` - Workflow validation issues 101 | 11. `vw_temporal_patterns` - Time-based usage patterns 102 | 12. `vw_tool_funnel` - User progression through tools 103 | 13. `vw_search_analysis` - Search behavior 104 | 14. `vw_tool_success_summary` - Success/failure rates 105 | 15. 
`vw_user_journeys` - Complete user session reconstruction 106 | 107 | ## Priority Recommendations 108 | 109 | ### Immediate Actions (This Week) 110 | 111 | ✅ **P0-R1**: Auto-normalize node type prefixes → Eliminate 4,800 errors 112 | ✅ **P0-R2**: Complete null-safety audit → Fix 10-18% TypeError failures 113 | ✅ **P0-R3**: Expand get_node_for_task library → 72% → 95% success rate 114 | 115 | **Expected Impact**: Reduce error rate from 5-10% to <2% overall 116 | 117 | ### Next Release (2-3 Weeks) 118 | 119 | ✅ **P1-R4**: Batch workflow operations → Save 30-50% tokens 120 | ✅ **P1-R5**: Proactive node suggestions → Reduce search iterations 121 | ✅ **P1-R6**: Auto-fix suggestions in errors → Self-service recovery 122 | 123 | **Expected Impact**: 40% faster workflow creation, better UX 124 | 125 | ### Future Roadmap (1-3 Months) 126 | 127 | ✅ **A1**: Service layer consolidation → Cleaner architecture 128 | ✅ **A2**: Repository caching → 50% faster node operations 129 | ✅ **R10**: Workflow template library from usage → 80% coverage 130 | ✅ **T1-T3**: Enhanced telemetry → Better observability 131 | 132 | **Expected Impact**: Scalable foundation for 10x growth 133 | 134 | ## Methodology 135 | 136 | ### Data Sources 137 | 138 | 1. **Supabase Telemetry Database** 139 | - `telemetry_events` table: 212,375 rows 140 | - `telemetry_workflows` table: 5,751 rows 141 | 142 | 2. **Analytical Views** 143 | - Created 15 SQL views for multi-dimensional analysis 144 | - Enabled complex queries and pattern recognition 145 | 146 | 3. **CHANGELOG Review** 147 | - Analyzed recent changes (v2.14.0 - v2.14.6) 148 | - Correlated fixes with error patterns 149 | 150 | ### Analysis Approach 151 | 152 | 1. **Quantitative Analysis** 153 | - Success/failure rates per tool 154 | - Performance metrics (avg, median, p95, p99) 155 | - User segmentation and cohort analysis 156 | - Temporal trends and growth patterns 157 | 158 | 2. **Pattern Recognition** 159 | - Tool sequence analysis (Markov chains) 160 | - Node co-occurrence patterns 161 | - Workflow complexity distribution 162 | - Error clustering and root cause analysis 163 | 164 | 3. **Qualitative Insights** 165 | - CHANGELOG integration 166 | - Error message analysis 167 | - User journey reconstruction 168 | - Best practice identification 169 | 170 | ## How to Use This Analysis 171 | 172 | ### For Development Priorities 173 | 174 | 1. Review **P0 Critical Recommendations** (Section 8) 175 | 2. Check estimated effort and impact 176 | 3. Prioritize based on ROI (impact/effort ratio) 177 | 4. Follow implementation guides with code examples 178 | 179 | ### For Architecture Decisions 180 | 181 | 1. Review **Architectural Recommendations** (Section 9) 182 | 2. Consider service layer consolidation 183 | 3. Evaluate repository caching opportunities 184 | 4. Plan for 10x scale 185 | 186 | ### For Product Strategy 187 | 188 | 1. Review **Usage Patterns** (Section 3 & 5) 189 | 2. Understand user segments (power vs casual) 190 | 3. Identify high-value features (most-used tools) 191 | 4. Focus on reliability over features (96% success rate target) 192 | 193 | ### For Telemetry Enhancement 194 | 195 | 1. Review **Telemetry Enhancements** (Section 10) 196 | 2. Add fine-grained timing metrics 197 | 3. Track workflow creation funnels 198 | 4. 
Monitor node-level analytics 199 | 200 | ## Contact & Feedback 201 | 202 | For questions about this analysis or to request additional insights: 203 | - Data Analyst: Claude Code with Supabase MCP 204 | - Analysis Date: October 2, 2025 205 | - Data Period: September 26 - October 2, 2025 206 | 207 | ## Change Log 208 | 209 | - **2025-10-02**: Initial comprehensive analysis completed 210 | - 15 SQL analytical views created 211 | - 13 sections of detailed findings 212 | - P0/P1/P2 recommendations with implementation guides 213 | - Code examples and effort estimates provided 214 | 215 | ## Next Steps 216 | 217 | 1. ✅ Review findings with development team 218 | 2. ✅ Prioritize P0 recommendations for immediate implementation 219 | 3. ✅ Plan P1 features for next release cycle 220 | 4. ✅ Set up monitoring for key metrics 221 | 5. ✅ Schedule follow-up analysis (weekly recommended) 222 | 223 | --- 224 | 225 | *This analysis represents a snapshot of n8n-mcp usage during early adoption phase. Patterns may evolve as the user base grows and matures.* 226 | ``` -------------------------------------------------------------------------------- /tests/integration/n8n-api/utils/webhook-workflows.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Webhook Workflow Configuration 3 | * 4 | * Provides configuration and setup instructions for webhook workflows 5 | * required for integration testing. 6 | * 7 | * These workflows must be created manually in n8n and activated because 8 | * the n8n API doesn't support workflow activation. 9 | */ 10 | 11 | import { Workflow, WorkflowNode } from '../../../../src/types/n8n-api'; 12 | 13 | export interface WebhookWorkflowConfig { 14 | name: string; 15 | description: string; 16 | httpMethod: 'GET' | 'POST' | 'PUT' | 'DELETE'; 17 | path: string; 18 | nodes: Array<Partial<WorkflowNode>>; 19 | connections: Record<string, any>; 20 | } 21 | 22 | /** 23 | * Configuration for required webhook workflows 24 | */ 25 | export const WEBHOOK_WORKFLOW_CONFIGS: Record<string, WebhookWorkflowConfig> = { 26 | GET: { 27 | name: '[MCP-TEST] Webhook GET', 28 | description: 'Pre-activated webhook for GET method testing', 29 | httpMethod: 'GET', 30 | path: 'mcp-test-get', 31 | nodes: [ 32 | { 33 | name: 'Webhook', 34 | type: 'n8n-nodes-base.webhook', 35 | typeVersion: 2, 36 | position: [250, 300], 37 | parameters: { 38 | httpMethod: 'GET', 39 | path: 'mcp-test-get', 40 | responseMode: 'lastNode', 41 | options: {} 42 | } 43 | }, 44 | { 45 | name: 'Respond to Webhook', 46 | type: 'n8n-nodes-base.respondToWebhook', 47 | typeVersion: 1.1, 48 | position: [450, 300], 49 | parameters: { 50 | options: {} 51 | } 52 | } 53 | ], 54 | connections: { 55 | Webhook: { 56 | main: [[{ node: 'Respond to Webhook', type: 'main', index: 0 }]] 57 | } 58 | } 59 | }, 60 | POST: { 61 | name: '[MCP-TEST] Webhook POST', 62 | description: 'Pre-activated webhook for POST method testing', 63 | httpMethod: 'POST', 64 | path: 'mcp-test-post', 65 | nodes: [ 66 | { 67 | name: 'Webhook', 68 | type: 'n8n-nodes-base.webhook', 69 | typeVersion: 2, 70 | position: [250, 300], 71 | parameters: { 72 | httpMethod: 'POST', 73 | path: 'mcp-test-post', 74 | responseMode: 'lastNode', 75 | options: {} 76 | } 77 | }, 78 | { 79 | name: 'Respond to Webhook', 80 | type: 'n8n-nodes-base.respondToWebhook', 81 | typeVersion: 1.1, 82 | position: [450, 300], 83 | parameters: { 84 | options: {} 85 | } 86 | } 87 | ], 88 | connections: { 89 | Webhook: { 90 | main: [[{ node: 'Respond to Webhook', 
type: 'main', index: 0 }]] 91 | } 92 | } 93 | }, 94 | PUT: { 95 | name: '[MCP-TEST] Webhook PUT', 96 | description: 'Pre-activated webhook for PUT method testing', 97 | httpMethod: 'PUT', 98 | path: 'mcp-test-put', 99 | nodes: [ 100 | { 101 | name: 'Webhook', 102 | type: 'n8n-nodes-base.webhook', 103 | typeVersion: 2, 104 | position: [250, 300], 105 | parameters: { 106 | httpMethod: 'PUT', 107 | path: 'mcp-test-put', 108 | responseMode: 'lastNode', 109 | options: {} 110 | } 111 | }, 112 | { 113 | name: 'Respond to Webhook', 114 | type: 'n8n-nodes-base.respondToWebhook', 115 | typeVersion: 1.1, 116 | position: [450, 300], 117 | parameters: { 118 | options: {} 119 | } 120 | } 121 | ], 122 | connections: { 123 | Webhook: { 124 | main: [[{ node: 'Respond to Webhook', type: 'main', index: 0 }]] 125 | } 126 | } 127 | }, 128 | DELETE: { 129 | name: '[MCP-TEST] Webhook DELETE', 130 | description: 'Pre-activated webhook for DELETE method testing', 131 | httpMethod: 'DELETE', 132 | path: 'mcp-test-delete', 133 | nodes: [ 134 | { 135 | name: 'Webhook', 136 | type: 'n8n-nodes-base.webhook', 137 | typeVersion: 2, 138 | position: [250, 300], 139 | parameters: { 140 | httpMethod: 'DELETE', 141 | path: 'mcp-test-delete', 142 | responseMode: 'lastNode', 143 | options: {} 144 | } 145 | }, 146 | { 147 | name: 'Respond to Webhook', 148 | type: 'n8n-nodes-base.respondToWebhook', 149 | typeVersion: 1.1, 150 | position: [450, 300], 151 | parameters: { 152 | options: {} 153 | } 154 | } 155 | ], 156 | connections: { 157 | Webhook: { 158 | main: [[{ node: 'Respond to Webhook', type: 'main', index: 0 }]] 159 | } 160 | } 161 | } 162 | }; 163 | 164 | /** 165 | * Print setup instructions for webhook workflows 166 | */ 167 | export function printSetupInstructions(): void { 168 | console.log(` 169 | ╔════════════════════════════════════════════════════════════════╗ 170 | ║ WEBHOOK WORKFLOW SETUP REQUIRED ║ 171 | ╠════════════════════════════════════════════════════════════════╣ 172 | ║ ║ 173 | ║ Integration tests require 4 pre-activated webhook workflows: ║ 174 | ║ ║ 175 | ║ 1. Create workflows manually in n8n UI ║ 176 | ║ 2. Use the configurations shown below ║ 177 | ║ 3. ACTIVATE each workflow in n8n UI ║ 178 | ║ 4. Copy workflow IDs to .env file ║ 179 | ║ ║ 180 | ╚════════════════════════════════════════════════════════════════╝ 181 | 182 | Required workflows: 183 | `); 184 | 185 | Object.entries(WEBHOOK_WORKFLOW_CONFIGS).forEach(([method, config]) => { 186 | console.log(` 187 | ${method} Method: 188 | Name: ${config.name} 189 | Path: ${config.path} 190 | .env variable: N8N_TEST_WEBHOOK_${method}_ID 191 | 192 | Workflow Structure: 193 | 1. Webhook node (${method} method, path: ${config.path}) 194 | 2. Respond to Webhook node 195 | 196 | After creating: 197 | 1. Save the workflow 198 | 2. ACTIVATE the workflow (toggle in UI) 199 | 3. Copy the workflow ID 200 | 4. Add to .env: N8N_TEST_WEBHOOK_${method}_ID=<workflow-id> 201 | `); 202 | }); 203 | 204 | console.log(` 205 | See docs/local/integration-testing-plan.md for detailed instructions. 
206 | `); 207 | } 208 | 209 | /** 210 | * Generate workflow JSON for a webhook workflow 211 | * 212 | * @param method - HTTP method 213 | * @returns Partial workflow ready to create 214 | */ 215 | export function generateWebhookWorkflowJson( 216 | method: 'GET' | 'POST' | 'PUT' | 'DELETE' 217 | ): Partial<Workflow> { 218 | const config = WEBHOOK_WORKFLOW_CONFIGS[method]; 219 | 220 | return { 221 | name: config.name, 222 | nodes: config.nodes as any, 223 | connections: config.connections, 224 | active: false, // Will need to be activated manually 225 | settings: { 226 | executionOrder: 'v1' 227 | }, 228 | tags: ['mcp-integration-test', 'webhook-test'] 229 | }; 230 | } 231 | 232 | /** 233 | * Export all webhook workflow JSONs 234 | * 235 | * Returns an object with all 4 webhook workflow configurations 236 | * ready to be created in n8n. 237 | * 238 | * @returns Object with workflow configurations 239 | */ 240 | export function exportAllWebhookWorkflows(): Record<string, Partial<Workflow>> { 241 | return { 242 | GET: generateWebhookWorkflowJson('GET'), 243 | POST: generateWebhookWorkflowJson('POST'), 244 | PUT: generateWebhookWorkflowJson('PUT'), 245 | DELETE: generateWebhookWorkflowJson('DELETE') 246 | }; 247 | } 248 | 249 | /** 250 | * Get webhook URL for a given n8n instance and HTTP method 251 | * 252 | * @param n8nUrl - n8n instance URL 253 | * @param method - HTTP method 254 | * @returns Webhook URL 255 | */ 256 | export function getWebhookUrl( 257 | n8nUrl: string, 258 | method: 'GET' | 'POST' | 'PUT' | 'DELETE' 259 | ): string { 260 | const config = WEBHOOK_WORKFLOW_CONFIGS[method]; 261 | const baseUrl = n8nUrl.replace(/\/$/, ''); // Remove trailing slash 262 | return `${baseUrl}/webhook/${config.path}`; 263 | } 264 | 265 | /** 266 | * Validate webhook workflow structure 267 | * 268 | * Checks if a workflow matches the expected webhook workflow structure. 
269 | * 270 | * @param workflow - Workflow to validate 271 | * @param method - Expected HTTP method 272 | * @returns true if valid 273 | */ 274 | export function isValidWebhookWorkflow( 275 | workflow: Partial<Workflow>, 276 | method: 'GET' | 'POST' | 'PUT' | 'DELETE' 277 | ): boolean { 278 | if (!workflow.nodes || workflow.nodes.length < 1) { 279 | return false; 280 | } 281 | 282 | const webhookNode = workflow.nodes.find(n => n.type === 'n8n-nodes-base.webhook'); 283 | if (!webhookNode) { 284 | return false; 285 | } 286 | 287 | const params = webhookNode.parameters as any; 288 | return params.httpMethod === method; 289 | } 290 | ``` -------------------------------------------------------------------------------- /tests/unit/utils/template-node-resolver.test.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { describe, it, expect } from 'vitest'; 2 | import { resolveTemplateNodeTypes } from '../../../src/utils/template-node-resolver'; 3 | 4 | describe('Template Node Resolver', () => { 5 | describe('resolveTemplateNodeTypes', () => { 6 | it('should handle bare node names', () => { 7 | const result = resolveTemplateNodeTypes(['slack']); 8 | 9 | expect(result).toContain('n8n-nodes-base.slack'); 10 | expect(result).toContain('n8n-nodes-base.slackTrigger'); 11 | }); 12 | 13 | it('should handle HTTP variations', () => { 14 | const result = resolveTemplateNodeTypes(['http']); 15 | 16 | expect(result).toContain('n8n-nodes-base.httpRequest'); 17 | expect(result).toContain('n8n-nodes-base.webhook'); 18 | }); 19 | 20 | it('should handle httpRequest variations', () => { 21 | const result = resolveTemplateNodeTypes(['httprequest']); 22 | 23 | expect(result).toContain('n8n-nodes-base.httpRequest'); 24 | }); 25 | 26 | it('should handle partial prefix formats', () => { 27 | const result = resolveTemplateNodeTypes(['nodes-base.webhook']); 28 | 29 | expect(result).toContain('n8n-nodes-base.webhook'); 30 | expect(result).not.toContain('nodes-base.webhook'); 31 | }); 32 | 33 | it('should handle langchain nodes', () => { 34 | const result = resolveTemplateNodeTypes(['nodes-langchain.agent']); 35 | 36 | expect(result).toContain('@n8n/n8n-nodes-langchain.agent'); 37 | expect(result).not.toContain('nodes-langchain.agent'); 38 | }); 39 | 40 | it('should handle already correct formats', () => { 41 | const input = ['n8n-nodes-base.slack', '@n8n/n8n-nodes-langchain.agent']; 42 | const result = resolveTemplateNodeTypes(input); 43 | 44 | expect(result).toContain('n8n-nodes-base.slack'); 45 | expect(result).toContain('@n8n/n8n-nodes-langchain.agent'); 46 | }); 47 | 48 | it('should handle Google services', () => { 49 | const result = resolveTemplateNodeTypes(['google']); 50 | 51 | expect(result).toContain('n8n-nodes-base.googleSheets'); 52 | expect(result).toContain('n8n-nodes-base.googleDrive'); 53 | expect(result).toContain('n8n-nodes-base.googleCalendar'); 54 | }); 55 | 56 | it('should handle database variations', () => { 57 | const result = resolveTemplateNodeTypes(['database']); 58 | 59 | expect(result).toContain('n8n-nodes-base.postgres'); 60 | expect(result).toContain('n8n-nodes-base.mysql'); 61 | expect(result).toContain('n8n-nodes-base.mongoDb'); 62 | expect(result).toContain('n8n-nodes-base.postgresDatabase'); 63 | expect(result).toContain('n8n-nodes-base.mysqlDatabase'); 64 | }); 65 | 66 | it('should handle AI/LLM variations', () => { 67 | const result = resolveTemplateNodeTypes(['ai']); 68 | 69 | expect(result).toContain('n8n-nodes-base.openAi'); 70 
| expect(result).toContain('@n8n/n8n-nodes-langchain.agent'); 71 | expect(result).toContain('@n8n/n8n-nodes-langchain.lmChatOpenAi'); 72 | }); 73 | 74 | it('should handle email variations', () => { 75 | const result = resolveTemplateNodeTypes(['email']); 76 | 77 | expect(result).toContain('n8n-nodes-base.emailSend'); 78 | expect(result).toContain('n8n-nodes-base.emailReadImap'); 79 | expect(result).toContain('n8n-nodes-base.gmail'); 80 | expect(result).toContain('n8n-nodes-base.gmailTrigger'); 81 | }); 82 | 83 | it('should handle schedule/cron variations', () => { 84 | const result = resolveTemplateNodeTypes(['schedule']); 85 | 86 | expect(result).toContain('n8n-nodes-base.scheduleTrigger'); 87 | expect(result).toContain('n8n-nodes-base.cron'); 88 | }); 89 | 90 | it('should handle multiple inputs', () => { 91 | const result = resolveTemplateNodeTypes(['slack', 'webhook', 'http']); 92 | 93 | expect(result).toContain('n8n-nodes-base.slack'); 94 | expect(result).toContain('n8n-nodes-base.slackTrigger'); 95 | expect(result).toContain('n8n-nodes-base.webhook'); 96 | expect(result).toContain('n8n-nodes-base.httpRequest'); 97 | }); 98 | 99 | it('should not duplicate entries', () => { 100 | const result = resolveTemplateNodeTypes(['slack', 'n8n-nodes-base.slack']); 101 | 102 | const slackCount = result.filter(r => r === 'n8n-nodes-base.slack').length; 103 | expect(slackCount).toBe(1); 104 | }); 105 | 106 | it('should handle mixed case inputs', () => { 107 | const result = resolveTemplateNodeTypes(['Slack', 'WEBHOOK', 'HttpRequest']); 108 | 109 | expect(result).toContain('n8n-nodes-base.slack'); 110 | expect(result).toContain('n8n-nodes-base.webhook'); 111 | expect(result).toContain('n8n-nodes-base.httpRequest'); 112 | }); 113 | 114 | it('should handle common misspellings', () => { 115 | const result = resolveTemplateNodeTypes(['postgres', 'postgresql']); 116 | 117 | expect(result).toContain('n8n-nodes-base.postgres'); 118 | expect(result).toContain('n8n-nodes-base.postgresDatabase'); 119 | }); 120 | 121 | it('should handle code/javascript/python variations', () => { 122 | const result = resolveTemplateNodeTypes(['javascript', 'python', 'js']); 123 | 124 | result.forEach(() => { 125 | expect(result).toContain('n8n-nodes-base.code'); 126 | }); 127 | }); 128 | 129 | it('should handle trigger suffix variations', () => { 130 | const result = resolveTemplateNodeTypes(['slacktrigger', 'gmailtrigger']); 131 | 132 | expect(result).toContain('n8n-nodes-base.slackTrigger'); 133 | expect(result).toContain('n8n-nodes-base.gmailTrigger'); 134 | }); 135 | 136 | it('should handle sheet/sheets variations', () => { 137 | const result = resolveTemplateNodeTypes(['googlesheet', 'googlesheets']); 138 | 139 | result.forEach(() => { 140 | expect(result).toContain('n8n-nodes-base.googleSheets'); 141 | }); 142 | }); 143 | 144 | it('should return empty array for empty input', () => { 145 | const result = resolveTemplateNodeTypes([]); 146 | 147 | expect(result).toEqual([]); 148 | }); 149 | }); 150 | 151 | describe('Edge cases', () => { 152 | it('should handle undefined-like strings gracefully', () => { 153 | const result = resolveTemplateNodeTypes(['undefined', 'null', '']); 154 | 155 | // Should process them as regular strings 156 | expect(result).toBeDefined(); 157 | expect(Array.isArray(result)).toBe(true); 158 | }); 159 | 160 | it('should handle very long node names', () => { 161 | const longName = 'a'.repeat(100); 162 | const result = resolveTemplateNodeTypes([longName]); 163 | 164 | expect(result).toBeDefined(); 165 | 
expect(Array.isArray(result)).toBe(true); 166 | }); 167 | 168 | it('should handle special characters in node names', () => { 169 | const result = resolveTemplateNodeTypes(['node-with-dashes', 'node_with_underscores']); 170 | 171 | expect(result).toBeDefined(); 172 | expect(Array.isArray(result)).toBe(true); 173 | }); 174 | }); 175 | 176 | describe('Real-world scenarios from AI agents', () => { 177 | it('should handle common AI agent queries', () => { 178 | // These are actual queries that AI agents commonly try 179 | const testCases = [ 180 | { input: ['slack'], shouldContain: 'n8n-nodes-base.slack' }, 181 | { input: ['webhook'], shouldContain: 'n8n-nodes-base.webhook' }, 182 | { input: ['http'], shouldContain: 'n8n-nodes-base.httpRequest' }, 183 | { input: ['email'], shouldContain: 'n8n-nodes-base.gmail' }, 184 | { input: ['gpt'], shouldContain: 'n8n-nodes-base.openAi' }, 185 | { input: ['chatgpt'], shouldContain: 'n8n-nodes-base.openAi' }, 186 | { input: ['agent'], shouldContain: '@n8n/n8n-nodes-langchain.agent' }, 187 | { input: ['sql'], shouldContain: 'n8n-nodes-base.postgres' }, 188 | { input: ['api'], shouldContain: 'n8n-nodes-base.httpRequest' }, 189 | { input: ['csv'], shouldContain: 'n8n-nodes-base.spreadsheetFile' }, 190 | ]; 191 | 192 | testCases.forEach(({ input, shouldContain }) => { 193 | const result = resolveTemplateNodeTypes(input); 194 | expect(result).toContain(shouldContain); 195 | }); 196 | }); 197 | }); 198 | }); ``` -------------------------------------------------------------------------------- /tests/integration/setup/msw-test-server.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { setupServer } from 'msw/node'; 2 | import { HttpResponse, http } from 'msw'; 3 | import type { RequestHandler } from 'msw'; 4 | import { handlers as defaultHandlers } from '../../mocks/n8n-api/handlers'; 5 | 6 | /** 7 | * MSW server instance for integration tests 8 | * This is separate from the global MSW setup to allow for more control 9 | * in integration tests that may need specific handler configurations 10 | */ 11 | export const integrationTestServer = setupServer(...defaultHandlers); 12 | 13 | /** 14 | * Enhanced server controls for integration tests 15 | */ 16 | export const mswTestServer = { 17 | /** 18 | * Start the server with specific options 19 | */ 20 | start: (options?: { 21 | onUnhandledRequest?: 'error' | 'warn' | 'bypass'; 22 | quiet?: boolean; 23 | }) => { 24 | integrationTestServer.listen({ 25 | onUnhandledRequest: options?.onUnhandledRequest || 'warn', 26 | }); 27 | 28 | if (!options?.quiet && process.env.MSW_DEBUG === 'true') { 29 | integrationTestServer.events.on('request:start', ({ request }) => { 30 | console.log('[Integration MSW] %s %s', request.method, request.url); 31 | }); 32 | } 33 | }, 34 | 35 | /** 36 | * Stop the server 37 | */ 38 | stop: () => { 39 | integrationTestServer.close(); 40 | }, 41 | 42 | /** 43 | * Reset handlers to defaults 44 | */ 45 | reset: () => { 46 | integrationTestServer.resetHandlers(); 47 | }, 48 | 49 | /** 50 | * Add handlers for a specific test 51 | */ 52 | use: (...handlers: RequestHandler[]) => { 53 | integrationTestServer.use(...handlers); 54 | }, 55 | 56 | /** 57 | * Replace all handlers (useful for isolated test scenarios) 58 | */ 59 | replaceAll: (...handlers: RequestHandler[]) => { 60 | integrationTestServer.resetHandlers(...handlers); 61 | }, 62 | 63 | /** 64 | * Wait for a specific number of requests to be made 65 | */ 66 | waitForRequests: (count: number, 
timeout = 5000): Promise<Request[]> => { 67 | return new Promise((resolve, reject) => { 68 | const requests: Request[] = []; 69 | let timeoutId: NodeJS.Timeout | null = null; 70 | 71 | // Event handler function to allow cleanup 72 | const handleRequest = ({ request }: { request: Request }) => { 73 | requests.push(request); 74 | if (requests.length === count) { 75 | cleanup(); 76 | resolve(requests); 77 | } 78 | }; 79 | 80 | // Cleanup function to remove listener and clear timeout 81 | const cleanup = () => { 82 | if (timeoutId) { 83 | clearTimeout(timeoutId); 84 | timeoutId = null; 85 | } 86 | integrationTestServer.events.removeListener('request:match', handleRequest); 87 | }; 88 | 89 | // Set timeout 90 | timeoutId = setTimeout(() => { 91 | cleanup(); 92 | reject(new Error(`Timeout waiting for ${count} requests. Got ${requests.length}`)); 93 | }, timeout); 94 | 95 | // Add event listener 96 | integrationTestServer.events.on('request:match', handleRequest); 97 | }); 98 | }, 99 | 100 | /** 101 | * Verify no unhandled requests were made 102 | */ 103 | verifyNoUnhandledRequests: (): Promise<void> => { 104 | return new Promise((resolve, reject) => { 105 | let hasUnhandled = false; 106 | let timeoutId: NodeJS.Timeout | null = null; 107 | 108 | const handleUnhandled = ({ request }: { request: Request }) => { 109 | hasUnhandled = true; 110 | cleanup(); 111 | reject(new Error(`Unhandled request: ${request.method} ${request.url}`)); 112 | }; 113 | 114 | const cleanup = () => { 115 | if (timeoutId) { 116 | clearTimeout(timeoutId); 117 | timeoutId = null; 118 | } 119 | integrationTestServer.events.removeListener('request:unhandled', handleUnhandled); 120 | }; 121 | 122 | // Add event listener 123 | integrationTestServer.events.on('request:unhandled', handleUnhandled); 124 | 125 | // Give a small delay to allow any pending requests 126 | timeoutId = setTimeout(() => { 127 | cleanup(); 128 | if (!hasUnhandled) { 129 | resolve(); 130 | } 131 | }, 100); 132 | }); 133 | }, 134 | 135 | /** 136 | * Create a scoped server for a specific test 137 | * Automatically starts and stops the server 138 | */ 139 | withScope: async <T>( 140 | handlers: RequestHandler[], 141 | testFn: () => Promise<T> 142 | ): Promise<T> => { 143 | // Save current handlers 144 | const currentHandlers = [...defaultHandlers]; 145 | 146 | try { 147 | // Replace with scoped handlers 148 | integrationTestServer.resetHandlers(...handlers); 149 | 150 | // Run the test 151 | return await testFn(); 152 | } finally { 153 | // Restore original handlers 154 | integrationTestServer.resetHandlers(...currentHandlers); 155 | } 156 | } 157 | }; 158 | 159 | /** 160 | * Integration test utilities for n8n API mocking 161 | */ 162 | export const n8nApiMock = { 163 | /** 164 | * Mock a successful workflow creation 165 | */ 166 | mockWorkflowCreate: (response?: any) => { 167 | return http.post('*/api/v1/workflows', async ({ request }) => { 168 | const body = await request.json() as Record<string, any>; 169 | return HttpResponse.json({ 170 | data: { 171 | id: 'test-workflow-id', 172 | ...body, 173 | ...response, 174 | createdAt: new Date().toISOString(), 175 | updatedAt: new Date().toISOString() 176 | } 177 | }, { status: 201 }); 178 | }); 179 | }, 180 | 181 | /** 182 | * Mock a workflow validation endpoint 183 | */ 184 | mockWorkflowValidate: (validationResult: { valid: boolean; errors?: any[] }) => { 185 | return http.post('*/api/v1/workflows/validate', async () => { 186 | return HttpResponse.json(validationResult); 187 | }); 188 | }, 189 | 190 | /** 191 | 
* Mock webhook execution 192 | */ 193 | mockWebhookExecution: (webhookPath: string, response: any) => { 194 | return http.all(`*/webhook/${webhookPath}`, async ({ request }) => { 195 | const body = request.body ? await request.json() : undefined; 196 | 197 | // Simulate webhook processing 198 | return HttpResponse.json({ 199 | ...response, 200 | webhookReceived: { 201 | path: webhookPath, 202 | method: request.method, 203 | body, 204 | timestamp: new Date().toISOString() 205 | } 206 | }); 207 | }); 208 | }, 209 | 210 | /** 211 | * Mock API error responses 212 | */ 213 | mockError: (endpoint: string, error: { status: number; message: string; code?: string }) => { 214 | return http.all(endpoint, () => { 215 | return HttpResponse.json( 216 | { 217 | message: error.message, 218 | code: error.code || 'ERROR', 219 | timestamp: new Date().toISOString() 220 | }, 221 | { status: error.status } 222 | ); 223 | }); 224 | }, 225 | 226 | /** 227 | * Mock rate limiting 228 | */ 229 | mockRateLimit: (endpoint: string) => { 230 | let requestCount = 0; 231 | const limit = 5; 232 | 233 | return http.all(endpoint, () => { 234 | requestCount++; 235 | 236 | if (requestCount > limit) { 237 | return HttpResponse.json( 238 | { 239 | message: 'Rate limit exceeded', 240 | code: 'RATE_LIMIT', 241 | retryAfter: 60 242 | }, 243 | { 244 | status: 429, 245 | headers: { 246 | 'X-RateLimit-Limit': String(limit), 247 | 'X-RateLimit-Remaining': '0', 248 | 'X-RateLimit-Reset': String(Date.now() + 60000) 249 | } 250 | } 251 | ); 252 | } 253 | 254 | return HttpResponse.json({ success: true }); 255 | }); 256 | } 257 | }; 258 | 259 | /** 260 | * Test data builders for integration tests 261 | */ 262 | export const testDataBuilders = { 263 | /** 264 | * Build a workflow for testing 265 | */ 266 | workflow: (overrides?: any) => ({ 267 | name: 'Integration Test Workflow', 268 | nodes: [ 269 | { 270 | id: 'start', 271 | name: 'Start', 272 | type: 'n8n-nodes-base.start', 273 | typeVersion: 1, 274 | position: [250, 300], 275 | parameters: {} 276 | } 277 | ], 278 | connections: {}, 279 | settings: {}, 280 | active: false, 281 | ...overrides 282 | }), 283 | 284 | /** 285 | * Build an execution result 286 | */ 287 | execution: (workflowId: string, overrides?: any) => ({ 288 | id: `exec_${Date.now()}`, 289 | workflowId, 290 | status: 'success', 291 | mode: 'manual', 292 | startedAt: new Date().toISOString(), 293 | stoppedAt: new Date().toISOString(), 294 | data: { 295 | resultData: { 296 | runData: {} 297 | } 298 | }, 299 | ...overrides 300 | }) 301 | }; ```