This is page 34 of 59. Use http://codebase.md/czlonkowski/n8n-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── _config.yml
├── .claude
│   └── agents
│       ├── code-reviewer.md
│       ├── context-manager.md
│       ├── debugger.md
│       ├── deployment-engineer.md
│       ├── mcp-backend-engineer.md
│       ├── n8n-mcp-tester.md
│       ├── technical-researcher.md
│       └── test-automator.md
├── .dockerignore
├── .env.docker
├── .env.example
├── .env.n8n.example
├── .env.test
├── .env.test.example
├── .github
│   ├── ABOUT.md
│   ├── BENCHMARK_THRESHOLDS.md
│   ├── FUNDING.yml
│   ├── gh-pages.yml
│   ├── secret_scanning.yml
│   └── workflows
│       ├── benchmark-pr.yml
│       ├── benchmark.yml
│       ├── docker-build-fast.yml
│       ├── docker-build-n8n.yml
│       ├── docker-build.yml
│       ├── release.yml
│       ├── test.yml
│       └── update-n8n-deps.yml
├── .gitignore
├── .npmignore
├── ATTRIBUTION.md
├── CHANGELOG.md
├── CLAUDE.md
├── codecov.yml
├── coverage.json
├── data
│   ├── .gitkeep
│   ├── nodes.db
│   ├── nodes.db-shm
│   ├── nodes.db-wal
│   └── templates.db
├── deploy
│   └── quick-deploy-n8n.sh
├── docker
│   ├── docker-entrypoint.sh
│   ├── n8n-mcp
│   ├── parse-config.js
│   └── README.md
├── docker-compose.buildkit.yml
├── docker-compose.extract.yml
├── docker-compose.n8n.yml
├── docker-compose.override.yml.example
├── docker-compose.test-n8n.yml
├── docker-compose.yml
├── Dockerfile
├── Dockerfile.railway
├── Dockerfile.test
├── docs
│   ├── AUTOMATED_RELEASES.md
│   ├── BENCHMARKS.md
│   ├── CHANGELOG.md
│   ├── CLAUDE_CODE_SETUP.md
│   ├── CLAUDE_INTERVIEW.md
│   ├── CODECOV_SETUP.md
│   ├── CODEX_SETUP.md
│   ├── CURSOR_SETUP.md
│   ├── DEPENDENCY_UPDATES.md
│   ├── DOCKER_README.md
│   ├── DOCKER_TROUBLESHOOTING.md
│   ├── FINAL_AI_VALIDATION_SPEC.md
│   ├── FLEXIBLE_INSTANCE_CONFIGURATION.md
│   ├── HTTP_DEPLOYMENT.md
│   ├── img
│   │   ├── cc_command.png
│   │   ├── cc_connected.png
│   │   ├── codex_connected.png
│   │   ├── cursor_tut.png
│   │   ├── Railway_api.png
│   │   ├── Railway_server_address.png
│   │   ├── vsc_ghcp_chat_agent_mode.png
│   │   ├── vsc_ghcp_chat_instruction_files.png
│   │   ├── vsc_ghcp_chat_thinking_tool.png
│   │   └── windsurf_tut.png
│   ├── INSTALLATION.md
│   ├── LIBRARY_USAGE.md
│   ├── local
│   │   ├── DEEP_DIVE_ANALYSIS_2025-10-02.md
│   │   ├── DEEP_DIVE_ANALYSIS_README.md
│   │   ├── Deep_dive_p1_p2.md
│   │   ├── integration-testing-plan.md
│   │   ├── integration-tests-phase1-summary.md
│   │   ├── N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md
│   │   ├── P0_IMPLEMENTATION_PLAN.md
│   │   └── TEMPLATE_MINING_ANALYSIS.md
│   ├── MCP_ESSENTIALS_README.md
│   ├── MCP_QUICK_START_GUIDE.md
│   ├── N8N_DEPLOYMENT.md
│   ├── RAILWAY_DEPLOYMENT.md
│   ├── README_CLAUDE_SETUP.md
│   ├── README.md
│   ├── tools-documentation-usage.md
│   ├── VS_CODE_PROJECT_SETUP.md
│   ├── WINDSURF_SETUP.md
│   └── workflow-diff-examples.md
├── examples
│   └── enhanced-documentation-demo.js
├── fetch_log.txt
├── LICENSE
├── MEMORY_N8N_UPDATE.md
├── MEMORY_TEMPLATE_UPDATE.md
├── monitor_fetch.sh
├── N8N_HTTP_STREAMABLE_SETUP.md
├── n8n-nodes.db
├── P0-R3-TEST-PLAN.md
├── package-lock.json
├── package.json
├── package.runtime.json
├── PRIVACY.md
├── railway.json
├── README.md
├── renovate.json
├── scripts
│   ├── analyze-optimization.sh
│   ├── audit-schema-coverage.ts
│   ├── build-optimized.sh
│   ├── compare-benchmarks.js
│   ├── demo-optimization.sh
│   ├── deploy-http.sh
│   ├── deploy-to-vm.sh
│   ├── export-webhook-workflows.ts
│   ├── extract-changelog.js
│   ├── extract-from-docker.js
│   ├── extract-nodes-docker.sh
│   ├── extract-nodes-simple.sh
│   ├── format-benchmark-results.js
│   ├── generate-benchmark-stub.js
│   ├── generate-detailed-reports.js
│   ├── generate-test-summary.js
│
├── http-bridge.js │ ├── mcp-http-client.js │ ├── migrate-nodes-fts.ts │ ├── migrate-tool-docs.ts │ ├── n8n-docs-mcp.service │ ├── nginx-n8n-mcp.conf │ ├── prebuild-fts5.ts │ ├── prepare-release.js │ ├── publish-npm-quick.sh │ ├── publish-npm.sh │ ├── quick-test.ts │ ├── run-benchmarks-ci.js │ ├── sync-runtime-version.js │ ├── test-ai-validation-debug.ts │ ├── test-code-node-enhancements.ts │ ├── test-code-node-fixes.ts │ ├── test-docker-config.sh │ ├── test-docker-fingerprint.ts │ ├── test-docker-optimization.sh │ ├── test-docker.sh │ ├── test-empty-connection-validation.ts │ ├── test-error-message-tracking.ts │ ├── test-error-output-validation.ts │ ├── test-error-validation.js │ ├── test-essentials.ts │ ├── test-expression-code-validation.ts │ ├── test-expression-format-validation.js │ ├── test-fts5-search.ts │ ├── test-fuzzy-fix.ts │ ├── test-fuzzy-simple.ts │ ├── test-helpers-validation.ts │ ├── test-http-search.ts │ ├── test-http.sh │ ├── test-jmespath-validation.ts │ ├── test-multi-tenant-simple.ts │ ├── test-multi-tenant.ts │ ├── test-n8n-integration.sh │ ├── test-node-info.js │ ├── test-node-type-validation.ts │ ├── test-nodes-base-prefix.ts │ ├── test-operation-validation.ts │ ├── test-optimized-docker.sh │ ├── test-release-automation.js │ ├── test-search-improvements.ts │ ├── test-security.ts │ ├── test-single-session.sh │ ├── test-sqljs-triggers.ts │ ├── test-telemetry-debug.ts │ ├── test-telemetry-direct.ts │ ├── test-telemetry-env.ts │ ├── test-telemetry-integration.ts │ ├── test-telemetry-no-select.ts │ ├── test-telemetry-security.ts │ ├── test-telemetry-simple.ts │ ├── test-typeversion-validation.ts │ ├── test-url-configuration.ts │ ├── test-user-id-persistence.ts │ ├── test-webhook-validation.ts │ ├── test-workflow-insert.ts │ ├── test-workflow-sanitizer.ts │ ├── test-workflow-tracking-debug.ts │ ├── update-and-publish-prep.sh │ ├── update-n8n-deps.js │ ├── update-readme-version.js │ ├── vitest-benchmark-json-reporter.js │ └── vitest-benchmark-reporter.ts ├── SECURITY.md ├── src │ ├── config │ │ └── n8n-api.ts │ ├── data │ │ └── canonical-ai-tool-examples.json │ ├── database │ │ ├── database-adapter.ts │ │ ├── migrations │ │ │ └── add-template-node-configs.sql │ │ ├── node-repository.ts │ │ ├── nodes.db │ │ ├── schema-optimized.sql │ │ └── schema.sql │ ├── errors │ │ └── validation-service-error.ts │ ├── http-server-single-session.ts │ ├── http-server.ts │ ├── index.ts │ ├── loaders │ │ └── node-loader.ts │ ├── mappers │ │ └── docs-mapper.ts │ ├── mcp │ │ ├── handlers-n8n-manager.ts │ │ ├── handlers-workflow-diff.ts │ │ ├── index.ts │ │ ├── server.ts │ │ ├── stdio-wrapper.ts │ │ ├── tool-docs │ │ │ ├── configuration │ │ │ │ ├── get-node-as-tool-info.ts │ │ │ │ ├── get-node-documentation.ts │ │ │ │ ├── get-node-essentials.ts │ │ │ │ ├── get-node-info.ts │ │ │ │ ├── get-property-dependencies.ts │ │ │ │ ├── index.ts │ │ │ │ └── search-node-properties.ts │ │ │ ├── discovery │ │ │ │ ├── get-database-statistics.ts │ │ │ │ ├── index.ts │ │ │ │ ├── list-ai-tools.ts │ │ │ │ ├── list-nodes.ts │ │ │ │ └── search-nodes.ts │ │ │ ├── guides │ │ │ │ ├── ai-agents-guide.ts │ │ │ │ └── index.ts │ │ │ ├── index.ts │ │ │ ├── system │ │ │ │ ├── index.ts │ │ │ │ ├── n8n-diagnostic.ts │ │ │ │ ├── n8n-health-check.ts │ │ │ │ ├── n8n-list-available-tools.ts │ │ │ │ └── tools-documentation.ts │ │ │ ├── templates │ │ │ │ ├── get-template.ts │ │ │ │ ├── get-templates-for-task.ts │ │ │ │ ├── index.ts │ │ │ │ ├── list-node-templates.ts │ │ │ │ ├── list-tasks.ts │ │ │ │ ├── 
search-templates-by-metadata.ts │ │ │ │ └── search-templates.ts │ │ │ ├── types.ts │ │ │ ├── validation │ │ │ │ ├── index.ts │ │ │ │ ├── validate-node-minimal.ts │ │ │ │ ├── validate-node-operation.ts │ │ │ │ ├── validate-workflow-connections.ts │ │ │ │ ├── validate-workflow-expressions.ts │ │ │ │ └── validate-workflow.ts │ │ │ └── workflow_management │ │ │ ├── index.ts │ │ │ ├── n8n-autofix-workflow.ts │ │ │ ├── n8n-create-workflow.ts │ │ │ ├── n8n-delete-execution.ts │ │ │ ├── n8n-delete-workflow.ts │ │ │ ├── n8n-get-execution.ts │ │ │ ├── n8n-get-workflow-details.ts │ │ │ ├── n8n-get-workflow-minimal.ts │ │ │ ├── n8n-get-workflow-structure.ts │ │ │ ├── n8n-get-workflow.ts │ │ │ ├── n8n-list-executions.ts │ │ │ ├── n8n-list-workflows.ts │ │ │ ├── n8n-trigger-webhook-workflow.ts │ │ │ ├── n8n-update-full-workflow.ts │ │ │ ├── n8n-update-partial-workflow.ts │ │ │ └── n8n-validate-workflow.ts │ │ ├── tools-documentation.ts │ │ ├── tools-n8n-friendly.ts │ │ ├── tools-n8n-manager.ts │ │ ├── tools.ts │ │ └── workflow-examples.ts │ ├── mcp-engine.ts │ ├── mcp-tools-engine.ts │ ├── n8n │ │ ├── MCPApi.credentials.ts │ │ └── MCPNode.node.ts │ ├── parsers │ │ ├── node-parser.ts │ │ ├── property-extractor.ts │ │ └── simple-parser.ts │ ├── scripts │ │ ├── debug-http-search.ts │ │ ├── extract-from-docker.ts │ │ ├── fetch-templates-robust.ts │ │ ├── fetch-templates.ts │ │ ├── rebuild-database.ts │ │ ├── rebuild-optimized.ts │ │ ├── rebuild.ts │ │ ├── sanitize-templates.ts │ │ ├── seed-canonical-ai-examples.ts │ │ ├── test-autofix-documentation.ts │ │ ├── test-autofix-workflow.ts │ │ ├── test-execution-filtering.ts │ │ ├── test-node-suggestions.ts │ │ ├── test-protocol-negotiation.ts │ │ ├── test-summary.ts │ │ ├── test-webhook-autofix.ts │ │ ├── validate.ts │ │ └── validation-summary.ts │ ├── services │ │ ├── ai-node-validator.ts │ │ ├── ai-tool-validators.ts │ │ ├── confidence-scorer.ts │ │ ├── config-validator.ts │ │ ├── enhanced-config-validator.ts │ │ ├── example-generator.ts │ │ ├── execution-processor.ts │ │ ├── expression-format-validator.ts │ │ ├── expression-validator.ts │ │ ├── n8n-api-client.ts │ │ ├── n8n-validation.ts │ │ ├── node-documentation-service.ts │ │ ├── node-sanitizer.ts │ │ ├── node-similarity-service.ts │ │ ├── node-specific-validators.ts │ │ ├── operation-similarity-service.ts │ │ ├── property-dependencies.ts │ │ ├── property-filter.ts │ │ ├── resource-similarity-service.ts │ │ ├── sqlite-storage-service.ts │ │ ├── task-templates.ts │ │ ├── universal-expression-validator.ts │ │ ├── workflow-auto-fixer.ts │ │ ├── workflow-diff-engine.ts │ │ └── workflow-validator.ts │ ├── telemetry │ │ ├── batch-processor.ts │ │ ├── config-manager.ts │ │ ├── early-error-logger.ts │ │ ├── error-sanitization-utils.ts │ │ ├── error-sanitizer.ts │ │ ├── event-tracker.ts │ │ ├── event-validator.ts │ │ ├── index.ts │ │ ├── performance-monitor.ts │ │ ├── rate-limiter.ts │ │ ├── startup-checkpoints.ts │ │ ├── telemetry-error.ts │ │ ├── telemetry-manager.ts │ │ ├── telemetry-types.ts │ │ └── workflow-sanitizer.ts │ ├── templates │ │ ├── batch-processor.ts │ │ ├── metadata-generator.ts │ │ ├── README.md │ │ ├── template-fetcher.ts │ │ ├── template-repository.ts │ │ └── template-service.ts │ ├── types │ │ ├── index.ts │ │ ├── instance-context.ts │ │ ├── n8n-api.ts │ │ ├── node-types.ts │ │ └── workflow-diff.ts │ └── utils │ ├── auth.ts │ ├── bridge.ts │ ├── cache-utils.ts │ ├── console-manager.ts │ ├── documentation-fetcher.ts │ ├── enhanced-documentation-fetcher.ts │ ├── error-handler.ts │ ├── 
example-generator.ts │ ├── fixed-collection-validator.ts │ ├── logger.ts │ ├── mcp-client.ts │ ├── n8n-errors.ts │ ├── node-source-extractor.ts │ ├── node-type-normalizer.ts │ ├── node-type-utils.ts │ ├── node-utils.ts │ ├── npm-version-checker.ts │ ├── protocol-version.ts │ ├── simple-cache.ts │ ├── ssrf-protection.ts │ ├── template-node-resolver.ts │ ├── template-sanitizer.ts │ ├── url-detector.ts │ ├── validation-schemas.ts │ └── version.ts ├── test-output.txt ├── test-reinit-fix.sh ├── tests │ ├── __snapshots__ │ │ └── .gitkeep │ ├── auth.test.ts │ ├── benchmarks │ │ ├── database-queries.bench.ts │ │ ├── index.ts │ │ ├── mcp-tools.bench.ts │ │ ├── mcp-tools.bench.ts.disabled │ │ ├── mcp-tools.bench.ts.skip │ │ ├── node-loading.bench.ts.disabled │ │ ├── README.md │ │ ├── search-operations.bench.ts.disabled │ │ └── validation-performance.bench.ts.disabled │ ├── bridge.test.ts │ ├── comprehensive-extraction-test.js │ ├── data │ │ └── .gitkeep │ ├── debug-slack-doc.js │ ├── demo-enhanced-documentation.js │ ├── docker-tests-README.md │ ├── error-handler.test.ts │ ├── examples │ │ └── using-database-utils.test.ts │ ├── extracted-nodes-db │ │ ├── database-import.json │ │ ├── extraction-report.json │ │ ├── insert-nodes.sql │ │ ├── n8n-nodes-base__Airtable.json │ │ ├── n8n-nodes-base__Discord.json │ │ ├── n8n-nodes-base__Function.json │ │ ├── n8n-nodes-base__HttpRequest.json │ │ ├── n8n-nodes-base__If.json │ │ ├── n8n-nodes-base__Slack.json │ │ ├── n8n-nodes-base__SplitInBatches.json │ │ └── n8n-nodes-base__Webhook.json │ ├── factories │ │ ├── node-factory.ts │ │ └── property-definition-factory.ts │ ├── fixtures │ │ ├── .gitkeep │ │ ├── database │ │ │ └── test-nodes.json │ │ ├── factories │ │ │ ├── node.factory.ts │ │ │ └── parser-node.factory.ts │ │ └── template-configs.ts │ ├── helpers │ │ └── env-helpers.ts │ ├── http-server-auth.test.ts │ ├── integration │ │ ├── ai-validation │ │ │ ├── ai-agent-validation.test.ts │ │ │ ├── ai-tool-validation.test.ts │ │ │ ├── chat-trigger-validation.test.ts │ │ │ ├── e2e-validation.test.ts │ │ │ ├── helpers.ts │ │ │ ├── llm-chain-validation.test.ts │ │ │ ├── README.md │ │ │ └── TEST_REPORT.md │ │ ├── ci │ │ │ └── database-population.test.ts │ │ ├── database │ │ │ ├── connection-management.test.ts │ │ │ ├── empty-database.test.ts │ │ │ ├── fts5-search.test.ts │ │ │ ├── node-fts5-search.test.ts │ │ │ ├── node-repository.test.ts │ │ │ ├── performance.test.ts │ │ │ ├── sqljs-memory-leak.test.ts │ │ │ ├── template-node-configs.test.ts │ │ │ ├── template-repository.test.ts │ │ │ ├── test-utils.ts │ │ │ └── transactions.test.ts │ │ ├── database-integration.test.ts │ │ ├── docker │ │ │ ├── docker-config.test.ts │ │ │ ├── docker-entrypoint.test.ts │ │ │ └── test-helpers.ts │ │ ├── flexible-instance-config.test.ts │ │ ├── mcp │ │ │ └── template-examples-e2e.test.ts │ │ ├── mcp-protocol │ │ │ ├── basic-connection.test.ts │ │ │ ├── error-handling.test.ts │ │ │ ├── performance.test.ts │ │ │ ├── protocol-compliance.test.ts │ │ │ ├── README.md │ │ │ ├── session-management.test.ts │ │ │ ├── test-helpers.ts │ │ │ ├── tool-invocation.test.ts │ │ │ └── workflow-error-validation.test.ts │ │ ├── msw-setup.test.ts │ │ ├── n8n-api │ │ │ ├── executions │ │ │ │ ├── delete-execution.test.ts │ │ │ │ ├── get-execution.test.ts │ │ │ │ ├── list-executions.test.ts │ │ │ │ └── trigger-webhook.test.ts │ │ │ ├── scripts │ │ │ │ └── cleanup-orphans.ts │ │ │ ├── system │ │ │ │ ├── diagnostic.test.ts │ │ │ │ ├── health-check.test.ts │ │ │ │ └── list-tools.test.ts │ │ │ ├── test-connection.ts │ 
│ │ ├── types │ │ │ │ └── mcp-responses.ts │ │ │ ├── utils │ │ │ │ ├── cleanup-helpers.ts │ │ │ │ ├── credentials.ts │ │ │ │ ├── factories.ts │ │ │ │ ├── fixtures.ts │ │ │ │ ├── mcp-context.ts │ │ │ │ ├── n8n-client.ts │ │ │ │ ├── node-repository.ts │ │ │ │ ├── response-types.ts │ │ │ │ ├── test-context.ts │ │ │ │ └── webhook-workflows.ts │ │ │ └── workflows │ │ │ ├── autofix-workflow.test.ts │ │ │ ├── create-workflow.test.ts │ │ │ ├── delete-workflow.test.ts │ │ │ ├── get-workflow-details.test.ts │ │ │ ├── get-workflow-minimal.test.ts │ │ │ ├── get-workflow-structure.test.ts │ │ │ ├── get-workflow.test.ts │ │ │ ├── list-workflows.test.ts │ │ │ ├── smart-parameters.test.ts │ │ │ ├── update-partial-workflow.test.ts │ │ │ ├── update-workflow.test.ts │ │ │ └── validate-workflow.test.ts │ │ ├── security │ │ │ ├── command-injection-prevention.test.ts │ │ │ └── rate-limiting.test.ts │ │ ├── setup │ │ │ ├── integration-setup.ts │ │ │ └── msw-test-server.ts │ │ ├── telemetry │ │ │ ├── docker-user-id-stability.test.ts │ │ │ └── mcp-telemetry.test.ts │ │ ├── templates │ │ │ └── metadata-operations.test.ts │ │ └── workflow-creation-node-type-format.test.ts │ ├── logger.test.ts │ ├── MOCKING_STRATEGY.md │ ├── mocks │ │ ├── n8n-api │ │ │ ├── data │ │ │ │ ├── credentials.ts │ │ │ │ ├── executions.ts │ │ │ │ └── workflows.ts │ │ │ ├── handlers.ts │ │ │ └── index.ts │ │ └── README.md │ ├── node-storage-export.json │ ├── setup │ │ ├── global-setup.ts │ │ ├── msw-setup.ts │ │ ├── TEST_ENV_DOCUMENTATION.md │ │ └── test-env.ts │ ├── test-database-extraction.js │ ├── test-direct-extraction.js │ ├── test-enhanced-documentation.js │ ├── test-enhanced-integration.js │ ├── test-mcp-extraction.js │ ├── test-mcp-server-extraction.js │ ├── test-mcp-tools-integration.js │ ├── test-node-documentation-service.js │ ├── test-node-list.js │ ├── test-package-info.js │ ├── test-parsing-operations.js │ ├── test-slack-node-complete.js │ ├── test-small-rebuild.js │ ├── test-sqlite-search.js │ ├── test-storage-system.js │ ├── unit │ │ ├── __mocks__ │ │ │ ├── n8n-nodes-base.test.ts │ │ │ ├── n8n-nodes-base.ts │ │ │ └── README.md │ │ ├── database │ │ │ ├── __mocks__ │ │ │ │ └── better-sqlite3.ts │ │ │ ├── database-adapter-unit.test.ts │ │ │ ├── node-repository-core.test.ts │ │ │ ├── node-repository-operations.test.ts │ │ │ ├── node-repository-outputs.test.ts │ │ │ ├── README.md │ │ │ └── template-repository-core.test.ts │ │ ├── docker │ │ │ ├── config-security.test.ts │ │ │ ├── edge-cases.test.ts │ │ │ ├── parse-config.test.ts │ │ │ └── serve-command.test.ts │ │ ├── errors │ │ │ └── validation-service-error.test.ts │ │ ├── examples │ │ │ └── using-n8n-nodes-base-mock.test.ts │ │ ├── flexible-instance-security-advanced.test.ts │ │ ├── flexible-instance-security.test.ts │ │ ├── http-server │ │ │ └── multi-tenant-support.test.ts │ │ ├── http-server-n8n-mode.test.ts │ │ ├── http-server-n8n-reinit.test.ts │ │ ├── http-server-session-management.test.ts │ │ ├── loaders │ │ │ └── node-loader.test.ts │ │ ├── mappers │ │ │ └── docs-mapper.test.ts │ │ ├── mcp │ │ │ ├── get-node-essentials-examples.test.ts │ │ │ ├── handlers-n8n-manager-simple.test.ts │ │ │ ├── handlers-n8n-manager.test.ts │ │ │ ├── handlers-workflow-diff.test.ts │ │ │ ├── lru-cache-behavior.test.ts │ │ │ ├── multi-tenant-tool-listing.test.ts.disabled │ │ │ ├── parameter-validation.test.ts │ │ │ ├── search-nodes-examples.test.ts │ │ │ ├── tools-documentation.test.ts │ │ │ └── tools.test.ts │ │ ├── monitoring │ │ │ └── cache-metrics.test.ts │ │ ├── 
MULTI_TENANT_TEST_COVERAGE.md │ │ ├── multi-tenant-integration.test.ts │ │ ├── parsers │ │ │ ├── node-parser-outputs.test.ts │ │ │ ├── node-parser.test.ts │ │ │ ├── property-extractor.test.ts │ │ │ └── simple-parser.test.ts │ │ ├── scripts │ │ │ └── fetch-templates-extraction.test.ts │ │ ├── services │ │ │ ├── ai-node-validator.test.ts │ │ │ ├── ai-tool-validators.test.ts │ │ │ ├── confidence-scorer.test.ts │ │ │ ├── config-validator-basic.test.ts │ │ │ ├── config-validator-edge-cases.test.ts │ │ │ ├── config-validator-node-specific.test.ts │ │ │ ├── config-validator-security.test.ts │ │ │ ├── debug-validator.test.ts │ │ │ ├── enhanced-config-validator-integration.test.ts │ │ │ ├── enhanced-config-validator-operations.test.ts │ │ │ ├── enhanced-config-validator.test.ts │ │ │ ├── example-generator.test.ts │ │ │ ├── execution-processor.test.ts │ │ │ ├── expression-format-validator.test.ts │ │ │ ├── expression-validator-edge-cases.test.ts │ │ │ ├── expression-validator.test.ts │ │ │ ├── fixed-collection-validation.test.ts │ │ │ ├── loop-output-edge-cases.test.ts │ │ │ ├── n8n-api-client.test.ts │ │ │ ├── n8n-validation.test.ts │ │ │ ├── node-sanitizer.test.ts │ │ │ ├── node-similarity-service.test.ts │ │ │ ├── node-specific-validators.test.ts │ │ │ ├── operation-similarity-service-comprehensive.test.ts │ │ │ ├── operation-similarity-service.test.ts │ │ │ ├── property-dependencies.test.ts │ │ │ ├── property-filter-edge-cases.test.ts │ │ │ ├── property-filter.test.ts │ │ │ ├── resource-similarity-service-comprehensive.test.ts │ │ │ ├── resource-similarity-service.test.ts │ │ │ ├── task-templates.test.ts │ │ │ ├── template-service.test.ts │ │ │ ├── universal-expression-validator.test.ts │ │ │ ├── validation-fixes.test.ts │ │ │ ├── workflow-auto-fixer.test.ts │ │ │ ├── workflow-diff-engine.test.ts │ │ │ ├── workflow-fixed-collection-validation.test.ts │ │ │ ├── workflow-validator-comprehensive.test.ts │ │ │ ├── workflow-validator-edge-cases.test.ts │ │ │ ├── workflow-validator-error-outputs.test.ts │ │ │ ├── workflow-validator-expression-format.test.ts │ │ │ ├── workflow-validator-loops-simple.test.ts │ │ │ ├── workflow-validator-loops.test.ts │ │ │ ├── workflow-validator-mocks.test.ts │ │ │ ├── workflow-validator-performance.test.ts │ │ │ ├── workflow-validator-with-mocks.test.ts │ │ │ └── workflow-validator.test.ts │ │ ├── telemetry │ │ │ ├── batch-processor.test.ts │ │ │ ├── config-manager.test.ts │ │ │ ├── event-tracker.test.ts │ │ │ ├── event-validator.test.ts │ │ │ ├── rate-limiter.test.ts │ │ │ ├── telemetry-error.test.ts │ │ │ ├── telemetry-manager.test.ts │ │ │ ├── v2.18.3-fixes-verification.test.ts │ │ │ └── workflow-sanitizer.test.ts │ │ ├── templates │ │ │ ├── batch-processor.test.ts │ │ │ ├── metadata-generator.test.ts │ │ │ ├── template-repository-metadata.test.ts │ │ │ └── template-repository-security.test.ts │ │ ├── test-env-example.test.ts │ │ ├── test-infrastructure.test.ts │ │ ├── types │ │ │ ├── instance-context-coverage.test.ts │ │ │ └── instance-context-multi-tenant.test.ts │ │ ├── utils │ │ │ ├── auth-timing-safe.test.ts │ │ │ ├── cache-utils.test.ts │ │ │ ├── console-manager.test.ts │ │ │ ├── database-utils.test.ts │ │ │ ├── fixed-collection-validator.test.ts │ │ │ ├── n8n-errors.test.ts │ │ │ ├── node-type-normalizer.test.ts │ │ │ ├── node-type-utils.test.ts │ │ │ ├── node-utils.test.ts │ │ │ ├── simple-cache-memory-leak-fix.test.ts │ │ │ ├── ssrf-protection.test.ts │ │ │ └── template-node-resolver.test.ts │ │ └── validation-fixes.test.ts │ └── utils │ ├── assertions.ts │ 
├── builders │ │ └── workflow.builder.ts │ ├── data-generators.ts │ ├── database-utils.ts │ ├── README.md │ └── test-helpers.ts ├── thumbnail.png ├── tsconfig.build.json ├── tsconfig.json ├── types │ ├── mcp.d.ts │ └── test-env.d.ts ├── verify-telemetry-fix.js ├── versioned-nodes.md ├── vitest.config.benchmark.ts ├── vitest.config.integration.ts └── vitest.config.ts ``` # Files -------------------------------------------------------------------------------- /src/services/n8n-validation.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { z } from 'zod'; 2 | import { WorkflowNode, WorkflowConnection, Workflow } from '../types/n8n-api'; 3 | 4 | // Zod schemas for n8n API validation 5 | 6 | export const workflowNodeSchema = z.object({ 7 | id: z.string(), 8 | name: z.string(), 9 | type: z.string(), 10 | typeVersion: z.number(), 11 | position: z.tuple([z.number(), z.number()]), 12 | parameters: z.record(z.unknown()), 13 | credentials: z.record(z.unknown()).optional(), 14 | disabled: z.boolean().optional(), 15 | notes: z.string().optional(), 16 | notesInFlow: z.boolean().optional(), 17 | continueOnFail: z.boolean().optional(), 18 | retryOnFail: z.boolean().optional(), 19 | maxTries: z.number().optional(), 20 | waitBetweenTries: z.number().optional(), 21 | alwaysOutputData: z.boolean().optional(), 22 | executeOnce: z.boolean().optional(), 23 | }); 24 | 25 | export const workflowConnectionSchema = z.record( 26 | z.object({ 27 | main: z.array( 28 | z.array( 29 | z.object({ 30 | node: z.string(), 31 | type: z.string(), 32 | index: z.number(), 33 | }) 34 | ) 35 | ), 36 | }) 37 | ); 38 | 39 | export const workflowSettingsSchema = z.object({ 40 | executionOrder: z.enum(['v0', 'v1']).default('v1'), 41 | timezone: z.string().optional(), 42 | saveDataErrorExecution: z.enum(['all', 'none']).default('all'), 43 | saveDataSuccessExecution: z.enum(['all', 'none']).default('all'), 44 | saveManualExecutions: z.boolean().default(true), 45 | saveExecutionProgress: z.boolean().default(true), 46 | executionTimeout: z.number().optional(), 47 | errorWorkflow: z.string().optional(), 48 | callerPolicy: z.enum(['any', 'workflowsFromSameOwner', 'workflowsFromAList']).optional(), 49 | }); 50 | 51 | // Default settings for workflow creation 52 | export const defaultWorkflowSettings = { 53 | executionOrder: 'v1' as const, 54 | saveDataErrorExecution: 'all' as const, 55 | saveDataSuccessExecution: 'all' as const, 56 | saveManualExecutions: true, 57 | saveExecutionProgress: true, 58 | }; 59 | 60 | // Validation functions 61 | export function validateWorkflowNode(node: unknown): WorkflowNode { 62 | return workflowNodeSchema.parse(node); 63 | } 64 | 65 | export function validateWorkflowConnections(connections: unknown): WorkflowConnection { 66 | return workflowConnectionSchema.parse(connections); 67 | } 68 | 69 | export function validateWorkflowSettings(settings: unknown): z.infer<typeof workflowSettingsSchema> { 70 | return workflowSettingsSchema.parse(settings); 71 | } 72 | 73 | // Clean workflow data for API operations 74 | export function cleanWorkflowForCreate(workflow: Partial<Workflow>): Partial<Workflow> { 75 | const { 76 | // Remove read-only fields 77 | id, 78 | createdAt, 79 | updatedAt, 80 | versionId, 81 | meta, 82 | // Remove fields that cause API errors during creation 83 | active, 84 | tags, 85 | // Keep everything else 86 | ...cleanedWorkflow 87 | } = workflow; 88 | 89 | // Ensure settings are present with defaults 90 | if (!cleanedWorkflow.settings) { 
91 | cleanedWorkflow.settings = defaultWorkflowSettings; 92 | } 93 | 94 | return cleanedWorkflow; 95 | } 96 | 97 | /** 98 | * Clean workflow data for update operations. 99 | * 100 | * This function removes read-only and computed fields that should not be sent 101 | * in API update requests. It does NOT add any default values or new fields. 102 | * 103 | * Note: Unlike cleanWorkflowForCreate, this function does not add default settings. 104 | * The n8n API will reject update requests that include properties not present in 105 | * the original workflow ("settings must NOT have additional properties" error). 106 | * 107 | * Settings are filtered to only include whitelisted properties to prevent API 108 | * errors when workflows from n8n contain UI-only or deprecated properties. 109 | * 110 | * @param workflow - The workflow object to clean 111 | * @returns A cleaned partial workflow suitable for API updates 112 | */ 113 | export function cleanWorkflowForUpdate(workflow: Workflow): Partial<Workflow> { 114 | const { 115 | // Remove read-only/computed fields 116 | id, 117 | createdAt, 118 | updatedAt, 119 | versionId, 120 | meta, 121 | staticData, 122 | // Remove fields that cause API errors 123 | pinData, 124 | tags, 125 | // Remove additional fields that n8n API doesn't accept 126 | isArchived, 127 | usedCredentials, 128 | sharedWithProjects, 129 | triggerCount, 130 | shared, 131 | active, 132 | // Keep everything else 133 | ...cleanedWorkflow 134 | } = workflow as any; 135 | 136 | // CRITICAL FIX for Issue #248: 137 | // The n8n API has version-specific behavior for settings in workflow updates: 138 | // 139 | // PROBLEM: 140 | // - Some versions reject updates with settings properties (community forum reports) 141 | // - Cloud versions REQUIRE settings property to be present (n8n.estyl.team) 142 | // - Properties like callerPolicy cause "additional properties" errors 143 | // 144 | // SOLUTION: 145 | // - Filter settings to only include whitelisted properties (OpenAPI spec) 146 | // - If no settings provided, use empty object {} for safety 147 | // - Empty object satisfies "required property" validation (cloud API) 148 | // - Whitelisted properties prevent "additional properties" errors 149 | // 150 | // References: 151 | // - https://community.n8n.io/t/api-workflow-update-endpoint-doesnt-support-setting-callerpolicy/161916 152 | // - OpenAPI spec: workflowSettings schema 153 | // - Tested on n8n.estyl.team (cloud) and localhost (self-hosted) 154 | 155 | // Whitelisted settings properties from n8n OpenAPI spec 156 | const safeSettingsProperties = [ 157 | 'saveExecutionProgress', 158 | 'saveManualExecutions', 159 | 'saveDataErrorExecution', 160 | 'saveDataSuccessExecution', 161 | 'executionTimeout', 162 | 'errorWorkflow', 163 | 'timezone', 164 | 'executionOrder' 165 | ]; 166 | 167 | if (cleanedWorkflow.settings && typeof cleanedWorkflow.settings === 'object') { 168 | // Filter to only safe properties 169 | const filteredSettings: any = {}; 170 | for (const key of safeSettingsProperties) { 171 | if (key in cleanedWorkflow.settings) { 172 | filteredSettings[key] = (cleanedWorkflow.settings as any)[key]; 173 | } 174 | } 175 | cleanedWorkflow.settings = filteredSettings; 176 | } else { 177 | // No settings provided - use empty object for safety 178 | cleanedWorkflow.settings = {}; 179 | } 180 | 181 | return cleanedWorkflow; 182 | } 183 | 184 | // Validate workflow structure 185 | export function validateWorkflowStructure(workflow: Partial<Workflow>): string[] { 186 | const errors: string[] = []; 
187 | 188 | // Check required fields 189 | if (!workflow.name) { 190 | errors.push('Workflow name is required'); 191 | } 192 | 193 | if (!workflow.nodes || workflow.nodes.length === 0) { 194 | errors.push('Workflow must have at least one node'); 195 | } 196 | 197 | if (!workflow.connections) { 198 | errors.push('Workflow connections are required'); 199 | } 200 | 201 | // Check for minimum viable workflow 202 | if (workflow.nodes && workflow.nodes.length === 1) { 203 | const singleNode = workflow.nodes[0]; 204 | const isWebhookOnly = singleNode.type === 'n8n-nodes-base.webhook' || 205 | singleNode.type === 'n8n-nodes-base.webhookTrigger'; 206 | 207 | if (!isWebhookOnly) { 208 | errors.push(`Single non-webhook node workflow is invalid. Current node: "${singleNode.name}" (${singleNode.type}). Add another node using: {type: 'addNode', node: {name: 'Process Data', type: 'n8n-nodes-base.set', typeVersion: 3.4, position: [450, 300], parameters: {}}}`); 209 | } 210 | } 211 | 212 | // Check for disconnected nodes in multi-node workflows 213 | if (workflow.nodes && workflow.nodes.length > 1 && workflow.connections) { 214 | const connectionCount = Object.keys(workflow.connections).length; 215 | 216 | // First check: workflow has no connections at all 217 | if (connectionCount === 0) { 218 | const nodeNames = workflow.nodes.slice(0, 2).map(n => n.name); 219 | errors.push(`Multi-node workflow has no connections between nodes. Add a connection using: {type: 'addConnection', source: '${nodeNames[0]}', target: '${nodeNames[1]}', sourcePort: 'main', targetPort: 'main'}`); 220 | } else { 221 | // Second check: detect disconnected nodes (nodes with no incoming or outgoing connections) 222 | const connectedNodes = new Set<string>(); 223 | 224 | // Collect all nodes that appear in connections (as source or target) 225 | Object.entries(workflow.connections).forEach(([sourceName, connection]) => { 226 | connectedNodes.add(sourceName); // Node has outgoing connection 227 | 228 | if (connection.main && Array.isArray(connection.main)) { 229 | connection.main.forEach((outputs) => { 230 | if (Array.isArray(outputs)) { 231 | outputs.forEach((target) => { 232 | connectedNodes.add(target.node); // Node has incoming connection 233 | }); 234 | } 235 | }); 236 | } 237 | }); 238 | 239 | // Find disconnected nodes (excluding webhook triggers which can be source-only) 240 | const webhookTypes = new Set([ 241 | 'n8n-nodes-base.webhook', 242 | 'n8n-nodes-base.webhookTrigger', 243 | 'n8n-nodes-base.manualTrigger' 244 | ]); 245 | 246 | const disconnectedNodes = workflow.nodes.filter(node => { 247 | const isConnected = connectedNodes.has(node.name); 248 | const isWebhookOrTrigger = webhookTypes.has(node.type); 249 | 250 | // Webhook/trigger nodes only need outgoing connections 251 | if (isWebhookOrTrigger) { 252 | return !workflow.connections?.[node.name]; // Disconnected if no outgoing connections 253 | } 254 | 255 | // Regular nodes need at least one connection (incoming or outgoing) 256 | return !isConnected; 257 | }); 258 | 259 | if (disconnectedNodes.length > 0) { 260 | const disconnectedList = disconnectedNodes.map(n => `"${n.name}" (${n.type})`).join(', '); 261 | const firstDisconnected = disconnectedNodes[0]; 262 | const suggestedSource = workflow.nodes.find(n => connectedNodes.has(n.name))?.name || workflow.nodes[0].name; 263 | 264 | errors.push(`Disconnected nodes detected: ${disconnectedList}. Each node must have at least one connection. 
Add a connection: {type: 'addConnection', source: '${suggestedSource}', target: '${firstDisconnected.name}', sourcePort: 'main', targetPort: 'main'}`); 265 | } 266 | } 267 | } 268 | 269 | // Validate nodes 270 | if (workflow.nodes) { 271 | workflow.nodes.forEach((node, index) => { 272 | try { 273 | validateWorkflowNode(node); 274 | 275 | // Additional check for common node type mistakes 276 | if (node.type.startsWith('nodes-base.')) { 277 | errors.push(`Invalid node type "${node.type}" at index ${index}. Use "n8n-nodes-base.${node.type.substring(11)}" instead.`); 278 | } else if (!node.type.includes('.')) { 279 | errors.push(`Invalid node type "${node.type}" at index ${index}. Node types must include package prefix (e.g., "n8n-nodes-base.webhook").`); 280 | } 281 | } catch (error) { 282 | errors.push(`Invalid node at index ${index}: ${error instanceof Error ? error.message : 'Unknown error'}`); 283 | } 284 | }); 285 | } 286 | 287 | // Validate filter-based nodes (IF v2.2+, Switch v3.2+) have complete metadata 288 | if (workflow.nodes) { 289 | workflow.nodes.forEach((node, index) => { 290 | const filterErrors = validateFilterBasedNodeMetadata(node); 291 | if (filterErrors.length > 0) { 292 | errors.push(...filterErrors.map(err => `Node "${node.name}" (index ${index}): ${err}`)); 293 | } 294 | }); 295 | } 296 | 297 | // Validate connections 298 | if (workflow.connections) { 299 | try { 300 | validateWorkflowConnections(workflow.connections); 301 | } catch (error) { 302 | errors.push(`Invalid connections: ${error instanceof Error ? error.message : 'Unknown error'}`); 303 | } 304 | } 305 | 306 | // Validate Switch and IF node connection structures match their rules 307 | if (workflow.nodes && workflow.connections) { 308 | const switchNodes = workflow.nodes.filter(n => { 309 | if (n.type !== 'n8n-nodes-base.switch') return false; 310 | const mode = (n.parameters as any)?.mode; 311 | return !mode || mode === 'rules'; // Default mode is 'rules' 312 | }); 313 | 314 | for (const switchNode of switchNodes) { 315 | const params = switchNode.parameters as any; 316 | const rules = params?.rules?.rules || []; 317 | const nodeConnections = workflow.connections[switchNode.name]; 318 | 319 | if (rules.length > 0 && nodeConnections?.main) { 320 | const outputBranches = nodeConnections.main.length; 321 | 322 | // Switch nodes in "rules" mode need output branches matching rules count 323 | if (outputBranches !== rules.length) { 324 | const ruleNames = rules.map((r: any, i: number) => 325 | r.outputKey ? `"${r.outputKey}" (index ${i})` : `Rule ${i}` 326 | ).join(', '); 327 | 328 | errors.push( 329 | `Switch node "${switchNode.name}" has ${rules.length} rules [${ruleNames}] ` + 330 | `but only ${outputBranches} output branch${outputBranches !== 1 ? 'es' : ''} in connections. ` + 331 | `Each rule needs its own output branch. When connecting to Switch outputs, specify sourceIndex: ` + 332 | rules.map((_: any, i: number) => i).join(', ') + 333 | ` (or use case parameter for clarity).` 334 | ); 335 | } 336 | 337 | // Check for empty output branches (except trailing ones) 338 | const nonEmptyBranches = nodeConnections.main.filter((branch: any[]) => branch.length > 0).length; 339 | if (nonEmptyBranches < rules.length) { 340 | const emptyIndices = nodeConnections.main 341 | .map((branch: any[], i: number) => branch.length === 0 ? 
i : -1) 342 | .filter((i: number) => i !== -1 && i < rules.length); 343 | 344 | if (emptyIndices.length > 0) { 345 | const ruleInfo = emptyIndices.map((i: number) => { 346 | const rule = rules[i]; 347 | return rule.outputKey ? `"${rule.outputKey}" (index ${i})` : `Rule ${i}`; 348 | }).join(', '); 349 | 350 | errors.push( 351 | `Switch node "${switchNode.name}" has unconnected output${emptyIndices.length !== 1 ? 's' : ''}: ${ruleInfo}. ` + 352 | `Add connection${emptyIndices.length !== 1 ? 's' : ''} using sourceIndex: ${emptyIndices.join(' or ')}.` 353 | ); 354 | } 355 | } 356 | } 357 | } 358 | } 359 | 360 | // Validate that all connection references exist and use node NAMES (not IDs) 361 | if (workflow.nodes && workflow.connections) { 362 | const nodeNames = new Set(workflow.nodes.map(node => node.name)); 363 | const nodeIds = new Set(workflow.nodes.map(node => node.id)); 364 | const nodeIdToName = new Map(workflow.nodes.map(node => [node.id, node.name])); 365 | 366 | Object.entries(workflow.connections).forEach(([sourceName, connection]) => { 367 | // Check if source exists by name (correct) 368 | if (!nodeNames.has(sourceName)) { 369 | // Check if they're using an ID instead of name 370 | if (nodeIds.has(sourceName)) { 371 | const correctName = nodeIdToName.get(sourceName); 372 | errors.push(`Connection uses node ID '${sourceName}' but must use node name '${correctName}'. Change connections.${sourceName} to connections['${correctName}']`); 373 | } else { 374 | errors.push(`Connection references non-existent node: ${sourceName}`); 375 | } 376 | } 377 | 378 | if (connection.main && Array.isArray(connection.main)) { 379 | connection.main.forEach((outputs, outputIndex) => { 380 | if (Array.isArray(outputs)) { 381 | outputs.forEach((target, targetIndex) => { 382 | // Check if target exists by name (correct) 383 | if (!nodeNames.has(target.node)) { 384 | // Check if they're using an ID instead of name 385 | if (nodeIds.has(target.node)) { 386 | const correctName = nodeIdToName.get(target.node); 387 | errors.push(`Connection target uses node ID '${target.node}' but must use node name '${correctName}' (from ${sourceName}[${outputIndex}][${targetIndex}])`); 388 | } else { 389 | errors.push(`Connection references non-existent target node: ${target.node} (from ${sourceName}[${outputIndex}][${targetIndex}])`); 390 | } 391 | } 392 | }); 393 | } 394 | }); 395 | } 396 | }); 397 | } 398 | 399 | return errors; 400 | } 401 | 402 | // Check if workflow has webhook trigger 403 | export function hasWebhookTrigger(workflow: Workflow): boolean { 404 | return workflow.nodes.some(node => 405 | node.type === 'n8n-nodes-base.webhook' || 406 | node.type === 'n8n-nodes-base.webhookTrigger' 407 | ); 408 | } 409 | 410 | /** 411 | * Validate filter-based node metadata (IF v2.2+, Switch v3.2+) 412 | * Returns array of error messages 413 | */ 414 | export function validateFilterBasedNodeMetadata(node: WorkflowNode): string[] { 415 | const errors: string[] = []; 416 | 417 | // Check if node is filter-based 418 | const isIFNode = node.type === 'n8n-nodes-base.if' && node.typeVersion >= 2.2; 419 | const isSwitchNode = node.type === 'n8n-nodes-base.switch' && node.typeVersion >= 3.2; 420 | 421 | if (!isIFNode && !isSwitchNode) { 422 | return errors; // Not a filter-based node 423 | } 424 | 425 | // Validate IF node 426 | if (isIFNode) { 427 | const conditions = (node.parameters.conditions as any); 428 | 429 | // Check conditions.options exists 430 | if (!conditions?.options) { 431 | errors.push( 432 | 'Missing required 
"conditions.options". ' + 433 | 'IF v2.2+ requires: {version: 2, leftValue: "", caseSensitive: true, typeValidation: "strict"}' 434 | ); 435 | } else { 436 | // Validate required fields 437 | const requiredFields = { 438 | version: 2, 439 | leftValue: '', 440 | caseSensitive: 'boolean', 441 | typeValidation: 'strict' 442 | }; 443 | 444 | for (const [field, expectedValue] of Object.entries(requiredFields)) { 445 | if (!(field in conditions.options)) { 446 | errors.push( 447 | `Missing required field "conditions.options.${field}". ` + 448 | `Expected value: ${typeof expectedValue === 'string' ? `"${expectedValue}"` : expectedValue}` 449 | ); 450 | } 451 | } 452 | } 453 | 454 | // Validate operators in conditions 455 | if (conditions?.conditions && Array.isArray(conditions.conditions)) { 456 | conditions.conditions.forEach((condition: any, i: number) => { 457 | const operatorErrors = validateOperatorStructure(condition.operator, `conditions.conditions[${i}].operator`); 458 | errors.push(...operatorErrors); 459 | }); 460 | } 461 | } 462 | 463 | // Validate Switch node 464 | if (isSwitchNode) { 465 | const rules = (node.parameters.rules as any); 466 | 467 | if (rules?.rules && Array.isArray(rules.rules)) { 468 | rules.rules.forEach((rule: any, ruleIndex: number) => { 469 | // Check rule.conditions.options 470 | if (!rule.conditions?.options) { 471 | errors.push( 472 | `Missing required "rules.rules[${ruleIndex}].conditions.options". ` + 473 | 'Switch v3.2+ requires: {version: 2, leftValue: "", caseSensitive: true, typeValidation: "strict"}' 474 | ); 475 | } else { 476 | // Validate required fields 477 | const requiredFields = { 478 | version: 2, 479 | leftValue: '', 480 | caseSensitive: 'boolean', 481 | typeValidation: 'strict' 482 | }; 483 | 484 | for (const [field, expectedValue] of Object.entries(requiredFields)) { 485 | if (!(field in rule.conditions.options)) { 486 | errors.push( 487 | `Missing required field "rules.rules[${ruleIndex}].conditions.options.${field}". ` + 488 | `Expected value: ${typeof expectedValue === 'string' ? `"${expectedValue}"` : expectedValue}` 489 | ); 490 | } 491 | } 492 | } 493 | 494 | // Validate operators in rule conditions 495 | if (rule.conditions?.conditions && Array.isArray(rule.conditions.conditions)) { 496 | rule.conditions.conditions.forEach((condition: any, condIndex: number) => { 497 | const operatorErrors = validateOperatorStructure( 498 | condition.operator, 499 | `rules.rules[${ruleIndex}].conditions.conditions[${condIndex}].operator` 500 | ); 501 | errors.push(...operatorErrors); 502 | }); 503 | } 504 | }); 505 | } 506 | } 507 | 508 | return errors; 509 | } 510 | 511 | /** 512 | * Validate operator structure 513 | * Ensures operator has correct format: {type, operation, singleValue?} 514 | */ 515 | export function validateOperatorStructure(operator: any, path: string): string[] { 516 | const errors: string[] = []; 517 | 518 | if (!operator || typeof operator !== 'object') { 519 | errors.push(`${path}: operator is missing or not an object`); 520 | return errors; 521 | } 522 | 523 | // Check required field: type (data type, not operation name) 524 | if (!operator.type) { 525 | errors.push( 526 | `${path}: missing required field "type". 
` + 527 | 'Must be a data type: "string", "number", "boolean", "dateTime", "array", or "object"' 528 | ); 529 | } else { 530 | const validTypes = ['string', 'number', 'boolean', 'dateTime', 'array', 'object']; 531 | if (!validTypes.includes(operator.type)) { 532 | errors.push( 533 | `${path}: invalid type "${operator.type}". ` + 534 | `Type must be a data type (${validTypes.join(', ')}), not an operation name. ` + 535 | 'Did you mean to use the "operation" field?' 536 | ); 537 | } 538 | } 539 | 540 | // Check required field: operation 541 | if (!operator.operation) { 542 | errors.push( 543 | `${path}: missing required field "operation". ` + 544 | 'Operation specifies the comparison type (e.g., "equals", "contains", "isNotEmpty")' 545 | ); 546 | } 547 | 548 | // Check singleValue based on operator type 549 | if (operator.operation) { 550 | const unaryOperators = ['isEmpty', 'isNotEmpty', 'true', 'false', 'isNumeric']; 551 | const isUnary = unaryOperators.includes(operator.operation); 552 | 553 | if (isUnary) { 554 | // Unary operators MUST have singleValue: true 555 | if (operator.singleValue !== true) { 556 | errors.push( 557 | `${path}: unary operator "${operator.operation}" requires "singleValue: true". ` + 558 | 'Unary operators do not use rightValue.' 559 | ); 560 | } 561 | } else { 562 | // Binary operators should NOT have singleValue: true 563 | if (operator.singleValue === true) { 564 | errors.push( 565 | `${path}: binary operator "${operator.operation}" should not have "singleValue: true". ` + 566 | 'Only unary operators (isEmpty, isNotEmpty, true, false, isNumeric) need this property.' 567 | ); 568 | } 569 | } 570 | } 571 | 572 | return errors; 573 | } 574 | 575 | // Get webhook URL from workflow 576 | export function getWebhookUrl(workflow: Workflow): string | null { 577 | const webhookNode = workflow.nodes.find(node => 578 | node.type === 'n8n-nodes-base.webhook' || 579 | node.type === 'n8n-nodes-base.webhookTrigger' 580 | ); 581 | 582 | if (!webhookNode || !webhookNode.parameters) { 583 | return null; 584 | } 585 | 586 | // Check for path parameter 587 | const path = webhookNode.parameters.path as string | undefined; 588 | if (!path) { 589 | return null; 590 | } 591 | 592 | // Note: We can't construct the full URL without knowing the n8n instance URL 593 | // The caller will need to prepend the base URL 594 | return path; 595 | } 596 | 597 | // Helper function to generate proper workflow structure examples 598 | export function getWorkflowStructureExample(): string { 599 | return ` 600 | Minimal Workflow Example: 601 | { 602 | "name": "My Workflow", 603 | "nodes": [ 604 | { 605 | "id": "manual-trigger-1", 606 | "name": "Manual Trigger", 607 | "type": "n8n-nodes-base.manualTrigger", 608 | "typeVersion": 1, 609 | "position": [250, 300], 610 | "parameters": {} 611 | }, 612 | { 613 | "id": "set-1", 614 | "name": "Set Data", 615 | "type": "n8n-nodes-base.set", 616 | "typeVersion": 3.4, 617 | "position": [450, 300], 618 | "parameters": { 619 | "mode": "manual", 620 | "assignments": { 621 | "assignments": [{ 622 | "id": "1", 623 | "name": "message", 624 | "value": "Hello World", 625 | "type": "string" 626 | }] 627 | } 628 | } 629 | } 630 | ], 631 | "connections": { 632 | "Manual Trigger": { 633 | "main": [[{ 634 | "node": "Set Data", 635 | "type": "main", 636 | "index": 0 637 | }]] 638 | } 639 | } 640 | } 641 | 642 | IMPORTANT: In connections, use the node NAME (e.g., "Manual Trigger"), NOT the node ID or type!`; 643 | } 644 | 645 | // Helper function to fix common workflow issues 646 
| export function getWorkflowFixSuggestions(errors: string[]): string[] { 647 | const suggestions: string[] = []; 648 | 649 | if (errors.some(e => e.includes('empty connections'))) { 650 | suggestions.push('Add connections between your nodes. Each node (except endpoints) should connect to another node.'); 651 | suggestions.push('Connection format: connections: { "Source Node Name": { "main": [[{ "node": "Target Node Name", "type": "main", "index": 0 }]] } }'); 652 | } 653 | 654 | if (errors.some(e => e.includes('Single-node workflows'))) { 655 | suggestions.push('Add at least one more node to process data. Common patterns: Trigger → Process → Output'); 656 | suggestions.push('Examples: Manual Trigger → Set, Webhook → HTTP Request, Schedule Trigger → Database Query'); 657 | } 658 | 659 | if (errors.some(e => e.includes('node ID') && e.includes('instead of node name'))) { 660 | suggestions.push('Replace node IDs with node names in connections. The name is what appears in the node header.'); 661 | suggestions.push('Wrong: connections: { "set-1": {...} }, Right: connections: { "Set Data": {...} }'); 662 | } 663 | 664 | return suggestions; 665 | } ``` -------------------------------------------------------------------------------- /tests/unit/services/ai-tool-validators.test.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { describe, it, expect } from 'vitest'; 2 | import { 3 | validateHTTPRequestTool, 4 | validateCodeTool, 5 | validateVectorStoreTool, 6 | validateWorkflowTool, 7 | validateAIAgentTool, 8 | validateMCPClientTool, 9 | validateCalculatorTool, 10 | validateThinkTool, 11 | validateSerpApiTool, 12 | validateWikipediaTool, 13 | validateSearXngTool, 14 | validateWolframAlphaTool, 15 | type WorkflowNode 16 | } from '@/services/ai-tool-validators'; 17 | 18 | describe('AI Tool Validators', () => { 19 | describe('validateHTTPRequestTool', () => { 20 | it('should error on missing toolDescription', () => { 21 | const node: WorkflowNode = { 22 | id: 'http1', 23 | name: 'Weather API', 24 | type: '@n8n/n8n-nodes-langchain.toolHttpRequest', 25 | position: [0, 0], 26 | parameters: { 27 | method: 'GET', 28 | url: 'https://api.weather.com/data' 29 | } 30 | }; 31 | 32 | const issues = validateHTTPRequestTool(node); 33 | 34 | expect(issues).toContainEqual( 35 | expect.objectContaining({ 36 | severity: 'error', 37 | code: 'MISSING_TOOL_DESCRIPTION' 38 | }) 39 | ); 40 | }); 41 | 42 | it('should warn on short toolDescription', () => { 43 | const node: WorkflowNode = { 44 | id: 'http1', 45 | name: 'Weather API', 46 | type: '@n8n/n8n-nodes-langchain.toolHttpRequest', 47 | position: [0, 0], 48 | parameters: { 49 | method: 'GET', 50 | url: 'https://api.weather.com/data', 51 | toolDescription: 'Weather' // Too short (7 chars, need 15) 52 | } 53 | }; 54 | 55 | const issues = validateHTTPRequestTool(node); 56 | 57 | expect(issues).toContainEqual( 58 | expect.objectContaining({ 59 | severity: 'warning', 60 | message: expect.stringContaining('toolDescription is too short') 61 | }) 62 | ); 63 | }); 64 | 65 | it('should error on missing URL', () => { 66 | const node: WorkflowNode = { 67 | id: 'http1', 68 | name: 'API Tool', 69 | type: '@n8n/n8n-nodes-langchain.toolHttpRequest', 70 | position: [0, 0], 71 | parameters: { 72 | toolDescription: 'Fetches data from an API endpoint', 73 | method: 'GET' 74 | } 75 | }; 76 | 77 | const issues = validateHTTPRequestTool(node); 78 | 79 | expect(issues).toContainEqual( 80 | expect.objectContaining({ 81 | severity: 
'error', 82 | code: 'MISSING_URL' 83 | }) 84 | ); 85 | }); 86 | 87 | it('should error on invalid URL protocol', () => { 88 | const node: WorkflowNode = { 89 | id: 'http1', 90 | name: 'FTP Tool', 91 | type: '@n8n/n8n-nodes-langchain.toolHttpRequest', 92 | position: [0, 0], 93 | parameters: { 94 | toolDescription: 'Downloads files via FTP', 95 | url: 'ftp://files.example.com/data.txt' 96 | } 97 | }; 98 | 99 | const issues = validateHTTPRequestTool(node); 100 | 101 | expect(issues).toContainEqual( 102 | expect.objectContaining({ 103 | severity: 'error', 104 | code: 'INVALID_URL_PROTOCOL' 105 | }) 106 | ); 107 | }); 108 | 109 | it('should allow expressions in URL', () => { 110 | const node: WorkflowNode = { 111 | id: 'http1', 112 | name: 'Dynamic API', 113 | type: '@n8n/n8n-nodes-langchain.toolHttpRequest', 114 | position: [0, 0], 115 | parameters: { 116 | toolDescription: 'Fetches data from dynamic endpoint', 117 | url: '={{$json.apiUrl}}/users' 118 | } 119 | }; 120 | 121 | const issues = validateHTTPRequestTool(node); 122 | 123 | // Should not error on URL format when it contains expressions 124 | const urlErrors = issues.filter(i => i.code === 'INVALID_URL_FORMAT'); 125 | expect(urlErrors).toHaveLength(0); 126 | }); 127 | 128 | it('should warn on missing placeholderDefinitions for parameterized URL', () => { 129 | const node: WorkflowNode = { 130 | id: 'http1', 131 | name: 'User API', 132 | type: '@n8n/n8n-nodes-langchain.toolHttpRequest', 133 | position: [0, 0], 134 | parameters: { 135 | toolDescription: 'Fetches user data by ID', 136 | url: 'https://api.example.com/users/{userId}' 137 | } 138 | }; 139 | 140 | const issues = validateHTTPRequestTool(node); 141 | 142 | expect(issues).toContainEqual( 143 | expect.objectContaining({ 144 | severity: 'warning', 145 | message: expect.stringContaining('placeholderDefinitions') 146 | }) 147 | ); 148 | }); 149 | 150 | it('should validate placeholder definitions match URL', () => { 151 | const node: WorkflowNode = { 152 | id: 'http1', 153 | name: 'User API', 154 | type: '@n8n/n8n-nodes-langchain.toolHttpRequest', 155 | position: [0, 0], 156 | parameters: { 157 | toolDescription: 'Fetches user data', 158 | url: 'https://api.example.com/users/{userId}', 159 | placeholderDefinitions: { 160 | values: [ 161 | { name: 'wrongName', description: 'User identifier' } 162 | ] 163 | } 164 | } 165 | }; 166 | 167 | const issues = validateHTTPRequestTool(node); 168 | 169 | expect(issues).toContainEqual( 170 | expect.objectContaining({ 171 | severity: 'error', 172 | message: expect.stringContaining('Placeholder "userId" in URL') 173 | }) 174 | ); 175 | }); 176 | 177 | it('should pass valid HTTP Request Tool configuration', () => { 178 | const node: WorkflowNode = { 179 | id: 'http1', 180 | name: 'Weather API', 181 | type: '@n8n/n8n-nodes-langchain.toolHttpRequest', 182 | position: [0, 0], 183 | parameters: { 184 | toolDescription: 'Get current weather conditions for a specified city', 185 | method: 'GET', 186 | url: 'https://api.weather.com/v1/current?city={city}', 187 | placeholderDefinitions: { 188 | values: [ 189 | { name: 'city', description: 'City name (e.g. 
London, Tokyo)' } 190 | ] 191 | } 192 | } 193 | }; 194 | 195 | const issues = validateHTTPRequestTool(node); 196 | 197 | // Should have no errors 198 | const errors = issues.filter(i => i.severity === 'error'); 199 | expect(errors).toHaveLength(0); 200 | }); 201 | }); 202 | 203 | describe('validateCodeTool', () => { 204 | it('should error on missing toolDescription', () => { 205 | const node: WorkflowNode = { 206 | id: 'code1', 207 | name: 'Calculate Tax', 208 | type: '@n8n/n8n-nodes-langchain.toolCode', 209 | position: [0, 0], 210 | parameters: { 211 | language: 'javaScript', 212 | jsCode: 'return { tax: price * 0.1 };' 213 | } 214 | }; 215 | 216 | const issues = validateCodeTool(node); 217 | 218 | expect(issues).toContainEqual( 219 | expect.objectContaining({ 220 | severity: 'error', 221 | code: 'MISSING_TOOL_DESCRIPTION' 222 | }) 223 | ); 224 | }); 225 | 226 | it('should error on missing code', () => { 227 | const node: WorkflowNode = { 228 | id: 'code1', 229 | name: 'Empty Code', 230 | type: '@n8n/n8n-nodes-langchain.toolCode', 231 | position: [0, 0], 232 | parameters: { 233 | toolDescription: 'Performs calculations', 234 | language: 'javaScript' 235 | } 236 | }; 237 | 238 | const issues = validateCodeTool(node); 239 | 240 | expect(issues).toContainEqual( 241 | expect.objectContaining({ 242 | severity: 'error', 243 | message: expect.stringContaining('code is empty') 244 | }) 245 | ); 246 | }); 247 | 248 | it('should warn on missing schema for outputs', () => { 249 | const node: WorkflowNode = { 250 | id: 'code1', 251 | name: 'Calculate', 252 | type: '@n8n/n8n-nodes-langchain.toolCode', 253 | position: [0, 0], 254 | parameters: { 255 | toolDescription: 'Calculates shipping cost based on weight and distance', 256 | language: 'javaScript', 257 | jsCode: 'return { cost: weight * distance * 0.5 };' 258 | } 259 | }; 260 | 261 | const issues = validateCodeTool(node); 262 | 263 | expect(issues).toContainEqual( 264 | expect.objectContaining({ 265 | severity: 'warning', 266 | message: expect.stringContaining('schema') 267 | }) 268 | ); 269 | }); 270 | 271 | it('should pass valid Code Tool configuration', () => { 272 | const node: WorkflowNode = { 273 | id: 'code1', 274 | name: 'Shipping Calculator', 275 | type: '@n8n/n8n-nodes-langchain.toolCode', 276 | position: [0, 0], 277 | parameters: { 278 | toolDescription: 'Calculates shipping cost based on weight (kg) and distance (km)', 279 | language: 'javaScript', 280 | jsCode: `const { weight, distance } = $input; 281 | const baseCost = 5.00; 282 | const costPerKg = 2.50; 283 | const costPerKm = 0.15; 284 | const cost = baseCost + (weight * costPerKg) + (distance * costPerKm); 285 | return { cost: cost.toFixed(2) };`, 286 | specifyInputSchema: true, 287 | inputSchema: '{ "weight": "number", "distance": "number" }' 288 | } 289 | }; 290 | 291 | const issues = validateCodeTool(node); 292 | 293 | const errors = issues.filter(i => i.severity === 'error'); 294 | expect(errors).toHaveLength(0); 295 | }); 296 | }); 297 | 298 | describe('validateVectorStoreTool', () => { 299 | it('should error on missing toolDescription', () => { 300 | const node: WorkflowNode = { 301 | id: 'vector1', 302 | name: 'Product Search', 303 | type: '@n8n/n8n-nodes-langchain.toolVectorStore', 304 | position: [0, 0], 305 | parameters: { 306 | topK: 5 307 | } 308 | }; 309 | 310 | const reverseMap = new Map(); 311 | const workflow = { nodes: [node], connections: {} }; 312 | const issues = validateVectorStoreTool(node, reverseMap, workflow); 313 | 314 | expect(issues).toContainEqual( 
315 | expect.objectContaining({ 316 | severity: 'error', 317 | code: 'MISSING_TOOL_DESCRIPTION' 318 | }) 319 | ); 320 | }); 321 | 322 | it('should warn on high topK value', () => { 323 | const node: WorkflowNode = { 324 | id: 'vector1', 325 | name: 'Document Search', 326 | type: '@n8n/n8n-nodes-langchain.toolVectorStore', 327 | position: [0, 0], 328 | parameters: { 329 | toolDescription: 'Search through product documentation', 330 | topK: 25 // Exceeds threshold of 20 331 | } 332 | }; 333 | 334 | const reverseMap = new Map(); 335 | const workflow = { nodes: [node], connections: {} }; 336 | const issues = validateVectorStoreTool(node, reverseMap, workflow); 337 | 338 | expect(issues).toContainEqual( 339 | expect.objectContaining({ 340 | severity: 'warning', 341 | message: expect.stringContaining('topK') 342 | }) 343 | ); 344 | }); 345 | 346 | it('should pass valid Vector Store Tool configuration', () => { 347 | const node: WorkflowNode = { 348 | id: 'vector1', 349 | name: 'Knowledge Base', 350 | type: '@n8n/n8n-nodes-langchain.toolVectorStore', 351 | position: [0, 0], 352 | parameters: { 353 | toolDescription: 'Search company knowledge base for relevant documentation', 354 | topK: 5 355 | } 356 | }; 357 | 358 | const reverseMap = new Map(); 359 | const workflow = { nodes: [node], connections: {} }; 360 | const issues = validateVectorStoreTool(node, reverseMap, workflow); 361 | 362 | const errors = issues.filter(i => i.severity === 'error'); 363 | expect(errors).toHaveLength(0); 364 | }); 365 | }); 366 | 367 | describe('validateWorkflowTool', () => { 368 | it('should error on missing toolDescription', () => { 369 | const node: WorkflowNode = { 370 | id: 'workflow1', 371 | name: 'Approval Process', 372 | type: '@n8n/n8n-nodes-langchain.toolWorkflow', 373 | position: [0, 0], 374 | parameters: {} 375 | }; 376 | 377 | const reverseMap = new Map(); 378 | const issues = validateWorkflowTool(node, reverseMap); 379 | 380 | expect(issues).toContainEqual( 381 | expect.objectContaining({ 382 | severity: 'error', 383 | code: 'MISSING_TOOL_DESCRIPTION' 384 | }) 385 | ); 386 | }); 387 | 388 | it('should error on missing workflowId', () => { 389 | const node: WorkflowNode = { 390 | id: 'workflow1', 391 | name: 'Data Processor', 392 | type: '@n8n/n8n-nodes-langchain.toolWorkflow', 393 | position: [0, 0], 394 | parameters: { 395 | toolDescription: 'Process data through specialized workflow' 396 | } 397 | }; 398 | 399 | const reverseMap = new Map(); 400 | const issues = validateWorkflowTool(node, reverseMap); 401 | 402 | expect(issues).toContainEqual( 403 | expect.objectContaining({ 404 | severity: 'error', 405 | message: expect.stringContaining('workflowId') 406 | }) 407 | ); 408 | }); 409 | 410 | it('should pass valid Workflow Tool configuration', () => { 411 | const node: WorkflowNode = { 412 | id: 'workflow1', 413 | name: 'Email Approval', 414 | type: '@n8n/n8n-nodes-langchain.toolWorkflow', 415 | position: [0, 0], 416 | parameters: { 417 | toolDescription: 'Send email and wait for approval response', 418 | workflowId: '123' 419 | } 420 | }; 421 | 422 | const reverseMap = new Map(); 423 | const issues = validateWorkflowTool(node, reverseMap); 424 | 425 | const errors = issues.filter(i => i.severity === 'error'); 426 | expect(errors).toHaveLength(0); 427 | }); 428 | }); 429 | 430 | describe('validateAIAgentTool', () => { 431 | it('should error on missing toolDescription', () => { 432 | const node: WorkflowNode = { 433 | id: 'agent1', 434 | name: 'Research Agent', 435 | type: 
'@n8n/n8n-nodes-langchain.agent', 436 | position: [0, 0], 437 | parameters: {} 438 | }; 439 | 440 | const reverseMap = new Map(); 441 | const issues = validateAIAgentTool(node, reverseMap); 442 | 443 | expect(issues).toContainEqual( 444 | expect.objectContaining({ 445 | severity: 'error', 446 | code: 'MISSING_TOOL_DESCRIPTION' 447 | }) 448 | ); 449 | }); 450 | 451 | it('should warn on high maxIterations', () => { 452 | const node: WorkflowNode = { 453 | id: 'agent1', 454 | name: 'Complex Agent', 455 | type: '@n8n/n8n-nodes-langchain.agent', 456 | position: [0, 0], 457 | parameters: { 458 | toolDescription: 'Performs complex research tasks', 459 | maxIterations: 60 // Exceeds threshold of 50 460 | } 461 | }; 462 | 463 | const reverseMap = new Map(); 464 | const issues = validateAIAgentTool(node, reverseMap); 465 | 466 | expect(issues).toContainEqual( 467 | expect.objectContaining({ 468 | severity: 'warning', 469 | message: expect.stringContaining('maxIterations') 470 | }) 471 | ); 472 | }); 473 | 474 | it('should pass valid AI Agent Tool configuration', () => { 475 | const node: WorkflowNode = { 476 | id: 'agent1', 477 | name: 'Research Specialist', 478 | type: '@n8n/n8n-nodes-langchain.agent', 479 | position: [0, 0], 480 | parameters: { 481 | toolDescription: 'Specialist agent for conducting in-depth research on technical topics', 482 | maxIterations: 10 483 | } 484 | }; 485 | 486 | const reverseMap = new Map(); 487 | const issues = validateAIAgentTool(node, reverseMap); 488 | 489 | const errors = issues.filter(i => i.severity === 'error'); 490 | expect(errors).toHaveLength(0); 491 | }); 492 | }); 493 | 494 | describe('validateMCPClientTool', () => { 495 | it('should error on missing toolDescription', () => { 496 | const node: WorkflowNode = { 497 | id: 'mcp1', 498 | name: 'File Access', 499 | type: '@n8n/n8n-nodes-langchain.mcpClientTool', 500 | position: [0, 0], 501 | parameters: { 502 | serverUrl: 'mcp://filesystem' 503 | } 504 | }; 505 | 506 | const issues = validateMCPClientTool(node); 507 | 508 | expect(issues).toContainEqual( 509 | expect.objectContaining({ 510 | severity: 'error', 511 | code: 'MISSING_TOOL_DESCRIPTION' 512 | }) 513 | ); 514 | }); 515 | 516 | it('should error on missing serverUrl', () => { 517 | const node: WorkflowNode = { 518 | id: 'mcp1', 519 | name: 'MCP Tool', 520 | type: '@n8n/n8n-nodes-langchain.mcpClientTool', 521 | position: [0, 0], 522 | parameters: { 523 | toolDescription: 'Access external MCP server' 524 | } 525 | }; 526 | 527 | const issues = validateMCPClientTool(node); 528 | 529 | expect(issues).toContainEqual( 530 | expect.objectContaining({ 531 | severity: 'error', 532 | message: expect.stringContaining('serverUrl') 533 | }) 534 | ); 535 | }); 536 | 537 | it('should pass valid MCP Client Tool configuration', () => { 538 | const node: WorkflowNode = { 539 | id: 'mcp1', 540 | name: 'Filesystem Access', 541 | type: '@n8n/n8n-nodes-langchain.mcpClientTool', 542 | position: [0, 0], 543 | parameters: { 544 | toolDescription: 'Read and write files in the local filesystem', 545 | serverUrl: 'mcp://filesystem' 546 | } 547 | }; 548 | 549 | const issues = validateMCPClientTool(node); 550 | 551 | const errors = issues.filter(i => i.severity === 'error'); 552 | expect(errors).toHaveLength(0); 553 | }); 554 | }); 555 | 556 | describe('validateCalculatorTool', () => { 557 | it('should not require toolDescription (has built-in description)', () => { 558 | const node: WorkflowNode = { 559 | id: 'calc1', 560 | name: 'Math Operations', 561 | type: 
'@n8n/n8n-nodes-langchain.toolCalculator', 562 | position: [0, 0], 563 | parameters: {} 564 | }; 565 | 566 | const issues = validateCalculatorTool(node); 567 | 568 | // Calculator Tool has built-in description, no validation needed 569 | expect(issues).toHaveLength(0); 570 | }); 571 | 572 | it('should pass valid Calculator Tool configuration', () => { 573 | const node: WorkflowNode = { 574 | id: 'calc1', 575 | name: 'Calculator', 576 | type: '@n8n/n8n-nodes-langchain.toolCalculator', 577 | position: [0, 0], 578 | parameters: { 579 | toolDescription: 'Perform mathematical calculations and solve equations' 580 | } 581 | }; 582 | 583 | const issues = validateCalculatorTool(node); 584 | 585 | const errors = issues.filter(i => i.severity === 'error'); 586 | expect(errors).toHaveLength(0); 587 | }); 588 | }); 589 | 590 | describe('validateThinkTool', () => { 591 | it('should not require toolDescription (has built-in description)', () => { 592 | const node: WorkflowNode = { 593 | id: 'think1', 594 | name: 'Think', 595 | type: '@n8n/n8n-nodes-langchain.toolThink', 596 | position: [0, 0], 597 | parameters: {} 598 | }; 599 | 600 | const issues = validateThinkTool(node); 601 | 602 | // Think Tool has built-in description, no validation needed 603 | expect(issues).toHaveLength(0); 604 | }); 605 | 606 | it('should pass valid Think Tool configuration', () => { 607 | const node: WorkflowNode = { 608 | id: 'think1', 609 | name: 'Think', 610 | type: '@n8n/n8n-nodes-langchain.toolThink', 611 | position: [0, 0], 612 | parameters: { 613 | toolDescription: 'Pause and think through complex problems step by step' 614 | } 615 | }; 616 | 617 | const issues = validateThinkTool(node); 618 | 619 | const errors = issues.filter(i => i.severity === 'error'); 620 | expect(errors).toHaveLength(0); 621 | }); 622 | }); 623 | 624 | describe('validateSerpApiTool', () => { 625 | it('should error on missing toolDescription', () => { 626 | const node: WorkflowNode = { 627 | id: 'serp1', 628 | name: 'Web Search', 629 | type: '@n8n/n8n-nodes-langchain.toolSerpapi', 630 | position: [0, 0], 631 | parameters: {} 632 | }; 633 | 634 | const issues = validateSerpApiTool(node); 635 | 636 | expect(issues).toContainEqual( 637 | expect.objectContaining({ 638 | severity: 'error', 639 | code: 'MISSING_TOOL_DESCRIPTION' 640 | }) 641 | ); 642 | }); 643 | 644 | it('should warn on missing credentials', () => { 645 | const node: WorkflowNode = { 646 | id: 'serp1', 647 | name: 'Search Engine', 648 | type: '@n8n/n8n-nodes-langchain.toolSerpapi', 649 | position: [0, 0], 650 | parameters: { 651 | toolDescription: 'Search the web for current information' 652 | } 653 | }; 654 | 655 | const issues = validateSerpApiTool(node); 656 | 657 | expect(issues).toContainEqual( 658 | expect.objectContaining({ 659 | severity: 'warning', 660 | message: expect.stringContaining('credentials') 661 | }) 662 | ); 663 | }); 664 | 665 | it('should pass valid SerpApi Tool configuration', () => { 666 | const node: WorkflowNode = { 667 | id: 'serp1', 668 | name: 'Web Search', 669 | type: '@n8n/n8n-nodes-langchain.toolSerpapi', 670 | position: [0, 0], 671 | parameters: { 672 | toolDescription: 'Search Google for current web information and news' 673 | }, 674 | credentials: { 675 | serpApiApi: 'serpapi-credentials' 676 | } 677 | }; 678 | 679 | const issues = validateSerpApiTool(node); 680 | 681 | const errors = issues.filter(i => i.severity === 'error'); 682 | expect(errors).toHaveLength(0); 683 | }); 684 | }); 685 | 686 | describe('validateWikipediaTool', () => { 687 | 
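        // Note: unlike SerpApi above (which also warns on missing credentials), the Wikipedia
        // validator is exercised below only for the toolDescription requirement.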
it('should error on missing toolDescription', () => { 688 | const node: WorkflowNode = { 689 | id: 'wiki1', 690 | name: 'Wiki Lookup', 691 | type: '@n8n/n8n-nodes-langchain.toolWikipedia', 692 | position: [0, 0], 693 | parameters: {} 694 | }; 695 | 696 | const issues = validateWikipediaTool(node); 697 | 698 | expect(issues).toContainEqual( 699 | expect.objectContaining({ 700 | severity: 'error', 701 | code: 'MISSING_TOOL_DESCRIPTION' 702 | }) 703 | ); 704 | }); 705 | 706 | it('should pass valid Wikipedia Tool configuration', () => { 707 | const node: WorkflowNode = { 708 | id: 'wiki1', 709 | name: 'Wikipedia', 710 | type: '@n8n/n8n-nodes-langchain.toolWikipedia', 711 | position: [0, 0], 712 | parameters: { 713 | toolDescription: 'Look up factual information from Wikipedia articles' 714 | } 715 | }; 716 | 717 | const issues = validateWikipediaTool(node); 718 | 719 | const errors = issues.filter(i => i.severity === 'error'); 720 | expect(errors).toHaveLength(0); 721 | }); 722 | }); 723 | 724 | describe('validateSearXngTool', () => { 725 | it('should error on missing toolDescription', () => { 726 | const node: WorkflowNode = { 727 | id: 'searx1', 728 | name: 'Privacy Search', 729 | type: '@n8n/n8n-nodes-langchain.toolSearxng', 730 | position: [0, 0], 731 | parameters: {} 732 | }; 733 | 734 | const issues = validateSearXngTool(node); 735 | 736 | expect(issues).toContainEqual( 737 | expect.objectContaining({ 738 | severity: 'error', 739 | code: 'MISSING_TOOL_DESCRIPTION' 740 | }) 741 | ); 742 | }); 743 | 744 | it('should error on missing baseUrl', () => { 745 | const node: WorkflowNode = { 746 | id: 'searx1', 747 | name: 'SearXNG', 748 | type: '@n8n/n8n-nodes-langchain.toolSearxng', 749 | position: [0, 0], 750 | parameters: { 751 | toolDescription: 'Private web search through SearXNG instance' 752 | } 753 | }; 754 | 755 | const issues = validateSearXngTool(node); 756 | 757 | expect(issues).toContainEqual( 758 | expect.objectContaining({ 759 | severity: 'error', 760 | message: expect.stringContaining('baseUrl') 761 | }) 762 | ); 763 | }); 764 | 765 | it('should pass valid SearXNG Tool configuration', () => { 766 | const node: WorkflowNode = { 767 | id: 'searx1', 768 | name: 'SearXNG', 769 | type: '@n8n/n8n-nodes-langchain.toolSearxng', 770 | position: [0, 0], 771 | parameters: { 772 | toolDescription: 'Privacy-focused web search through self-hosted SearXNG', 773 | baseUrl: 'https://searx.example.com' 774 | } 775 | }; 776 | 777 | const issues = validateSearXngTool(node); 778 | 779 | const errors = issues.filter(i => i.severity === 'error'); 780 | expect(errors).toHaveLength(0); 781 | }); 782 | }); 783 | 784 | describe('validateWolframAlphaTool', () => { 785 | it('should error on missing credentials', () => { 786 | const node: WorkflowNode = { 787 | id: 'wolfram1', 788 | name: 'Computational Knowledge', 789 | type: '@n8n/n8n-nodes-langchain.toolWolframAlpha', 790 | position: [0, 0], 791 | parameters: {} 792 | }; 793 | 794 | const issues = validateWolframAlphaTool(node); 795 | 796 | expect(issues).toContainEqual( 797 | expect.objectContaining({ 798 | severity: 'error', 799 | code: 'MISSING_CREDENTIALS' 800 | }) 801 | ); 802 | }); 803 | 804 | it('should provide info on missing custom description', () => { 805 | const node: WorkflowNode = { 806 | id: 'wolfram1', 807 | name: 'WolframAlpha', 808 | type: '@n8n/n8n-nodes-langchain.toolWolframAlpha', 809 | position: [0, 0], 810 | parameters: {}, 811 | credentials: { 812 | wolframAlpha: 'wolfram-credentials' 813 | } 814 | }; 815 | 816 | const issues = 
validateWolframAlphaTool(node); 817 | 818 | expect(issues).toContainEqual( 819 | expect.objectContaining({ 820 | severity: 'info', 821 | message: expect.stringContaining('description') 822 | }) 823 | ); 824 | }); 825 | 826 | it('should pass valid WolframAlpha Tool configuration', () => { 827 | const node: WorkflowNode = { 828 | id: 'wolfram1', 829 | name: 'WolframAlpha', 830 | type: '@n8n/n8n-nodes-langchain.toolWolframAlpha', 831 | position: [0, 0], 832 | parameters: { 833 | toolDescription: 'Computational knowledge engine for math, science, and factual queries' 834 | }, 835 | credentials: { 836 | wolframAlphaApi: 'wolfram-credentials' 837 | } 838 | }; 839 | 840 | const issues = validateWolframAlphaTool(node); 841 | 842 | const errors = issues.filter(i => i.severity === 'error'); 843 | expect(errors).toHaveLength(0); 844 | }); 845 | }); 846 | }); 847 | ``` -------------------------------------------------------------------------------- /src/mcp/tools.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { ToolDefinition } from '../types'; 2 | 3 | /** 4 | * n8n Documentation MCP Tools - FINAL OPTIMIZED VERSION 5 | * 6 | * Incorporates all lessons learned from real workflow building. 7 | * Designed to help AI agents avoid common pitfalls and build workflows efficiently. 8 | */ 9 | export const n8nDocumentationToolsFinal: ToolDefinition[] = [ 10 | { 11 | name: 'tools_documentation', 12 | description: `Get documentation for n8n MCP tools. Call without parameters for quick start guide. Use topic parameter to get documentation for specific tools. Use depth='full' for comprehensive documentation.`, 13 | inputSchema: { 14 | type: 'object', 15 | properties: { 16 | topic: { 17 | type: 'string', 18 | description: 'Tool name (e.g., "search_nodes") or "overview" for general guide. Leave empty for quick reference.', 19 | }, 20 | depth: { 21 | type: 'string', 22 | enum: ['essentials', 'full'], 23 | description: 'Level of detail. "essentials" (default) for quick reference, "full" for comprehensive docs.', 24 | default: 'essentials', 25 | }, 26 | }, 27 | }, 28 | }, 29 | { 30 | name: 'list_nodes', 31 | description: `List n8n nodes. Common: list_nodes({limit:200}) for all, list_nodes({category:'trigger'}) for triggers. Package: "n8n-nodes-base" or "@n8n/n8n-nodes-langchain". Categories: trigger/transform/output/input.`, 32 | inputSchema: { 33 | type: 'object', 34 | properties: { 35 | package: { 36 | type: 'string', 37 | description: '"n8n-nodes-base" (core) or "@n8n/n8n-nodes-langchain" (AI)', 38 | }, 39 | category: { 40 | type: 'string', 41 | description: 'trigger|transform|output|input|AI', 42 | }, 43 | developmentStyle: { 44 | type: 'string', 45 | enum: ['declarative', 'programmatic'], 46 | description: 'Usually "programmatic"', 47 | }, 48 | isAITool: { 49 | type: 'boolean', 50 | description: 'Filter AI-capable nodes', 51 | }, 52 | limit: { 53 | type: 'number', 54 | description: 'Max results (default 50, use 200+ for all)', 55 | default: 50, 56 | }, 57 | }, 58 | }, 59 | }, 60 | { 61 | name: 'get_node_info', 62 | description: `Get full node documentation. Pass nodeType as string with prefix. Example: nodeType="nodes-base.webhook"`, 63 | inputSchema: { 64 | type: 'object', 65 | properties: { 66 | nodeType: { 67 | type: 'string', 68 | description: 'Full type: "nodes-base.{name}" or "nodes-langchain.{name}". 
Examples: nodes-base.httpRequest, nodes-base.webhook, nodes-base.slack', 69 | }, 70 | }, 71 | required: ['nodeType'], 72 | }, 73 | }, 74 | { 75 | name: 'search_nodes', 76 | description: `Search n8n nodes by keyword with optional real-world examples. Pass query as string. Example: query="webhook" or query="database". Returns max 20 results. Use includeExamples=true to get top 2 template configs per node.`, 77 | inputSchema: { 78 | type: 'object', 79 | properties: { 80 | query: { 81 | type: 'string', 82 | description: 'Search terms. Use quotes for exact phrase.', 83 | }, 84 | limit: { 85 | type: 'number', 86 | description: 'Max results (default 20)', 87 | default: 20, 88 | }, 89 | mode: { 90 | type: 'string', 91 | enum: ['OR', 'AND', 'FUZZY'], 92 | description: 'OR=any word, AND=all words, FUZZY=typo-tolerant', 93 | default: 'OR', 94 | }, 95 | includeExamples: { 96 | type: 'boolean', 97 | description: 'Include top 2 real-world configuration examples from popular templates (default: false)', 98 | default: false, 99 | }, 100 | }, 101 | required: ['query'], 102 | }, 103 | }, 104 | { 105 | name: 'list_ai_tools', 106 | description: `List 263 AI-optimized nodes. Note: ANY node can be AI tool! Connect any node to AI Agent's tool port. Community nodes need N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true.`, 107 | inputSchema: { 108 | type: 'object', 109 | properties: {}, 110 | }, 111 | }, 112 | { 113 | name: 'get_node_documentation', 114 | description: `Get readable docs with examples/auth/patterns. Better than raw schema! 87% coverage. Format: "nodes-base.slack"`, 115 | inputSchema: { 116 | type: 'object', 117 | properties: { 118 | nodeType: { 119 | type: 'string', 120 | description: 'Full type with prefix: "nodes-base.slack"', 121 | }, 122 | }, 123 | required: ['nodeType'], 124 | }, 125 | }, 126 | { 127 | name: 'get_database_statistics', 128 | description: `Node stats: 525 total, 263 AI tools, 104 triggers, 87% docs coverage. Verifies MCP working.`, 129 | inputSchema: { 130 | type: 'object', 131 | properties: {}, 132 | }, 133 | }, 134 | { 135 | name: 'get_node_essentials', 136 | description: `Get node essential info with optional real-world examples from templates. Pass nodeType as string with prefix. Example: nodeType="nodes-base.slack". Use includeExamples=true to get top 3 template configs.`, 137 | inputSchema: { 138 | type: 'object', 139 | properties: { 140 | nodeType: { 141 | type: 'string', 142 | description: 'Full type: "nodes-base.httpRequest"', 143 | }, 144 | includeExamples: { 145 | type: 'boolean', 146 | description: 'Include top 3 real-world configuration examples from popular templates (default: false)', 147 | default: false, 148 | }, 149 | }, 150 | required: ['nodeType'], 151 | }, 152 | }, 153 | { 154 | name: 'search_node_properties', 155 | description: `Find specific properties in a node (auth, headers, body, etc). 
Returns paths and descriptions.`, 156 | inputSchema: { 157 | type: 'object', 158 | properties: { 159 | nodeType: { 160 | type: 'string', 161 | description: 'Full type with prefix', 162 | }, 163 | query: { 164 | type: 'string', 165 | description: 'Property to find: "auth", "header", "body", "json"', 166 | }, 167 | maxResults: { 168 | type: 'number', 169 | description: 'Max results (default 20)', 170 | default: 20, 171 | }, 172 | }, 173 | required: ['nodeType', 'query'], 174 | }, 175 | }, 176 | { 177 | name: 'list_tasks', 178 | description: `List task templates by category: HTTP/API, Webhooks, Database, AI, Data Processing, Communication.`, 179 | inputSchema: { 180 | type: 'object', 181 | properties: { 182 | category: { 183 | type: 'string', 184 | description: 'Filter by category (optional)', 185 | }, 186 | }, 187 | }, 188 | }, 189 | { 190 | name: 'validate_node_operation', 191 | description: `Validate n8n node configuration. Pass nodeType as string and config as object. Example: nodeType="nodes-base.slack", config={resource:"channel",operation:"create"}`, 192 | inputSchema: { 193 | type: 'object', 194 | properties: { 195 | nodeType: { 196 | type: 'string', 197 | description: 'Node type as string. Example: "nodes-base.slack"', 198 | }, 199 | config: { 200 | type: 'object', 201 | description: 'Configuration as object. For simple nodes use {}. For complex nodes include fields like {resource:"channel",operation:"create"}', 202 | }, 203 | profile: { 204 | type: 'string', 205 | enum: ['strict', 'runtime', 'ai-friendly', 'minimal'], 206 | description: 'Profile string: "minimal", "runtime", "ai-friendly", or "strict". Default is "ai-friendly"', 207 | default: 'ai-friendly', 208 | }, 209 | }, 210 | required: ['nodeType', 'config'], 211 | additionalProperties: false, 212 | }, 213 | outputSchema: { 214 | type: 'object', 215 | properties: { 216 | nodeType: { type: 'string' }, 217 | workflowNodeType: { type: 'string' }, 218 | displayName: { type: 'string' }, 219 | valid: { type: 'boolean' }, 220 | errors: { 221 | type: 'array', 222 | items: { 223 | type: 'object', 224 | properties: { 225 | type: { type: 'string' }, 226 | property: { type: 'string' }, 227 | message: { type: 'string' }, 228 | fix: { type: 'string' } 229 | } 230 | } 231 | }, 232 | warnings: { 233 | type: 'array', 234 | items: { 235 | type: 'object', 236 | properties: { 237 | type: { type: 'string' }, 238 | property: { type: 'string' }, 239 | message: { type: 'string' }, 240 | suggestion: { type: 'string' } 241 | } 242 | } 243 | }, 244 | suggestions: { type: 'array', items: { type: 'string' } }, 245 | summary: { 246 | type: 'object', 247 | properties: { 248 | hasErrors: { type: 'boolean' }, 249 | errorCount: { type: 'number' }, 250 | warningCount: { type: 'number' }, 251 | suggestionCount: { type: 'number' } 252 | } 253 | } 254 | }, 255 | required: ['nodeType', 'displayName', 'valid', 'errors', 'warnings', 'suggestions', 'summary'] 256 | }, 257 | }, 258 | { 259 | name: 'validate_node_minimal', 260 | description: `Check n8n node required fields. Pass nodeType as string and config as empty object {}. Example: nodeType="nodes-base.webhook", config={}`, 261 | inputSchema: { 262 | type: 'object', 263 | properties: { 264 | nodeType: { 265 | type: 'string', 266 | description: 'Node type as string. Example: "nodes-base.slack"', 267 | }, 268 | config: { 269 | type: 'object', 270 | description: 'Configuration object. 
Always pass {} for empty config', 271 | }, 272 | }, 273 | required: ['nodeType', 'config'], 274 | additionalProperties: false, 275 | }, 276 | outputSchema: { 277 | type: 'object', 278 | properties: { 279 | nodeType: { type: 'string' }, 280 | displayName: { type: 'string' }, 281 | valid: { type: 'boolean' }, 282 | missingRequiredFields: { 283 | type: 'array', 284 | items: { type: 'string' } 285 | } 286 | }, 287 | required: ['nodeType', 'displayName', 'valid', 'missingRequiredFields'] 288 | }, 289 | }, 290 | { 291 | name: 'get_property_dependencies', 292 | description: `Shows property dependencies and visibility rules. Example: sendBody=true reveals body fields. Test visibility with optional config.`, 293 | inputSchema: { 294 | type: 'object', 295 | properties: { 296 | nodeType: { 297 | type: 'string', 298 | description: 'The node type to analyze (e.g., "nodes-base.httpRequest")', 299 | }, 300 | config: { 301 | type: 'object', 302 | description: 'Optional partial configuration to check visibility impact', 303 | }, 304 | }, 305 | required: ['nodeType'], 306 | }, 307 | }, 308 | { 309 | name: 'get_node_as_tool_info', 310 | description: `How to use ANY node as AI tool. Shows requirements, use cases, examples. Works for all nodes, not just AI-marked ones.`, 311 | inputSchema: { 312 | type: 'object', 313 | properties: { 314 | nodeType: { 315 | type: 'string', 316 | description: 'Full node type WITH prefix: "nodes-base.slack", "nodes-base.googleSheets", etc.', 317 | }, 318 | }, 319 | required: ['nodeType'], 320 | }, 321 | }, 322 | { 323 | name: 'list_templates', 324 | description: `List all templates with minimal data (id, name, description, views, node count). Optionally include AI-generated metadata for smart filtering.`, 325 | inputSchema: { 326 | type: 'object', 327 | properties: { 328 | limit: { 329 | type: 'number', 330 | description: 'Number of results (1-100). Default 10.', 331 | default: 10, 332 | minimum: 1, 333 | maximum: 100, 334 | }, 335 | offset: { 336 | type: 'number', 337 | description: 'Pagination offset. Default 0.', 338 | default: 0, 339 | minimum: 0, 340 | }, 341 | sortBy: { 342 | type: 'string', 343 | enum: ['views', 'created_at', 'name'], 344 | description: 'Sort field. Default: views (popularity).', 345 | default: 'views', 346 | }, 347 | includeMetadata: { 348 | type: 'boolean', 349 | description: 'Include AI-generated metadata (categories, complexity, setup time, etc.). Default false.', 350 | default: false, 351 | }, 352 | }, 353 | }, 354 | }, 355 | { 356 | name: 'list_node_templates', 357 | description: `Find templates using specific nodes. Returns paginated results. Use FULL types: "n8n-nodes-base.httpRequest".`, 358 | inputSchema: { 359 | type: 'object', 360 | properties: { 361 | nodeTypes: { 362 | type: 'array', 363 | items: { type: 'string' }, 364 | description: 'Array of node types to search for (e.g., ["n8n-nodes-base.httpRequest", "n8n-nodes-base.openAi"])', 365 | }, 366 | limit: { 367 | type: 'number', 368 | description: 'Maximum number of templates to return. Default 10.', 369 | default: 10, 370 | minimum: 1, 371 | maximum: 100, 372 | }, 373 | offset: { 374 | type: 'number', 375 | description: 'Pagination offset. Default 0.', 376 | default: 0, 377 | minimum: 0, 378 | }, 379 | }, 380 | required: ['nodeTypes'], 381 | }, 382 | }, 383 | { 384 | name: 'get_template', 385 | description: `Get template by ID. 
Use mode to control response size: nodes_only (minimal), structure (nodes+connections), full (complete workflow).`, 386 | inputSchema: { 387 | type: 'object', 388 | properties: { 389 | templateId: { 390 | type: 'number', 391 | description: 'The template ID to retrieve', 392 | }, 393 | mode: { 394 | type: 'string', 395 | enum: ['nodes_only', 'structure', 'full'], 396 | description: 'Response detail level. nodes_only: just node list, structure: nodes+connections, full: complete workflow JSON.', 397 | default: 'full', 398 | }, 399 | }, 400 | required: ['templateId'], 401 | }, 402 | }, 403 | { 404 | name: 'search_templates', 405 | description: `Search templates by name/description keywords. Returns paginated results. NOT for node types! For nodes use list_node_templates.`, 406 | inputSchema: { 407 | type: 'object', 408 | properties: { 409 | query: { 410 | type: 'string', 411 | description: 'Search keyword as string. Example: "chatbot"', 412 | }, 413 | fields: { 414 | type: 'array', 415 | items: { 416 | type: 'string', 417 | enum: ['id', 'name', 'description', 'author', 'nodes', 'views', 'created', 'url', 'metadata'], 418 | }, 419 | description: 'Fields to include in response. Default: all fields. Example: ["id", "name"] for minimal response.', 420 | }, 421 | limit: { 422 | type: 'number', 423 | description: 'Maximum number of results. Default 20.', 424 | default: 20, 425 | minimum: 1, 426 | maximum: 100, 427 | }, 428 | offset: { 429 | type: 'number', 430 | description: 'Pagination offset. Default 0.', 431 | default: 0, 432 | minimum: 0, 433 | }, 434 | }, 435 | required: ['query'], 436 | }, 437 | }, 438 | { 439 | name: 'get_templates_for_task', 440 | description: `Curated templates by task. Returns paginated results sorted by popularity.`, 441 | inputSchema: { 442 | type: 'object', 443 | properties: { 444 | task: { 445 | type: 'string', 446 | enum: [ 447 | 'ai_automation', 448 | 'data_sync', 449 | 'webhook_processing', 450 | 'email_automation', 451 | 'slack_integration', 452 | 'data_transformation', 453 | 'file_processing', 454 | 'scheduling', 455 | 'api_integration', 456 | 'database_operations' 457 | ], 458 | description: 'The type of task to get templates for', 459 | }, 460 | limit: { 461 | type: 'number', 462 | description: 'Maximum number of results. Default 10.', 463 | default: 10, 464 | minimum: 1, 465 | maximum: 100, 466 | }, 467 | offset: { 468 | type: 'number', 469 | description: 'Pagination offset. Default 0.', 470 | default: 0, 471 | minimum: 0, 472 | }, 473 | }, 474 | required: ['task'], 475 | }, 476 | }, 477 | { 478 | name: 'search_templates_by_metadata', 479 | description: `Search templates by AI-generated metadata. Filter by category, complexity, setup time, services, or audience. 
Returns rich metadata for smart template discovery.`, 480 | inputSchema: { 481 | type: 'object', 482 | properties: { 483 | category: { 484 | type: 'string', 485 | description: 'Filter by category (e.g., "automation", "integration", "data processing")', 486 | }, 487 | complexity: { 488 | type: 'string', 489 | enum: ['simple', 'medium', 'complex'], 490 | description: 'Filter by complexity level', 491 | }, 492 | maxSetupMinutes: { 493 | type: 'number', 494 | description: 'Maximum setup time in minutes', 495 | minimum: 5, 496 | maximum: 480, 497 | }, 498 | minSetupMinutes: { 499 | type: 'number', 500 | description: 'Minimum setup time in minutes', 501 | minimum: 5, 502 | maximum: 480, 503 | }, 504 | requiredService: { 505 | type: 'string', 506 | description: 'Filter by required service (e.g., "openai", "slack", "google")', 507 | }, 508 | targetAudience: { 509 | type: 'string', 510 | description: 'Filter by target audience (e.g., "developers", "marketers", "analysts")', 511 | }, 512 | limit: { 513 | type: 'number', 514 | description: 'Maximum number of results. Default 20.', 515 | default: 20, 516 | minimum: 1, 517 | maximum: 100, 518 | }, 519 | offset: { 520 | type: 'number', 521 | description: 'Pagination offset. Default 0.', 522 | default: 0, 523 | minimum: 0, 524 | }, 525 | }, 526 | additionalProperties: false, 527 | }, 528 | }, 529 | { 530 | name: 'validate_workflow', 531 | description: `Full workflow validation: structure, connections, expressions, AI tools. Returns errors/warnings/fixes. Essential before deploy.`, 532 | inputSchema: { 533 | type: 'object', 534 | properties: { 535 | workflow: { 536 | type: 'object', 537 | description: 'The complete workflow JSON to validate. Must include nodes array and connections object.', 538 | }, 539 | options: { 540 | type: 'object', 541 | properties: { 542 | validateNodes: { 543 | type: 'boolean', 544 | description: 'Validate individual node configurations. Default true.', 545 | default: true, 546 | }, 547 | validateConnections: { 548 | type: 'boolean', 549 | description: 'Validate node connections and flow. Default true.', 550 | default: true, 551 | }, 552 | validateExpressions: { 553 | type: 'boolean', 554 | description: 'Validate n8n expressions syntax and references. Default true.', 555 | default: true, 556 | }, 557 | profile: { 558 | type: 'string', 559 | enum: ['minimal', 'runtime', 'ai-friendly', 'strict'], 560 | description: 'Validation profile for node validation. 
Default "runtime".', 561 | default: 'runtime', 562 | }, 563 | }, 564 | description: 'Optional validation settings', 565 | }, 566 | }, 567 | required: ['workflow'], 568 | additionalProperties: false, 569 | }, 570 | outputSchema: { 571 | type: 'object', 572 | properties: { 573 | valid: { type: 'boolean' }, 574 | summary: { 575 | type: 'object', 576 | properties: { 577 | totalNodes: { type: 'number' }, 578 | enabledNodes: { type: 'number' }, 579 | triggerNodes: { type: 'number' }, 580 | validConnections: { type: 'number' }, 581 | invalidConnections: { type: 'number' }, 582 | expressionsValidated: { type: 'number' }, 583 | errorCount: { type: 'number' }, 584 | warningCount: { type: 'number' } 585 | } 586 | }, 587 | errors: { 588 | type: 'array', 589 | items: { 590 | type: 'object', 591 | properties: { 592 | node: { type: 'string' }, 593 | message: { type: 'string' }, 594 | details: { type: 'string' } 595 | } 596 | } 597 | }, 598 | warnings: { 599 | type: 'array', 600 | items: { 601 | type: 'object', 602 | properties: { 603 | node: { type: 'string' }, 604 | message: { type: 'string' }, 605 | details: { type: 'string' } 606 | } 607 | } 608 | }, 609 | suggestions: { type: 'array', items: { type: 'string' } } 610 | }, 611 | required: ['valid', 'summary'] 612 | }, 613 | }, 614 | { 615 | name: 'validate_workflow_connections', 616 | description: `Check workflow connections only: valid nodes, no cycles, proper triggers, AI tool links. Fast structure validation.`, 617 | inputSchema: { 618 | type: 'object', 619 | properties: { 620 | workflow: { 621 | type: 'object', 622 | description: 'The workflow JSON with nodes array and connections object.', 623 | }, 624 | }, 625 | required: ['workflow'], 626 | additionalProperties: false, 627 | }, 628 | outputSchema: { 629 | type: 'object', 630 | properties: { 631 | valid: { type: 'boolean' }, 632 | statistics: { 633 | type: 'object', 634 | properties: { 635 | totalNodes: { type: 'number' }, 636 | triggerNodes: { type: 'number' }, 637 | validConnections: { type: 'number' }, 638 | invalidConnections: { type: 'number' } 639 | } 640 | }, 641 | errors: { 642 | type: 'array', 643 | items: { 644 | type: 'object', 645 | properties: { 646 | node: { type: 'string' }, 647 | message: { type: 'string' } 648 | } 649 | } 650 | }, 651 | warnings: { 652 | type: 'array', 653 | items: { 654 | type: 'object', 655 | properties: { 656 | node: { type: 'string' }, 657 | message: { type: 'string' } 658 | } 659 | } 660 | } 661 | }, 662 | required: ['valid', 'statistics'] 663 | }, 664 | }, 665 | { 666 | name: 'validate_workflow_expressions', 667 | description: `Validate n8n expressions: syntax {{}}, variables ($json/$node), references. 
Returns errors with locations.`, 668 | inputSchema: { 669 | type: 'object', 670 | properties: { 671 | workflow: { 672 | type: 'object', 673 | description: 'The workflow JSON to check for expression errors.', 674 | }, 675 | }, 676 | required: ['workflow'], 677 | additionalProperties: false, 678 | }, 679 | outputSchema: { 680 | type: 'object', 681 | properties: { 682 | valid: { type: 'boolean' }, 683 | statistics: { 684 | type: 'object', 685 | properties: { 686 | totalNodes: { type: 'number' }, 687 | expressionsValidated: { type: 'number' } 688 | } 689 | }, 690 | errors: { 691 | type: 'array', 692 | items: { 693 | type: 'object', 694 | properties: { 695 | node: { type: 'string' }, 696 | message: { type: 'string' } 697 | } 698 | } 699 | }, 700 | warnings: { 701 | type: 'array', 702 | items: { 703 | type: 'object', 704 | properties: { 705 | node: { type: 'string' }, 706 | message: { type: 'string' } 707 | } 708 | } 709 | }, 710 | tips: { type: 'array', items: { type: 'string' } } 711 | }, 712 | required: ['valid', 'statistics'] 713 | }, 714 | }, 715 | ]; 716 | 717 | /** 718 | * QUICK REFERENCE for AI Agents: 719 | * 720 | * 1. RECOMMENDED WORKFLOW: 721 | * - Start: search_nodes → get_node_essentials → get_node_for_task → validate_node_operation 722 | * - Discovery: list_nodes({category:"trigger"}) for browsing categories 723 | * - Quick Config: get_node_essentials("nodes-base.httpRequest") - only essential properties 724 | * - Full Details: get_node_info only when essentials aren't enough 725 | * - Validation: Use validate_node_operation for complex nodes (Slack, Google Sheets, etc.) 726 | * 727 | * 2. COMMON NODE TYPES: 728 | * Triggers: webhook, schedule, emailReadImap, slackTrigger 729 | * Core: httpRequest, code, set, if, merge, splitInBatches 730 | * Integrations: slack, gmail, googleSheets, postgres, mongodb 731 | * AI: agent, openAi, chainLlm, documentLoader 732 | * 733 | * 3. SEARCH TIPS: 734 | * - search_nodes returns ANY word match (OR logic) 735 | * - Single words more precise, multiple words broader 736 | * - If no results: use list_nodes with category filter 737 | * 738 | * 4. TEMPLATE SEARCHING: 739 | * - search_templates("slack") searches template names/descriptions, NOT node types! 740 | * - To find templates using Slack node: list_node_templates(["n8n-nodes-base.slack"]) 741 | * - For task-based templates: get_templates_for_task("slack_integration") 742 | * - 399 templates available from the last year 743 | * 744 | * 5. KNOWN ISSUES: 745 | * - Some nodes have duplicate properties with different conditions 746 | * - Package names: use 'n8n-nodes-base' not '@n8n/n8n-nodes-base' 747 | * - Check showWhen/hideWhen to identify the right property variant 748 | * 749 | * 6. 
PERFORMANCE: 750 | * - get_node_essentials: Fast (<5KB) 751 | * - get_node_info: Slow (100KB+) - use sparingly 752 | * - search_nodes/list_nodes: Fast, cached 753 | */ ``` -------------------------------------------------------------------------------- /tests/integration/telemetry/mcp-telemetry.test.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest'; 2 | import { N8NDocumentationMCPServer } from '../../../src/mcp/server'; 3 | import { telemetry } from '../../../src/telemetry/telemetry-manager'; 4 | import { TelemetryConfigManager } from '../../../src/telemetry/config-manager'; 5 | import { CallToolRequest, ListToolsRequest } from '@modelcontextprotocol/sdk/types.js'; 6 | 7 | // Mock dependencies 8 | vi.mock('../../../src/utils/logger', () => ({ 9 | Logger: vi.fn().mockImplementation(() => ({ 10 | debug: vi.fn(), 11 | info: vi.fn(), 12 | warn: vi.fn(), 13 | error: vi.fn(), 14 | })), 15 | logger: { 16 | debug: vi.fn(), 17 | info: vi.fn(), 18 | warn: vi.fn(), 19 | error: vi.fn(), 20 | } 21 | })); 22 | 23 | vi.mock('../../../src/telemetry/telemetry-manager', () => ({ 24 | telemetry: { 25 | trackSessionStart: vi.fn(), 26 | trackToolUsage: vi.fn(), 27 | trackToolSequence: vi.fn(), 28 | trackError: vi.fn(), 29 | trackSearchQuery: vi.fn(), 30 | trackValidationDetails: vi.fn(), 31 | trackWorkflowCreation: vi.fn(), 32 | trackPerformanceMetric: vi.fn(), 33 | getMetrics: vi.fn().mockReturnValue({ 34 | status: 'enabled', 35 | initialized: true, 36 | tracking: { eventQueueSize: 0 }, 37 | processing: { eventsTracked: 0 }, 38 | errors: { totalErrors: 0 } 39 | }) 40 | } 41 | })); 42 | 43 | vi.mock('../../../src/telemetry/config-manager'); 44 | 45 | // Mock database and other dependencies 46 | vi.mock('../../../src/database/node-repository'); 47 | vi.mock('../../../src/services/enhanced-config-validator'); 48 | vi.mock('../../../src/services/expression-validator'); 49 | vi.mock('../../../src/services/workflow-validator'); 50 | 51 | // TODO: This test needs to be refactored. It's currently mocking everything 52 | // which defeats the purpose of an integration test. It should either: 53 | // 1. Be moved to unit tests if we want to test with mocks 54 | // 2. 
Be rewritten as a proper integration test without mocks 55 | // Skipping for now to unblock CI - the telemetry functionality is tested 56 | // properly in the unit tests at tests/unit/telemetry/ 57 | describe.skip('MCP Telemetry Integration', () => { 58 | let mcpServer: N8NDocumentationMCPServer; 59 | let mockTelemetryConfig: any; 60 | 61 | beforeEach(() => { 62 | // Mock TelemetryConfigManager 63 | mockTelemetryConfig = { 64 | isEnabled: vi.fn().mockReturnValue(true), 65 | getUserId: vi.fn().mockReturnValue('test-user-123'), 66 | disable: vi.fn(), 67 | enable: vi.fn(), 68 | getStatus: vi.fn().mockReturnValue('enabled') 69 | }; 70 | vi.mocked(TelemetryConfigManager.getInstance).mockReturnValue(mockTelemetryConfig); 71 | 72 | // Mock database repository 73 | const mockNodeRepository = { 74 | searchNodes: vi.fn().mockResolvedValue({ results: [], totalResults: 0 }), 75 | getNodeInfo: vi.fn().mockResolvedValue(null), 76 | getAllNodes: vi.fn().mockResolvedValue([]), 77 | close: vi.fn() 78 | }; 79 | vi.doMock('../../../src/database/node-repository', () => ({ 80 | NodeRepository: vi.fn().mockImplementation(() => mockNodeRepository) 81 | })); 82 | 83 | // Create a mock server instance to avoid initialization issues 84 | const mockServer = { 85 | requestHandlers: new Map(), 86 | notificationHandlers: new Map(), 87 | setRequestHandler: vi.fn((method: string, handler: any) => { 88 | mockServer.requestHandlers.set(method, handler); 89 | }), 90 | setNotificationHandler: vi.fn((method: string, handler: any) => { 91 | mockServer.notificationHandlers.set(method, handler); 92 | }) 93 | }; 94 | 95 | // Set up basic handlers 96 | mockServer.requestHandlers.set('initialize', async () => { 97 | telemetry.trackSessionStart(); 98 | return { protocolVersion: '2024-11-05' }; 99 | }); 100 | 101 | mockServer.requestHandlers.set('tools/call', async (params: any) => { 102 | // Use the actual tool name from the request 103 | const toolName = params?.name || 'unknown-tool'; 104 | 105 | try { 106 | // Call executeTool if it's been mocked 107 | if ((mcpServer as any).executeTool) { 108 | const result = await (mcpServer as any).executeTool(params); 109 | 110 | // Track specific telemetry based on tool type 111 | if (toolName === 'search_nodes') { 112 | const query = params?.arguments?.query || ''; 113 | const totalResults = result?.totalResults || 0; 114 | const mode = params?.arguments?.mode || 'OR'; 115 | telemetry.trackSearchQuery(query, totalResults, mode); 116 | } else if (toolName === 'validate_workflow') { 117 | const workflow = params?.arguments?.workflow || {}; 118 | const validationPassed = result?.isValid !== false; 119 | telemetry.trackWorkflowCreation(workflow, validationPassed); 120 | if (!validationPassed && result?.errors) { 121 | result.errors.forEach((error: any) => { 122 | telemetry.trackValidationDetails(error.nodeType || 'unknown', error.type || 'validation_error', error); 123 | }); 124 | } 125 | } else if (toolName === 'validate_node_operation' || toolName === 'validate_node_minimal') { 126 | const nodeType = params?.arguments?.nodeType || 'unknown'; 127 | const errorType = result?.errors?.[0]?.type || 'validation_error'; 128 | telemetry.trackValidationDetails(nodeType, errorType, result); 129 | } 130 | 131 | // Simulate a duration for tool execution 132 | const duration = params?.duration || Math.random() * 100; 133 | telemetry.trackToolUsage(toolName, true, duration); 134 | return { content: [{ type: 'text', text: JSON.stringify(result) }] }; 135 | } else { 136 | // Default behavior if executeTool 
is not mocked 137 | telemetry.trackToolUsage(toolName, true); 138 | return { content: [{ type: 'text', text: 'Success' }] }; 139 | } 140 | } catch (error: any) { 141 | telemetry.trackToolUsage(toolName, false); 142 | telemetry.trackError( 143 | error.constructor.name, 144 | error.message, 145 | toolName, 146 | error.message 147 | ); 148 | throw error; 149 | } 150 | }); 151 | 152 | // Mock the N8NDocumentationMCPServer to have the server property 153 | mcpServer = { 154 | server: mockServer, 155 | handleTool: vi.fn().mockResolvedValue({ content: [{ type: 'text', text: 'Success' }] }), 156 | executeTool: vi.fn().mockResolvedValue({ 157 | results: [{ nodeType: 'nodes-base.webhook' }], 158 | totalResults: 1 159 | }), 160 | close: vi.fn() 161 | } as any; 162 | 163 | vi.clearAllMocks(); 164 | }); 165 | 166 | afterEach(() => { 167 | vi.clearAllMocks(); 168 | }); 169 | 170 | describe('Session tracking', () => { 171 | it('should track session start on MCP initialize', async () => { 172 | const initializeRequest = { 173 | method: 'initialize' as const, 174 | params: { 175 | protocolVersion: '2024-11-05', 176 | clientInfo: { 177 | name: 'test-client', 178 | version: '1.0.0' 179 | }, 180 | capabilities: {} 181 | } 182 | }; 183 | 184 | // Access the private server instance for testing 185 | const server = (mcpServer as any).server; 186 | const initializeHandler = server.requestHandlers.get('initialize'); 187 | 188 | if (initializeHandler) { 189 | await initializeHandler(initializeRequest.params); 190 | } 191 | 192 | expect(telemetry.trackSessionStart).toHaveBeenCalledTimes(1); 193 | }); 194 | }); 195 | 196 | describe('Tool usage tracking', () => { 197 | it('should track successful tool execution', async () => { 198 | const callToolRequest: CallToolRequest = { 199 | method: 'tools/call', 200 | params: { 201 | name: 'search_nodes', 202 | arguments: { query: 'webhook' } 203 | } 204 | }; 205 | 206 | // Mock the executeTool method to return a successful result 207 | vi.spyOn(mcpServer as any, 'executeTool').mockResolvedValue({ 208 | results: [{ nodeType: 'nodes-base.webhook' }], 209 | totalResults: 1 210 | }); 211 | 212 | const server = (mcpServer as any).server; 213 | const callToolHandler = server.requestHandlers.get('tools/call'); 214 | 215 | if (callToolHandler) { 216 | await callToolHandler(callToolRequest.params); 217 | } 218 | 219 | expect(telemetry.trackToolUsage).toHaveBeenCalledWith( 220 | 'search_nodes', 221 | true, 222 | expect.any(Number) 223 | ); 224 | }); 225 | 226 | it('should track failed tool execution', async () => { 227 | const callToolRequest: CallToolRequest = { 228 | method: 'tools/call', 229 | params: { 230 | name: 'get_node_info', 231 | arguments: { nodeType: 'invalid-node' } 232 | } 233 | }; 234 | 235 | // Mock the executeTool method to throw an error 236 | const error = new Error('Node not found'); 237 | vi.spyOn(mcpServer as any, 'executeTool').mockRejectedValue(error); 238 | 239 | const server = (mcpServer as any).server; 240 | const callToolHandler = server.requestHandlers.get('tools/call'); 241 | 242 | if (callToolHandler) { 243 | try { 244 | await callToolHandler(callToolRequest.params); 245 | } catch (e) { 246 | // Expected to throw 247 | } 248 | } 249 | 250 | expect(telemetry.trackToolUsage).toHaveBeenCalledWith('get_node_info', false); 251 | expect(telemetry.trackError).toHaveBeenCalledWith( 252 | 'Error', 253 | 'Node not found', 254 | 'get_node_info' 255 | ); 256 | }); 257 | 258 | it('should track tool sequences', async () => { 259 | // Set up previous tool state 260 | 
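      // A previous search_nodes call 5 seconds in the past is simulated so the handler
      // reports the (search_nodes → get_node_info) pair and the elapsed gap via trackToolSequence.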
(mcpServer as any).previousTool = 'search_nodes'; 261 | (mcpServer as any).previousToolTimestamp = Date.now() - 5000; 262 | 263 | const callToolRequest: CallToolRequest = { 264 | method: 'tools/call', 265 | params: { 266 | name: 'get_node_info', 267 | arguments: { nodeType: 'nodes-base.webhook' } 268 | } 269 | }; 270 | 271 | vi.spyOn(mcpServer as any, 'executeTool').mockResolvedValue({ 272 | nodeType: 'nodes-base.webhook', 273 | displayName: 'Webhook' 274 | }); 275 | 276 | const server = (mcpServer as any).server; 277 | const callToolHandler = server.requestHandlers.get('tools/call'); 278 | 279 | if (callToolHandler) { 280 | await callToolHandler(callToolRequest.params); 281 | } 282 | 283 | expect(telemetry.trackToolSequence).toHaveBeenCalledWith( 284 | 'search_nodes', 285 | 'get_node_info', 286 | expect.any(Number) 287 | ); 288 | }); 289 | }); 290 | 291 | describe('Search query tracking', () => { 292 | it('should track search queries with results', async () => { 293 | const searchRequest: CallToolRequest = { 294 | method: 'tools/call', 295 | params: { 296 | name: 'search_nodes', 297 | arguments: { query: 'webhook', mode: 'OR' } 298 | } 299 | }; 300 | 301 | // Mock search results 302 | vi.spyOn(mcpServer as any, 'executeTool').mockResolvedValue({ 303 | results: [ 304 | { nodeType: 'nodes-base.webhook', score: 0.95 }, 305 | { nodeType: 'nodes-base.httpRequest', score: 0.8 } 306 | ], 307 | totalResults: 2 308 | }); 309 | 310 | const server = (mcpServer as any).server; 311 | const callToolHandler = server.requestHandlers.get('tools/call'); 312 | 313 | if (callToolHandler) { 314 | await callToolHandler(searchRequest.params); 315 | } 316 | 317 | expect(telemetry.trackSearchQuery).toHaveBeenCalledWith('webhook', 2, 'OR'); 318 | }); 319 | 320 | it('should track zero-result searches', async () => { 321 | const zeroResultRequest: CallToolRequest = { 322 | method: 'tools/call', 323 | params: { 324 | name: 'search_nodes', 325 | arguments: { query: 'nonexistent', mode: 'AND' } 326 | } 327 | }; 328 | 329 | vi.spyOn(mcpServer as any, 'executeTool').mockResolvedValue({ 330 | results: [], 331 | totalResults: 0 332 | }); 333 | 334 | const server = (mcpServer as any).server; 335 | const callToolHandler = server.requestHandlers.get('tools/call'); 336 | 337 | if (callToolHandler) { 338 | await callToolHandler(zeroResultRequest.params); 339 | } 340 | 341 | expect(telemetry.trackSearchQuery).toHaveBeenCalledWith('nonexistent', 0, 'AND'); 342 | }); 343 | 344 | it('should track fallback search queries', async () => { 345 | const fallbackRequest: CallToolRequest = { 346 | method: 'tools/call', 347 | params: { 348 | name: 'search_nodes', 349 | arguments: { query: 'partial-match', mode: 'OR' } 350 | } 351 | }; 352 | 353 | // Mock main search with no results, triggering fallback 354 | vi.spyOn(mcpServer as any, 'executeTool').mockResolvedValue({ 355 | results: [{ nodeType: 'nodes-base.webhook', score: 0.6 }], 356 | totalResults: 1, 357 | usedFallback: true 358 | }); 359 | 360 | const server = (mcpServer as any).server; 361 | const callToolHandler = server.requestHandlers.get('tools/call'); 362 | 363 | if (callToolHandler) { 364 | await callToolHandler(fallbackRequest.params); 365 | } 366 | 367 | // Should track both main query and fallback 368 | expect(telemetry.trackSearchQuery).toHaveBeenCalledWith('partial-match', 0, 'OR'); 369 | expect(telemetry.trackSearchQuery).toHaveBeenCalledWith('partial-match', 1, 'OR_LIKE_FALLBACK'); 370 | }); 371 | }); 372 | 373 | describe('Workflow validation tracking', () => { 374 | 
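    // validate_workflow calls are expected to forward the raw workflow object to
    // trackWorkflowCreation with a pass/fail flag; failed runs additionally emit one
    // trackValidationDetails event per error (see the second case below).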
it('should track successful workflow creation', async () => { 375 | const workflow = { 376 | nodes: [ 377 | { id: '1', type: 'webhook', name: 'Webhook' }, 378 | { id: '2', type: 'httpRequest', name: 'HTTP Request' } 379 | ], 380 | connections: { 381 | '1': { main: [[{ node: '2', type: 'main', index: 0 }]] } 382 | } 383 | }; 384 | 385 | const validateRequest: CallToolRequest = { 386 | method: 'tools/call', 387 | params: { 388 | name: 'validate_workflow', 389 | arguments: { workflow } 390 | } 391 | }; 392 | 393 | vi.spyOn(mcpServer as any, 'executeTool').mockResolvedValue({ 394 | isValid: true, 395 | errors: [], 396 | warnings: [], 397 | summary: { totalIssues: 0, criticalIssues: 0 } 398 | }); 399 | 400 | const server = (mcpServer as any).server; 401 | const callToolHandler = server.requestHandlers.get('tools/call'); 402 | 403 | if (callToolHandler) { 404 | await callToolHandler(validateRequest.params); 405 | } 406 | 407 | expect(telemetry.trackWorkflowCreation).toHaveBeenCalledWith(workflow, true); 408 | }); 409 | 410 | it('should track validation details for failed workflows', async () => { 411 | const workflow = { 412 | nodes: [ 413 | { id: '1', type: 'invalid-node', name: 'Invalid Node' } 414 | ], 415 | connections: {} 416 | }; 417 | 418 | const validateRequest: CallToolRequest = { 419 | method: 'tools/call', 420 | params: { 421 | name: 'validate_workflow', 422 | arguments: { workflow } 423 | } 424 | }; 425 | 426 | const validationResult = { 427 | isValid: false, 428 | errors: [ 429 | { 430 | nodeId: '1', 431 | nodeType: 'invalid-node', 432 | category: 'node_validation', 433 | severity: 'error', 434 | message: 'Unknown node type', 435 | details: { type: 'unknown_node_type' } 436 | } 437 | ], 438 | warnings: [], 439 | summary: { totalIssues: 1, criticalIssues: 1 } 440 | }; 441 | 442 | vi.spyOn(mcpServer as any, 'executeTool').mockResolvedValue(validationResult); 443 | 444 | const server = (mcpServer as any).server; 445 | const callToolHandler = server.requestHandlers.get('tools/call'); 446 | 447 | if (callToolHandler) { 448 | await callToolHandler(validateRequest.params); 449 | } 450 | 451 | expect(telemetry.trackValidationDetails).toHaveBeenCalledWith( 452 | 'invalid-node', 453 | 'unknown_node_type', 454 | expect.objectContaining({ 455 | category: 'node_validation', 456 | severity: 'error' 457 | }) 458 | ); 459 | }); 460 | }); 461 | 462 | describe('Node configuration tracking', () => { 463 | it('should track node configuration validation', async () => { 464 | const validateNodeRequest: CallToolRequest = { 465 | method: 'tools/call', 466 | params: { 467 | name: 'validate_node_operation', 468 | arguments: { 469 | nodeType: 'nodes-base.httpRequest', 470 | config: { url: 'https://api.example.com', method: 'GET' } 471 | } 472 | } 473 | }; 474 | 475 | vi.spyOn(mcpServer as any, 'executeTool').mockResolvedValue({ 476 | isValid: true, 477 | errors: [], 478 | warnings: [], 479 | nodeConfig: { url: 'https://api.example.com', method: 'GET' } 480 | }); 481 | 482 | const server = (mcpServer as any).server; 483 | const callToolHandler = server.requestHandlers.get('tools/call'); 484 | 485 | if (callToolHandler) { 486 | await callToolHandler(validateNodeRequest.params); 487 | } 488 | 489 | // Should track the validation attempt 490 | expect(telemetry.trackToolUsage).toHaveBeenCalledWith( 491 | 'validate_node_operation', 492 | true, 493 | expect.any(Number) 494 | ); 495 | }); 496 | }); 497 | 498 | describe('Performance metric tracking', () => { 499 | it('should track slow tool executions', async () => { 
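      // The mocked executeTool below sleeps ~2 seconds; the assertion only requires the
      // tracked duration to exceed 1500 ms so the test tolerates timer jitter.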
500 | const slowToolRequest: CallToolRequest = { 501 | method: 'tools/call', 502 | params: { 503 | name: 'list_nodes', 504 | arguments: { limit: 1000 } 505 | } 506 | }; 507 | 508 | // Mock a slow operation 509 | vi.spyOn(mcpServer as any, 'executeTool').mockImplementation(async () => { 510 | await new Promise(resolve => setTimeout(resolve, 2000)); // 2 second delay 511 | return { nodes: [], totalCount: 0 }; 512 | }); 513 | 514 | const server = (mcpServer as any).server; 515 | const callToolHandler = server.requestHandlers.get('tools/call'); 516 | 517 | if (callToolHandler) { 518 | await callToolHandler(slowToolRequest.params); 519 | } 520 | 521 | expect(telemetry.trackToolUsage).toHaveBeenCalledWith( 522 | 'list_nodes', 523 | true, 524 | expect.any(Number) 525 | ); 526 | 527 | // Verify duration is tracked (should be around 2000ms) 528 | const trackUsageCall = vi.mocked(telemetry.trackToolUsage).mock.calls[0]; 529 | expect(trackUsageCall[2]).toBeGreaterThan(1500); // Allow some variance 530 | }); 531 | }); 532 | 533 | describe('Tool listing and capabilities', () => { 534 | it('should handle tool listing without telemetry interference', async () => { 535 | const listToolsRequest: ListToolsRequest = { 536 | method: 'tools/list', 537 | params: {} 538 | }; 539 | 540 | const server = (mcpServer as any).server; 541 | const listToolsHandler = server.requestHandlers.get('tools/list'); 542 | 543 | if (listToolsHandler) { 544 | const result = await listToolsHandler(listToolsRequest.params); 545 | expect(result).toHaveProperty('tools'); 546 | expect(Array.isArray(result.tools)).toBe(true); 547 | } 548 | 549 | // Tool listing shouldn't generate telemetry events 550 | expect(telemetry.trackToolUsage).not.toHaveBeenCalled(); 551 | }); 552 | }); 553 | 554 | describe('Error handling and telemetry', () => { 555 | it('should track errors without breaking MCP protocol', async () => { 556 | const errorRequest: CallToolRequest = { 557 | method: 'tools/call', 558 | params: { 559 | name: 'nonexistent_tool', 560 | arguments: {} 561 | } 562 | }; 563 | 564 | const server = (mcpServer as any).server; 565 | const callToolHandler = server.requestHandlers.get('tools/call'); 566 | 567 | if (callToolHandler) { 568 | try { 569 | await callToolHandler(errorRequest.params); 570 | } catch (error) { 571 | // Error should be handled by MCP server 572 | expect(error).toBeDefined(); 573 | } 574 | } 575 | 576 | // Should track error without throwing 577 | expect(telemetry.trackError).toHaveBeenCalled(); 578 | }); 579 | 580 | it('should handle telemetry errors gracefully', async () => { 581 | // Mock telemetry to throw an error 582 | vi.mocked(telemetry.trackToolUsage).mockImplementation(() => { 583 | throw new Error('Telemetry service unavailable'); 584 | }); 585 | 586 | const callToolRequest: CallToolRequest = { 587 | method: 'tools/call', 588 | params: { 589 | name: 'search_nodes', 590 | arguments: { query: 'webhook' } 591 | } 592 | }; 593 | 594 | vi.spyOn(mcpServer as any, 'executeTool').mockResolvedValue({ 595 | results: [], 596 | totalResults: 0 597 | }); 598 | 599 | const server = (mcpServer as any).server; 600 | const callToolHandler = server.requestHandlers.get('tools/call'); 601 | 602 | // Should not throw even if telemetry fails 603 | if (callToolHandler) { 604 | await expect(callToolHandler(callToolRequest.params)).resolves.toBeDefined(); 605 | } 606 | }); 607 | }); 608 | 609 | describe('Telemetry configuration integration', () => { 610 | it('should respect telemetry disabled state', async () => { 611 | 
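      // Telemetry is disabled at the config level here, yet the handler is still expected to
      // call trackToolUsage — filtering for the disabled state is the telemetry manager's job,
      // not the MCP server's.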
mockTelemetryConfig.isEnabled.mockReturnValue(false);
612 | 
613 |       const callToolRequest: CallToolRequest = {
614 |         method: 'tools/call',
615 |         params: {
616 |           name: 'search_nodes',
617 |           arguments: { query: 'webhook' }
618 |         }
619 |       };
620 | 
621 |       vi.spyOn(mcpServer as any, 'executeTool').mockResolvedValue({
622 |         results: [],
623 |         totalResults: 0
624 |       });
625 | 
626 |       const server = (mcpServer as any).server;
627 |       const callToolHandler = server.requestHandlers.get('tools/call');
628 | 
629 |       if (callToolHandler) {
630 |         await callToolHandler(callToolRequest.params);
631 |       }
632 | 
633 |       // Should still track if telemetry manager handles disabled state
634 |       // The actual filtering happens in telemetry manager, not MCP server
635 |       expect(telemetry.trackToolUsage).toHaveBeenCalled();
636 |     });
637 |   });
638 | 
639 |   describe('Complex workflow scenarios', () => {
640 |     it('should track comprehensive workflow validation scenario', async () => {
641 |       const complexWorkflow = {
642 |         nodes: [
643 |           { id: '1', type: 'webhook', name: 'Webhook Trigger' },
644 |           { id: '2', type: 'httpRequest', name: 'API Call', parameters: { url: 'https://api.example.com' } },
645 |           { id: '3', type: 'set', name: 'Transform Data' },
646 |           { id: '4', type: 'if', name: 'Conditional Logic' },
647 |           { id: '5', type: 'slack', name: 'Send Notification' }
648 |         ],
649 |         connections: {
650 |           '1': { main: [[{ node: '2', type: 'main', index: 0 }]] },
651 |           '2': { main: [[{ node: '3', type: 'main', index: 0 }]] },
652 |           '3': { main: [[{ node: '4', type: 'main', index: 0 }]] },
653 |           '4': { main: [[{ node: '5', type: 'main', index: 0 }]] }
654 |         }
655 |       };
656 | 
657 |       const validateRequest: CallToolRequest = {
658 |         method: 'tools/call',
659 |         params: {
660 |           name: 'validate_workflow',
661 |           arguments: { workflow: complexWorkflow }
662 |         }
663 |       };
664 | 
665 |       vi.spyOn(mcpServer as any, 'executeTool').mockResolvedValue({
666 |         isValid: true,
667 |         errors: [],
668 |         warnings: [
669 |           {
670 |             nodeId: '2',
671 |             nodeType: 'httpRequest',
672 |             category: 'configuration',
673 |             severity: 'warning',
674 |             message: 'Consider adding error handling'
675 |           }
676 |         ],
677 |         summary: { totalIssues: 1, criticalIssues: 0 }
678 |       });
679 | 
680 |       const server = (mcpServer as any).server;
681 |       const callToolHandler = server.requestHandlers.get('tools/call');
682 | 
683 |       if (callToolHandler) {
684 |         await callToolHandler(validateRequest.params);
685 |       }
686 | 
687 |       expect(telemetry.trackWorkflowCreation).toHaveBeenCalledWith(complexWorkflow, true);
688 |       expect(telemetry.trackToolUsage).toHaveBeenCalledWith(
689 |         'validate_workflow',
690 |         true,
691 |         expect.any(Number)
692 |       );
693 |     });
694 |   });
695 | 
696 |   describe('MCP server lifecycle and telemetry', () => {
697 |     it('should handle server initialization with telemetry', async () => {
698 |       // Set up minimal environment for server creation
699 |       process.env.NODE_DB_PATH = ':memory:';
700 | 
701 |       // Verify that server creation doesn't interfere with telemetry
702 |       const newServer = {} as N8NDocumentationMCPServer; // Mock instance
703 |       expect(newServer).toBeDefined();
704 | 
705 |       // Telemetry should still be functional
706 |       expect(telemetry.getMetrics).toBeDefined();
707 |       expect(typeof telemetry.trackToolUsage).toBe('function');
708 |     });
709 | 
710 |     it('should handle concurrent tool executions with telemetry', async () => {
711 |       const requests = [
712 |         {
713 |           method: 'tools/call' as const,
714 |           params: {
715 |             name: 'search_nodes',
716 |             arguments: { query: 'webhook' }
717 |           }
718 |         },
719 |         {
720 |           method: 'tools/call' as const,
721 |           params: {
722 |             name: 'search_nodes',
723 |             arguments: { query: 'http' }
724 |           }
725 |         },
726 |         {
727 |           method: 'tools/call' as const,
728 |           params: {
729 |             name: 'search_nodes',
730 |             arguments: { query: 'database' }
731 |           }
732 |         }
733 |       ];
734 | 
735 |       vi.spyOn(mcpServer as any, 'executeTool').mockResolvedValue({
736 |         results: [{ nodeType: 'test-node' }],
737 |         totalResults: 1
738 |       });
739 | 
740 |       const server = (mcpServer as any).server;
741 |       const callToolHandler = server.requestHandlers.get('tools/call');
742 | 
743 |       if (callToolHandler) {
744 |         await Promise.all(
745 |           requests.map(req => callToolHandler(req.params))
746 |         );
747 |       }
748 | 
749 |       // All three calls should be tracked
750 |       expect(telemetry.trackToolUsage).toHaveBeenCalledTimes(3);
751 |       expect(telemetry.trackSearchQuery).toHaveBeenCalledTimes(3);
752 |     });
753 |   });
754 | });
```

--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------

```yaml
1 | name: Automated Release
2 | 
3 | on:
4 |   push:
5 |     branches: [main]
6 |     paths:
7 |       - 'package.json'
8 |       - 'package.runtime.json'
9 | 
10 | permissions:
11 |   contents: write
12 |   packages: write
13 |   issues: write
14 |   pull-requests: write
15 | 
16 | # Prevent concurrent Docker pushes across all workflows (shared with docker-build.yml)
17 | # This ensures release.yml and docker-build.yml never push to 'latest' simultaneously
18 | concurrency:
19 |   group: docker-push-${{ github.ref }}
20 |   cancel-in-progress: false
21 | 
22 | env:
23 |   REGISTRY: ghcr.io
24 |   IMAGE_NAME: ${{ github.repository }}
25 | 
26 | jobs:
27 |   detect-version-change:
28 |     name: Detect Version Change
29 |     runs-on: ubuntu-latest
30 |     outputs:
31 |       version-changed: ${{ steps.check.outputs.changed }}
32 |       new-version: ${{ steps.check.outputs.version }}
33 |       previous-version: ${{ steps.check.outputs.previous-version }}
34 |       is-prerelease: ${{ steps.check.outputs.is-prerelease }}
35 |     steps:
36 |       - name: Checkout repository
37 |         uses: actions/checkout@v4
38 |         with:
39 |           fetch-depth: 2
40 | 
41 |       - name: Check for version change
42 |         id: check
43 |         run: |
44 |           # Get current version from package.json
45 |           CURRENT_VERSION=$(node -e "console.log(require('./package.json').version)")
46 | 
47 |           # Get previous version from git history safely
48 |           PREVIOUS_VERSION=$(git show HEAD~1:package.json 2>/dev/null | node -e "
49 |             try {
50 |               const data = require('fs').readFileSync(0, 'utf8');
51 |               const pkg = JSON.parse(data);
52 |               console.log(pkg.version || '0.0.0');
53 |             } catch (e) {
54 |               console.log('0.0.0');
55 |             }
56 |           " || echo "0.0.0")
57 | 
58 |           echo "Previous version: $PREVIOUS_VERSION"
59 |           echo "Current version: $CURRENT_VERSION"
60 | 
61 |           # Check if version changed
62 |           if [ "$CURRENT_VERSION" != "$PREVIOUS_VERSION" ]; then
63 |             echo "changed=true" >> $GITHUB_OUTPUT
64 |             echo "version=$CURRENT_VERSION" >> $GITHUB_OUTPUT
65 |             echo "previous-version=$PREVIOUS_VERSION" >> $GITHUB_OUTPUT
66 | 
67 |             # Check if it's a prerelease (contains alpha, beta, rc, dev)
68 |             if echo "$CURRENT_VERSION" | grep -E "(alpha|beta|rc|dev)" > /dev/null; then
69 |               echo "is-prerelease=true" >> $GITHUB_OUTPUT
70 |             else
71 |               echo "is-prerelease=false" >> $GITHUB_OUTPUT
72 |             fi
73 | 
74 |             echo "🎉 Version changed from $PREVIOUS_VERSION to $CURRENT_VERSION"
75 |           else
76 |             echo "changed=false" >> $GITHUB_OUTPUT
77 |             echo "version=$CURRENT_VERSION" >> $GITHUB_OUTPUT
78 |             echo "previous-version=$PREVIOUS_VERSION" >> $GITHUB_OUTPUT
79 |             echo "is-prerelease=false" >> $GITHUB_OUTPUT
80 |             echo "ℹ️ No version change detected"
81 |           fi
82 | 
83 |       - name: Validate version against npm registry
84 |         if: steps.check.outputs.changed == 'true'
85 |         run: |
86 |           CURRENT_VERSION="${{ steps.check.outputs.version }}"
87 | 
88 |           # Get latest version from npm (handle package not found)
89 |           NPM_VERSION=$(npm view n8n-mcp version 2>/dev/null || echo "0.0.0")
90 | 
91 |           echo "Current version: $CURRENT_VERSION"
92 |           echo "NPM registry version: $NPM_VERSION"
93 | 
94 |           # Check if version already exists in npm
95 |           if [ "$CURRENT_VERSION" = "$NPM_VERSION" ]; then
96 |             echo "❌ Error: Version $CURRENT_VERSION already published to npm"
97 |             echo "Please bump the version in package.json before releasing"
98 |             exit 1
99 |           fi
100 | 
101 |           # Simple semver comparison (assumes format: major.minor.patch)
102 |           # Compare if current version is greater than npm version
103 |           if [ "$NPM_VERSION" != "0.0.0" ]; then
104 |             # Sort versions and check if current is not the highest
105 |             HIGHEST=$(printf '%s\n%s' "$NPM_VERSION" "$CURRENT_VERSION" | sort -V | tail -n1)
106 |             if [ "$HIGHEST" != "$CURRENT_VERSION" ]; then
107 |               echo "❌ Error: Version $CURRENT_VERSION is not greater than npm version $NPM_VERSION"
108 |               echo "Please use a higher version number"
109 |               exit 1
110 |             fi
111 |           fi
112 | 
113 |           echo "✅ Version $CURRENT_VERSION is valid (higher than npm version $NPM_VERSION)"
114 | 
115 |   extract-changelog:
116 |     name: Extract Changelog
117 |     runs-on: ubuntu-latest
118 |     needs: detect-version-change
119 |     if: needs.detect-version-change.outputs.version-changed == 'true'
120 |     outputs:
121 |       release-notes: ${{ steps.extract.outputs.notes }}
122 |       has-notes: ${{ steps.extract.outputs.has-notes }}
123 |     steps:
124 |       - name: Checkout repository
125 |         uses: actions/checkout@v4
126 | 
127 |       - name: Extract changelog for version
128 |         id: extract
129 |         run: |
130 |           VERSION="${{ needs.detect-version-change.outputs.new-version }}"
131 |           CHANGELOG_FILE="docs/CHANGELOG.md"
132 | 
133 |           if [ ! -f "$CHANGELOG_FILE" ]; then
134 |             echo "Changelog file not found at $CHANGELOG_FILE"
135 |             echo "has-notes=false" >> $GITHUB_OUTPUT
136 |             echo "notes=No changelog entries found for version $VERSION" >> $GITHUB_OUTPUT
137 |             exit 0
138 |           fi
139 | 
140 |           # Use the extracted changelog script
141 |           if NOTES=$(node scripts/extract-changelog.js "$VERSION" "$CHANGELOG_FILE" 2>/dev/null); then
142 |             echo "has-notes=true" >> $GITHUB_OUTPUT
143 | 
144 |             # Use heredoc to properly handle multiline content
145 |             {
146 |               echo "notes<<EOF"
147 |               echo "$NOTES"
148 |               echo "EOF"
149 |             } >> $GITHUB_OUTPUT
150 | 
151 |             echo "✅ Successfully extracted changelog for version $VERSION"
152 |           else
153 |             echo "has-notes=false" >> $GITHUB_OUTPUT
154 |             echo "notes=No changelog entries found for version $VERSION" >> $GITHUB_OUTPUT
155 |             echo "⚠️ Could not extract changelog for version $VERSION"
156 |           fi
157 | 
158 |   create-release:
159 |     name: Create GitHub Release
160 |     runs-on: ubuntu-latest
161 |     needs: [detect-version-change, extract-changelog]
162 |     if: needs.detect-version-change.outputs.version-changed == 'true'
163 |     outputs:
164 |       release-id: ${{ steps.create.outputs.id }}
165 |       upload-url: ${{ steps.create.outputs.upload_url }}
166 |     steps:
167 |       - name: Checkout repository
168 |         uses: actions/checkout@v4
169 | 
170 |       - name: Create Git Tag
171 |         run: |
172 |           VERSION="${{ needs.detect-version-change.outputs.new-version }}"
173 |           git config user.name "github-actions[bot]"
174 |           git config user.email "github-actions[bot]@users.noreply.github.com"
175 | 
176 |           # Create annotated tag
177 |           git tag -a "v$VERSION" -m "Release v$VERSION"
178 |           git push origin "v$VERSION"
179 | 
180 |       - name: Create GitHub Release
181 |         id: create
182 |         env:
183 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
184 |         run: |
185 |           VERSION="${{ needs.detect-version-change.outputs.new-version }}"
186 |           IS_PRERELEASE="${{ needs.detect-version-change.outputs.is-prerelease }}"
187 | 
188 |           # Create release body
189 |           cat > release_body.md << 'EOF'
190 |           # Release v${{ needs.detect-version-change.outputs.new-version }}
191 | 
192 |           ${{ needs.extract-changelog.outputs.release-notes }}
193 | 
194 |           ---
195 | 
196 |           ## Installation
197 | 
198 |           ### NPM Package
199 |           ```bash
200 |           # Install globally
201 |           npm install -g n8n-mcp
202 | 
203 |           # Or run directly
204 |           npx n8n-mcp
205 |           ```
206 | 
207 |           ### Docker
208 |           ```bash
209 |           # Standard image
210 |           docker run -p 3000:3000 ghcr.io/czlonkowski/n8n-mcp:v${{ needs.detect-version-change.outputs.new-version }}
211 | 
212 |           # Railway optimized
213 |           docker run -p 3000:3000 ghcr.io/czlonkowski/n8n-mcp-railway:v${{ needs.detect-version-change.outputs.new-version }}
214 |           ```
215 | 
216 |           ## Documentation
217 |           - [Installation Guide](https://github.com/czlonkowski/n8n-mcp#installation)
218 |           - [Docker Deployment](https://github.com/czlonkowski/n8n-mcp/blob/main/docs/DOCKER_README.md)
219 |           - [n8n Integration](https://github.com/czlonkowski/n8n-mcp/blob/main/docs/N8N_DEPLOYMENT.md)
220 |           - [Complete Changelog](https://github.com/czlonkowski/n8n-mcp/blob/main/docs/CHANGELOG.md)
221 | 
222 |           🤖 *Generated with [Claude Code](https://claude.ai/code)*
223 |           EOF
224 | 
225 |           # Create release using gh CLI
226 |           if [ "$IS_PRERELEASE" = "true" ]; then
227 |             PRERELEASE_FLAG="--prerelease"
228 |           else
229 |             PRERELEASE_FLAG=""
230 |           fi
231 | 
232 |           gh release create "v$VERSION" \
233 |             --title "Release v$VERSION" \
234 |             --notes-file release_body.md \
235 |             $PRERELEASE_FLAG
236 | 
237 |           # Output release info for next jobs
238 |           RELEASE_ID=$(gh release view "v$VERSION" --json id --jq '.id')
239 |           echo "id=$RELEASE_ID" >> $GITHUB_OUTPUT
240 |           echo "upload_url=https://uploads.github.com/repos/${{ github.repository }}/releases/$RELEASE_ID/assets{?name,label}" >> $GITHUB_OUTPUT
241 | 
242 |   build-and-verify:
243 |     name: Build and Verify
244 |     runs-on: ubuntu-latest
245 |     needs: detect-version-change
246 |     if: needs.detect-version-change.outputs.version-changed == 'true'
247 |     steps:
248 |       - name: Checkout repository
249 |         uses: actions/checkout@v4
250 | 
251 |       - name: Setup Node.js
252 |         uses: actions/setup-node@v4
253 |         with:
254 |           node-version: 20
255 |           cache: 'npm'
256 | 
257 |       - name: Install dependencies
258 |         run: npm ci
259 | 
260 |       - name: Build project
261 |         run: npm run build
262 | 
263 |       # Database is already built and committed during development
264 |       # Rebuilding here causes segfault due to memory pressure (exit code 139)
265 |       - name: Verify database exists
266 |         run: |
267 |           if [ ! -f "data/nodes.db" ]; then
268 |             echo "❌ Error: data/nodes.db not found"
269 |             echo "Please run 'npm run rebuild' locally and commit the database"
270 |             exit 1
271 |           fi
272 |           echo "✅ Database exists ($(du -h data/nodes.db | cut -f1))"
273 | 
274 |       # Skip tests - they already passed in PR before merge
275 |       # Running them again on the same commit adds no safety, only time (~6-7 min)
276 | 
277 |       - name: Run type checking
278 |         run: npm run typecheck
279 | 
280 |   publish-npm:
281 |     name: Publish to NPM
282 |     runs-on: ubuntu-latest
283 |     needs: [detect-version-change, build-and-verify, create-release]
284 |     if: needs.detect-version-change.outputs.version-changed == 'true'
285 |     steps:
286 |       - name: Checkout repository
287 |         uses: actions/checkout@v4
288 | 
289 |       - name: Setup Node.js
290 |         uses: actions/setup-node@v4
291 |         with:
292 |           node-version: 20
293 |           cache: 'npm'
294 |           registry-url: 'https://registry.npmjs.org'
295 | 
296 |       - name: Install dependencies
297 |         run: npm ci
298 | 
299 |       - name: Build project
300 |         run: npm run build
301 | 
302 |       # Database is already built and committed during development
303 |       - name: Verify database exists
304 |         run: |
305 |           if [ ! -f "data/nodes.db" ]; then
306 |             echo "❌ Error: data/nodes.db not found"
307 |             exit 1
308 |           fi
309 |           echo "✅ Database exists ($(du -h data/nodes.db | cut -f1))"
310 | 
311 |       - name: Sync runtime version
312 |         run: npm run sync:runtime-version
313 | 
314 |       - name: Prepare package for publishing
315 |         run: |
316 |           # Create publish directory
317 |           PUBLISH_DIR="npm-publish-temp"
318 |           rm -rf $PUBLISH_DIR
319 |           mkdir -p $PUBLISH_DIR
320 | 
321 |           # Copy necessary files
322 |           cp -r dist $PUBLISH_DIR/
323 |           cp -r data $PUBLISH_DIR/
324 |           cp README.md $PUBLISH_DIR/
325 |           cp LICENSE $PUBLISH_DIR/
326 |           cp .env.example $PUBLISH_DIR/
327 | 
328 |           # Use runtime package.json as base
329 |           cp package.runtime.json $PUBLISH_DIR/package.json
330 | 
331 |           cd $PUBLISH_DIR
332 | 
333 |           # Update package.json with complete metadata
334 |           node -e "
335 |             const pkg = require('./package.json');
336 |             pkg.name = 'n8n-mcp';
337 |             pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)';
338 |             pkg.main = 'dist/index.js';
339 |             pkg.types = 'dist/index.d.ts';
340 |             pkg.exports = {
341 |               '.': {
342 |                 types: './dist/index.d.ts',
343 |                 require: './dist/index.js',
344 |                 import: './dist/index.js'
345 |               }
346 |             };
347 |             pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' };
348 |             pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' };
349 |             pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation'];
350 |             pkg.author = 'Romuald Czlonkowski @ www.aiadvisors.pl/en';
351 |             pkg.license = 'MIT';
352 |             pkg.bugs = { url: 'https://github.com/czlonkowski/n8n-mcp/issues' };
353 |             pkg.homepage = 'https://github.com/czlonkowski/n8n-mcp#readme';
354 |             pkg.files = ['dist/**/*', 'data/nodes.db', '.env.example', 'README.md', 'LICENSE'];
355 |             delete pkg.private;
356 |             require('fs').writeFileSync('./package.json', JSON.stringify(pkg, null, 2));
357 |           "
358 | 
359 |           echo "Package prepared for publishing:"
360 |           echo "Name: $(node -e "console.log(require('./package.json').name)")"
361 |           echo "Version: $(node -e "console.log(require('./package.json').version)")"
362 | 
363 |       - name: Publish to NPM with retry
364 |         uses: nick-invision/retry@v2
365 |         with:
366 |           timeout_minutes: 5
367 |           max_attempts: 3
368 |           command: |
369 |             cd npm-publish-temp
370 |             npm publish --access public
371 |         env:
372 |           NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
373 | 
374 |       - name: Clean up
375 |         if: always()
376 |         run: rm -rf npm-publish-temp
377 | 
378 |   build-docker:
379 |     name: Build and Push Docker Images
380 |     runs-on: ubuntu-latest
381 |     needs: [detect-version-change, build-and-verify]
382 |     if: needs.detect-version-change.outputs.version-changed == 'true'
383 |     permissions:
384 |       contents: read
385 |       packages: write
386 |     steps:
387 |       - name: Checkout repository
388 |         uses: actions/checkout@v4
389 |         with:
390 |           lfs: true
391 | 
392 |       - name: Check disk space
393 |         run: |
394 |           echo "Disk usage before Docker build:"
395 |           df -h
396 | 
397 |           # Check available space (require at least 2GB)
398 |           AVAILABLE_GB=$(df / --output=avail --block-size=1G | tail -1)
399 |           if [ "$AVAILABLE_GB" -lt 2 ]; then
400 |             echo "❌ Insufficient disk space: ${AVAILABLE_GB}GB available, 2GB required"
401 |             exit 1
402 |           fi
403 |           echo "✅ Sufficient disk space: ${AVAILABLE_GB}GB available"
404 | 
405 |       - name: Set up QEMU
406 |         uses: docker/setup-qemu-action@v3
407 | 
408 |       - name: Set up Docker Buildx
409 |         uses: docker/setup-buildx-action@v3
410 | 
411 |       - name: Log in to GitHub Container Registry
412 |         uses: docker/login-action@v3
413 |         with:
414 |           registry: ${{ env.REGISTRY }}
415 |           username: ${{ github.actor }}
416 |           password: ${{ secrets.GITHUB_TOKEN }}
417 | 
418 |       - name: Extract metadata for standard image
419 |         id: meta
420 |         uses: docker/metadata-action@v5
421 |         with:
422 |           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
423 |           tags: |
424 |             type=semver,pattern={{version}},value=v${{ needs.detect-version-change.outputs.new-version }}
425 |             type=semver,pattern={{major}}.{{minor}},value=v${{ needs.detect-version-change.outputs.new-version }}
426 |             type=semver,pattern={{major}},value=v${{ needs.detect-version-change.outputs.new-version }}
427 |             type=raw,value=latest,enable={{is_default_branch}}
428 | 
429 |       - name: Build and push standard Docker image
430 |         uses: docker/build-push-action@v5
431 |         with:
432 |           context: .
433 |           platforms: linux/amd64,linux/arm64
434 |           push: true
435 |           tags: ${{ steps.meta.outputs.tags }}
436 |           labels: ${{ steps.meta.outputs.labels }}
437 |           cache-from: type=gha
438 |           cache-to: type=gha,mode=max
439 | 
440 |       - name: Verify multi-arch manifest for latest tag
441 |         run: |
442 |           echo "Verifying multi-arch manifest for latest tag..."
443 | 
444 |           # Retry with exponential backoff (registry propagation can take time)
445 |           MAX_ATTEMPTS=5
446 |           ATTEMPT=1
447 |           WAIT_TIME=2
448 | 
449 |           while [ $ATTEMPT -le $MAX_ATTEMPTS ]; do
450 |             echo "Attempt $ATTEMPT of $MAX_ATTEMPTS..."
451 | 
452 |             MANIFEST=$(docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest 2>&1 || true)
453 | 
454 |             # Check for both platforms
455 |             if echo "$MANIFEST" | grep -q "linux/amd64" && echo "$MANIFEST" | grep -q "linux/arm64"; then
456 |               echo "✅ Multi-arch manifest verified: both amd64 and arm64 present"
457 |               echo "$MANIFEST"
458 |               exit 0
459 |             fi
460 | 
461 |             if [ $ATTEMPT -lt $MAX_ATTEMPTS ]; then
462 |               echo "⏳ Registry still propagating, waiting ${WAIT_TIME}s before retry..."
463 |               sleep $WAIT_TIME
464 |               WAIT_TIME=$((WAIT_TIME * 2))  # Exponential backoff: 2s, 4s, 8s, 16s
465 |             fi
466 | 
467 |             ATTEMPT=$((ATTEMPT + 1))
468 |           done
469 | 
470 |           echo "❌ ERROR: Multi-arch manifest incomplete after $MAX_ATTEMPTS attempts!"
471 |           echo "$MANIFEST"
472 |           exit 1
473 | 
474 |       - name: Verify multi-arch manifest for version tag
475 |         run: |
476 |           VERSION="${{ needs.detect-version-change.outputs.new-version }}"
477 |           echo "Verifying multi-arch manifest for version tag :$VERSION (without 'v' prefix)..."
478 | 
479 |           # Retry with exponential backoff (registry propagation can take time)
480 |           MAX_ATTEMPTS=5
481 |           ATTEMPT=1
482 |           WAIT_TIME=2
483 | 
484 |           while [ $ATTEMPT -le $MAX_ATTEMPTS ]; do
485 |             echo "Attempt $ATTEMPT of $MAX_ATTEMPTS..."
486 | 
487 |             MANIFEST=$(docker buildx imagetools inspect ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:$VERSION 2>&1 || true)
488 | 
489 |             # Check for both platforms
490 |             if echo "$MANIFEST" | grep -q "linux/amd64" && echo "$MANIFEST" | grep -q "linux/arm64"; then
491 |               echo "✅ Multi-arch manifest verified for $VERSION: both amd64 and arm64 present"
492 |               echo "$MANIFEST"
493 |               exit 0
494 |             fi
495 | 
496 |             if [ $ATTEMPT -lt $MAX_ATTEMPTS ]; then
497 |               echo "⏳ Registry still propagating, waiting ${WAIT_TIME}s before retry..."
498 |               sleep $WAIT_TIME
499 |               WAIT_TIME=$((WAIT_TIME * 2))  # Exponential backoff: 2s, 4s, 8s, 16s
500 |             fi
501 | 
502 |             ATTEMPT=$((ATTEMPT + 1))
503 |           done
504 | 
505 |           echo "❌ ERROR: Multi-arch manifest incomplete for version $VERSION after $MAX_ATTEMPTS attempts!"
506 |           echo "$MANIFEST"
507 |           exit 1
508 | 
509 |       - name: Extract metadata for Railway image
510 |         id: meta-railway
511 |         uses: docker/metadata-action@v5
512 |         with:
513 |           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-railway
514 |           tags: |
515 |             type=semver,pattern={{version}},value=v${{ needs.detect-version-change.outputs.new-version }}
516 |             type=semver,pattern={{major}}.{{minor}},value=v${{ needs.detect-version-change.outputs.new-version }}
517 |             type=semver,pattern={{major}},value=v${{ needs.detect-version-change.outputs.new-version }}
518 |             type=raw,value=latest,enable={{is_default_branch}}
519 | 
520 |       - name: Build and push Railway Docker image
521 |         uses: docker/build-push-action@v5
522 |         with:
523 |           context: .
524 |           file: ./Dockerfile.railway
525 |           platforms: linux/amd64
526 |           push: true
527 |           tags: ${{ steps.meta-railway.outputs.tags }}
528 |           labels: ${{ steps.meta-railway.outputs.labels }}
529 |           cache-from: type=gha
530 |           cache-to: type=gha,mode=max
531 | 
532 |   update-documentation:
533 |     name: Update Documentation
534 |     runs-on: ubuntu-latest
535 |     needs: [detect-version-change, create-release, publish-npm, build-docker]
536 |     if: needs.detect-version-change.outputs.version-changed == 'true' && !failure()
537 |     steps:
538 |       - name: Checkout repository
539 |         uses: actions/checkout@v4
540 |         with:
541 |           token: ${{ secrets.GITHUB_TOKEN }}
542 | 
543 |       - name: Setup Node.js
544 |         uses: actions/setup-node@v4
545 |         with:
546 |           node-version: 20
547 | 
548 |       - name: Update version badges in README
549 |         run: |
550 |           VERSION="${{ needs.detect-version-change.outputs.new-version }}"
551 | 
552 |           # Update README version badges
553 |           if [ -f "README.md" ]; then
554 |             # Update npm version badge
555 |             sed -i.bak "s|npm/v/n8n-mcp/[^)]*|npm/v/n8n-mcp/$VERSION|g" README.md
556 | 
557 |             # Update any other version references
558 |             sed -i.bak "s|version-[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*|version-$VERSION|g" README.md
559 | 
560 |             # Clean up backup file
561 |             rm -f README.md.bak
562 | 
563 |             echo "✅ Updated version badges in README.md to $VERSION"
564 |           fi
565 | 
566 |       - name: Commit documentation updates
567 |         env:
568 |           VERSION: ${{ needs.detect-version-change.outputs.new-version }}
569 |         run: |
570 |           git config user.name "github-actions[bot]"
571 |           git config user.email "github-actions[bot]@users.noreply.github.com"
572 | 
573 |           if git diff --quiet; then
574 |             echo "No documentation changes to commit"
575 |           else
576 |             git add README.md
577 |             git commit -m "docs: update version badges to v${VERSION}"
578 |             git push
579 |             echo "✅ Committed documentation updates"
580 |           fi
581 | 
582 |   notify-completion:
583 |     name: Notify Release Completion
584 |     runs-on: ubuntu-latest
585 |     needs: [detect-version-change, create-release, publish-npm, build-docker, update-documentation]
586 |     if: always() && needs.detect-version-change.outputs.version-changed == 'true'
587 |     steps:
588 |       - name: Create release summary
589 |         run: |
590 |           VERSION="${{ needs.detect-version-change.outputs.new-version }}"
591 |           RELEASE_URL="https://github.com/${{ github.repository }}/releases/tag/v$VERSION"
592 | 
593 |           echo "## 🎉 Release v$VERSION Published Successfully!" >> $GITHUB_STEP_SUMMARY
594 |           echo "" >> $GITHUB_STEP_SUMMARY
595 |           echo "### ✅ Completed Tasks:" >> $GITHUB_STEP_SUMMARY
596 |           echo "" >> $GITHUB_STEP_SUMMARY
597 | 
598 |           # Check job statuses
599 |           if [ "${{ needs.create-release.result }}" = "success" ]; then
600 |             echo "- ✅ GitHub Release created: [$RELEASE_URL]($RELEASE_URL)" >> $GITHUB_STEP_SUMMARY
601 |           else
602 |             echo "- ❌ GitHub Release creation failed" >> $GITHUB_STEP_SUMMARY
603 |           fi
604 | 
605 |           if [ "${{ needs.publish-npm.result }}" = "success" ]; then
606 |             echo "- ✅ NPM package published: [npmjs.com/package/n8n-mcp](https://www.npmjs.com/package/n8n-mcp)" >> $GITHUB_STEP_SUMMARY
607 |           else
608 |             echo "- ❌ NPM publishing failed" >> $GITHUB_STEP_SUMMARY
609 |           fi
610 | 
611 |           if [ "${{ needs.build-docker.result }}" = "success" ]; then
612 |             echo "- ✅ Docker images built and pushed" >> $GITHUB_STEP_SUMMARY
613 |             echo "  - Standard: \`ghcr.io/czlonkowski/n8n-mcp:v$VERSION\`" >> $GITHUB_STEP_SUMMARY
614 |             echo "  - Railway: \`ghcr.io/czlonkowski/n8n-mcp-railway:v$VERSION\`" >> $GITHUB_STEP_SUMMARY
615 |           else
616 |             echo "- ❌ Docker image building failed" >> $GITHUB_STEP_SUMMARY
617 |           fi
618 | 
619 |           if [ "${{ needs.update-documentation.result }}" = "success" ]; then
620 |             echo "- ✅ Documentation updated" >> $GITHUB_STEP_SUMMARY
621 |           else
622 |             echo "- ⚠️ Documentation update skipped or failed" >> $GITHUB_STEP_SUMMARY
623 |           fi
624 | 
625 |           echo "" >> $GITHUB_STEP_SUMMARY
626 |           echo "### 📦 Installation:" >> $GITHUB_STEP_SUMMARY
627 |           echo "" >> $GITHUB_STEP_SUMMARY
628 |           echo "\`\`\`bash" >> $GITHUB_STEP_SUMMARY
629 |           echo "# NPM" >> $GITHUB_STEP_SUMMARY
630 |           echo "npx n8n-mcp" >> $GITHUB_STEP_SUMMARY
631 |           echo "" >> $GITHUB_STEP_SUMMARY
632 |           echo "# Docker" >> $GITHUB_STEP_SUMMARY
633 |           echo "docker run -p 3000:3000 ghcr.io/czlonkowski/n8n-mcp:v$VERSION" >> $GITHUB_STEP_SUMMARY
634 |           echo "\`\`\`" >> $GITHUB_STEP_SUMMARY
635 | 
636 |           echo "🎉 Release automation completed for v$VERSION!"
```