This is page 4 of 63. Use http://codebase.md/czlonkowski/n8n-mcp?lines=true&page={x} to view the full context. # Directory Structure ``` ├── _config.yml ├── .claude │ └── agents │ ├── code-reviewer.md │ ├── context-manager.md │ ├── debugger.md │ ├── deployment-engineer.md │ ├── mcp-backend-engineer.md │ ├── n8n-mcp-tester.md │ ├── technical-researcher.md │ └── test-automator.md ├── .dockerignore ├── .env.docker ├── .env.example ├── .env.n8n.example ├── .env.test ├── .env.test.example ├── .github │ ├── ABOUT.md │ ├── BENCHMARK_THRESHOLDS.md │ ├── FUNDING.yml │ ├── gh-pages.yml │ ├── secret_scanning.yml │ └── workflows │ ├── benchmark-pr.yml │ ├── benchmark.yml │ ├── docker-build-fast.yml │ ├── docker-build-n8n.yml │ ├── docker-build.yml │ ├── release.yml │ ├── test.yml │ └── update-n8n-deps.yml ├── .gitignore ├── .npmignore ├── ATTRIBUTION.md ├── CHANGELOG.md ├── CLAUDE.md ├── codecov.yml ├── coverage.json ├── data │ ├── .gitkeep │ ├── nodes.db │ ├── nodes.db-shm │ ├── nodes.db-wal │ └── templates.db ├── deploy │ └── quick-deploy-n8n.sh ├── docker │ ├── docker-entrypoint.sh │ ├── n8n-mcp │ ├── parse-config.js │ └── README.md ├── docker-compose.buildkit.yml ├── docker-compose.extract.yml ├── docker-compose.n8n.yml ├── docker-compose.override.yml.example ├── docker-compose.test-n8n.yml ├── docker-compose.yml ├── Dockerfile ├── Dockerfile.railway ├── Dockerfile.test ├── docs │ ├── AUTOMATED_RELEASES.md │ ├── BENCHMARKS.md │ ├── bugfix-onSessionCreated-event.md │ ├── CHANGELOG.md │ ├── CLAUDE_CODE_SETUP.md │ ├── CLAUDE_INTERVIEW.md │ ├── CODECOV_SETUP.md │ ├── CODEX_SETUP.md │ ├── CURSOR_SETUP.md │ ├── DEPENDENCY_UPDATES.md │ ├── DOCKER_README.md │ ├── DOCKER_TROUBLESHOOTING.md │ ├── FINAL_AI_VALIDATION_SPEC.md │ ├── FLEXIBLE_INSTANCE_CONFIGURATION.md │ ├── HTTP_DEPLOYMENT.md │ ├── img │ │ ├── cc_command.png │ │ ├── cc_connected.png │ │ ├── codex_connected.png │ │ ├── cursor_tut.png │ │ ├── Railway_api.png │ │ ├── Railway_server_address.png │ │ ├── vsc_ghcp_chat_agent_mode.png │ │ ├── vsc_ghcp_chat_instruction_files.png │ │ ├── vsc_ghcp_chat_thinking_tool.png │ │ └── windsurf_tut.png │ ├── INSTALLATION.md │ ├── LIBRARY_USAGE.md │ ├── local │ │ ├── DEEP_DIVE_ANALYSIS_2025-10-02.md │ │ ├── DEEP_DIVE_ANALYSIS_README.md │ │ ├── Deep_dive_p1_p2.md │ │ ├── integration-testing-plan.md │ │ ├── integration-tests-phase1-summary.md │ │ ├── N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md │ │ ├── P0_IMPLEMENTATION_PLAN.md │ │ └── TEMPLATE_MINING_ANALYSIS.md │ ├── MCP_ESSENTIALS_README.md │ ├── MCP_QUICK_START_GUIDE.md │ ├── MULTI_APP_INTEGRATION.md │ ├── N8N_DEPLOYMENT.md │ ├── RAILWAY_DEPLOYMENT.md │ ├── README_CLAUDE_SETUP.md │ ├── README.md │ ├── tools-documentation-usage.md │ ├── VS_CODE_PROJECT_SETUP.md │ ├── WINDSURF_SETUP.md │ └── workflow-diff-examples.md ├── examples │ └── enhanced-documentation-demo.js ├── fetch_log.txt ├── IMPLEMENTATION_GUIDE.md ├── LICENSE ├── MEMORY_N8N_UPDATE.md ├── MEMORY_TEMPLATE_UPDATE.md ├── monitor_fetch.sh ├── MVP_DEPLOYMENT_PLAN.md ├── N8N_HTTP_STREAMABLE_SETUP.md ├── n8n-nodes.db ├── P0-R3-TEST-PLAN.md ├── package-lock.json ├── package.json ├── package.runtime.json ├── PRIVACY.md ├── railway.json ├── README.md ├── renovate.json ├── scripts │ ├── analyze-optimization.sh │ ├── audit-schema-coverage.ts │ ├── build-optimized.sh │ ├── compare-benchmarks.js │ ├── demo-optimization.sh │ ├── deploy-http.sh │ ├── deploy-to-vm.sh │ ├── export-webhook-workflows.ts │ ├── extract-changelog.js │ ├── extract-from-docker.js │ ├── extract-nodes-docker.sh │ ├── extract-nodes-simple.sh │ ├── 
format-benchmark-results.js │ ├── generate-benchmark-stub.js │ ├── generate-detailed-reports.js │ ├── generate-test-summary.js │ ├── http-bridge.js │ ├── mcp-http-client.js │ ├── migrate-nodes-fts.ts │ ├── migrate-tool-docs.ts │ ├── n8n-docs-mcp.service │ ├── nginx-n8n-mcp.conf │ ├── prebuild-fts5.ts │ ├── prepare-release.js │ ├── publish-npm-quick.sh │ ├── publish-npm.sh │ ├── quick-test.ts │ ├── run-benchmarks-ci.js │ ├── sync-runtime-version.js │ ├── test-ai-validation-debug.ts │ ├── test-code-node-enhancements.ts │ ├── test-code-node-fixes.ts │ ├── test-docker-config.sh │ ├── test-docker-fingerprint.ts │ ├── test-docker-optimization.sh │ ├── test-docker.sh │ ├── test-empty-connection-validation.ts │ ├── test-error-message-tracking.ts │ ├── test-error-output-validation.ts │ ├── test-error-validation.js │ ├── test-essentials.ts │ ├── test-expression-code-validation.ts │ ├── test-expression-format-validation.js │ ├── test-fts5-search.ts │ ├── test-fuzzy-fix.ts │ ├── test-fuzzy-simple.ts │ ├── test-helpers-validation.ts │ ├── test-http-search.ts │ ├── test-http.sh │ ├── test-jmespath-validation.ts │ ├── test-multi-tenant-simple.ts │ ├── test-multi-tenant.ts │ ├── test-n8n-integration.sh │ ├── test-node-info.js │ ├── test-node-type-validation.ts │ ├── test-nodes-base-prefix.ts │ ├── test-operation-validation.ts │ ├── test-optimized-docker.sh │ ├── test-release-automation.js │ ├── test-search-improvements.ts │ ├── test-security.ts │ ├── test-single-session.sh │ ├── test-sqljs-triggers.ts │ ├── test-telemetry-debug.ts │ ├── test-telemetry-direct.ts │ ├── test-telemetry-env.ts │ ├── test-telemetry-integration.ts │ ├── test-telemetry-no-select.ts │ ├── test-telemetry-security.ts │ ├── test-telemetry-simple.ts │ ├── test-typeversion-validation.ts │ ├── test-url-configuration.ts │ ├── test-user-id-persistence.ts │ ├── test-webhook-validation.ts │ ├── test-workflow-insert.ts │ ├── test-workflow-sanitizer.ts │ ├── test-workflow-tracking-debug.ts │ ├── update-and-publish-prep.sh │ ├── update-n8n-deps.js │ ├── update-readme-version.js │ ├── vitest-benchmark-json-reporter.js │ └── vitest-benchmark-reporter.ts ├── SECURITY.md ├── src │ ├── config │ │ └── n8n-api.ts │ ├── data │ │ └── canonical-ai-tool-examples.json │ ├── database │ │ ├── database-adapter.ts │ │ ├── migrations │ │ │ └── add-template-node-configs.sql │ │ ├── node-repository.ts │ │ ├── nodes.db │ │ ├── schema-optimized.sql │ │ └── schema.sql │ ├── errors │ │ └── validation-service-error.ts │ ├── http-server-single-session.ts │ ├── http-server.ts │ ├── index.ts │ ├── loaders │ │ └── node-loader.ts │ ├── mappers │ │ └── docs-mapper.ts │ ├── mcp │ │ ├── handlers-n8n-manager.ts │ │ ├── handlers-workflow-diff.ts │ │ ├── index.ts │ │ ├── server.ts │ │ ├── stdio-wrapper.ts │ │ ├── tool-docs │ │ │ ├── configuration │ │ │ │ ├── get-node-as-tool-info.ts │ │ │ │ ├── get-node-documentation.ts │ │ │ │ ├── get-node-essentials.ts │ │ │ │ ├── get-node-info.ts │ │ │ │ ├── get-property-dependencies.ts │ │ │ │ ├── index.ts │ │ │ │ └── search-node-properties.ts │ │ │ ├── discovery │ │ │ │ ├── get-database-statistics.ts │ │ │ │ ├── index.ts │ │ │ │ ├── list-ai-tools.ts │ │ │ │ ├── list-nodes.ts │ │ │ │ └── search-nodes.ts │ │ │ ├── guides │ │ │ │ ├── ai-agents-guide.ts │ │ │ │ └── index.ts │ │ │ ├── index.ts │ │ │ ├── system │ │ │ │ ├── index.ts │ │ │ │ ├── n8n-diagnostic.ts │ │ │ │ ├── n8n-health-check.ts │ │ │ │ ├── n8n-list-available-tools.ts │ │ │ │ └── tools-documentation.ts │ │ │ ├── templates │ │ │ │ ├── get-template.ts │ │ │ │ ├── 
get-templates-for-task.ts │ │ │ │ ├── index.ts │ │ │ │ ├── list-node-templates.ts │ │ │ │ ├── list-tasks.ts │ │ │ │ ├── search-templates-by-metadata.ts │ │ │ │ └── search-templates.ts │ │ │ ├── types.ts │ │ │ ├── validation │ │ │ │ ├── index.ts │ │ │ │ ├── validate-node-minimal.ts │ │ │ │ ├── validate-node-operation.ts │ │ │ │ ├── validate-workflow-connections.ts │ │ │ │ ├── validate-workflow-expressions.ts │ │ │ │ └── validate-workflow.ts │ │ │ └── workflow_management │ │ │ ├── index.ts │ │ │ ├── n8n-autofix-workflow.ts │ │ │ ├── n8n-create-workflow.ts │ │ │ ├── n8n-delete-execution.ts │ │ │ ├── n8n-delete-workflow.ts │ │ │ ├── n8n-get-execution.ts │ │ │ ├── n8n-get-workflow-details.ts │ │ │ ├── n8n-get-workflow-minimal.ts │ │ │ ├── n8n-get-workflow-structure.ts │ │ │ ├── n8n-get-workflow.ts │ │ │ ├── n8n-list-executions.ts │ │ │ ├── n8n-list-workflows.ts │ │ │ ├── n8n-trigger-webhook-workflow.ts │ │ │ ├── n8n-update-full-workflow.ts │ │ │ ├── n8n-update-partial-workflow.ts │ │ │ └── n8n-validate-workflow.ts │ │ ├── tools-documentation.ts │ │ ├── tools-n8n-friendly.ts │ │ ├── tools-n8n-manager.ts │ │ ├── tools.ts │ │ └── workflow-examples.ts │ ├── mcp-engine.ts │ ├── mcp-tools-engine.ts │ ├── n8n │ │ ├── MCPApi.credentials.ts │ │ └── MCPNode.node.ts │ ├── parsers │ │ ├── node-parser.ts │ │ ├── property-extractor.ts │ │ └── simple-parser.ts │ ├── scripts │ │ ├── debug-http-search.ts │ │ ├── extract-from-docker.ts │ │ ├── fetch-templates-robust.ts │ │ ├── fetch-templates.ts │ │ ├── rebuild-database.ts │ │ ├── rebuild-optimized.ts │ │ ├── rebuild.ts │ │ ├── sanitize-templates.ts │ │ ├── seed-canonical-ai-examples.ts │ │ ├── test-autofix-documentation.ts │ │ ├── test-autofix-workflow.ts │ │ ├── test-execution-filtering.ts │ │ ├── test-node-suggestions.ts │ │ ├── test-protocol-negotiation.ts │ │ ├── test-summary.ts │ │ ├── test-webhook-autofix.ts │ │ ├── validate.ts │ │ └── validation-summary.ts │ ├── services │ │ ├── ai-node-validator.ts │ │ ├── ai-tool-validators.ts │ │ ├── confidence-scorer.ts │ │ ├── config-validator.ts │ │ ├── enhanced-config-validator.ts │ │ ├── example-generator.ts │ │ ├── execution-processor.ts │ │ ├── expression-format-validator.ts │ │ ├── expression-validator.ts │ │ ├── n8n-api-client.ts │ │ ├── n8n-validation.ts │ │ ├── node-documentation-service.ts │ │ ├── node-similarity-service.ts │ │ ├── node-specific-validators.ts │ │ ├── operation-similarity-service.ts │ │ ├── property-dependencies.ts │ │ ├── property-filter.ts │ │ ├── resource-similarity-service.ts │ │ ├── sqlite-storage-service.ts │ │ ├── task-templates.ts │ │ ├── universal-expression-validator.ts │ │ ├── workflow-auto-fixer.ts │ │ ├── workflow-diff-engine.ts │ │ └── workflow-validator.ts │ ├── telemetry │ │ ├── batch-processor.ts │ │ ├── config-manager.ts │ │ ├── early-error-logger.ts │ │ ├── error-sanitization-utils.ts │ │ ├── error-sanitizer.ts │ │ ├── event-tracker.ts │ │ ├── event-validator.ts │ │ ├── index.ts │ │ ├── performance-monitor.ts │ │ ├── rate-limiter.ts │ │ ├── startup-checkpoints.ts │ │ ├── telemetry-error.ts │ │ ├── telemetry-manager.ts │ │ ├── telemetry-types.ts │ │ └── workflow-sanitizer.ts │ ├── templates │ │ ├── batch-processor.ts │ │ ├── metadata-generator.ts │ │ ├── README.md │ │ ├── template-fetcher.ts │ │ ├── template-repository.ts │ │ └── template-service.ts │ ├── types │ │ ├── index.ts │ │ ├── instance-context.ts │ │ ├── n8n-api.ts │ │ ├── node-types.ts │ │ ├── session-restoration.ts │ │ └── workflow-diff.ts │ └── utils │ ├── auth.ts │ ├── bridge.ts │ ├── cache-utils.ts │ ├── 
console-manager.ts │ ├── documentation-fetcher.ts │ ├── enhanced-documentation-fetcher.ts │ ├── error-handler.ts │ ├── example-generator.ts │ ├── fixed-collection-validator.ts │ ├── logger.ts │ ├── mcp-client.ts │ ├── n8n-errors.ts │ ├── node-source-extractor.ts │ ├── node-type-normalizer.ts │ ├── node-type-utils.ts │ ├── node-utils.ts │ ├── npm-version-checker.ts │ ├── protocol-version.ts │ ├── simple-cache.ts │ ├── ssrf-protection.ts │ ├── template-node-resolver.ts │ ├── template-sanitizer.ts │ ├── url-detector.ts │ ├── validation-schemas.ts │ └── version.ts ├── supabase-telemetry-aggregation.sql ├── TELEMETRY_PRUNING_GUIDE.md ├── telemetry-pruning-analysis.md ├── test-output.txt ├── test-reinit-fix.sh ├── tests │ ├── __snapshots__ │ │ └── .gitkeep │ ├── auth.test.ts │ ├── benchmarks │ │ ├── database-queries.bench.ts │ │ ├── index.ts │ │ ├── mcp-tools.bench.ts │ │ ├── mcp-tools.bench.ts.disabled │ │ ├── mcp-tools.bench.ts.skip │ │ ├── node-loading.bench.ts.disabled │ │ ├── README.md │ │ ├── search-operations.bench.ts.disabled │ │ └── validation-performance.bench.ts.disabled │ ├── bridge.test.ts │ ├── comprehensive-extraction-test.js │ ├── data │ │ └── .gitkeep │ ├── debug-slack-doc.js │ ├── demo-enhanced-documentation.js │ ├── docker-tests-README.md │ ├── error-handler.test.ts │ ├── examples │ │ └── using-database-utils.test.ts │ ├── extracted-nodes-db │ │ ├── database-import.json │ │ ├── extraction-report.json │ │ ├── insert-nodes.sql │ │ ├── n8n-nodes-base__Airtable.json │ │ ├── n8n-nodes-base__Discord.json │ │ ├── n8n-nodes-base__Function.json │ │ ├── n8n-nodes-base__HttpRequest.json │ │ ├── n8n-nodes-base__If.json │ │ ├── n8n-nodes-base__Slack.json │ │ ├── n8n-nodes-base__SplitInBatches.json │ │ └── n8n-nodes-base__Webhook.json │ ├── factories │ │ ├── node-factory.ts │ │ └── property-definition-factory.ts │ ├── fixtures │ │ ├── .gitkeep │ │ ├── database │ │ │ └── test-nodes.json │ │ ├── factories │ │ │ ├── node.factory.ts │ │ │ └── parser-node.factory.ts │ │ └── template-configs.ts │ ├── helpers │ │ └── env-helpers.ts │ ├── http-server-auth.test.ts │ ├── integration │ │ ├── ai-validation │ │ │ ├── ai-agent-validation.test.ts │ │ │ ├── ai-tool-validation.test.ts │ │ │ ├── chat-trigger-validation.test.ts │ │ │ ├── e2e-validation.test.ts │ │ │ ├── helpers.ts │ │ │ ├── llm-chain-validation.test.ts │ │ │ ├── README.md │ │ │ └── TEST_REPORT.md │ │ ├── ci │ │ │ └── database-population.test.ts │ │ ├── database │ │ │ ├── connection-management.test.ts │ │ │ ├── empty-database.test.ts │ │ │ ├── fts5-search.test.ts │ │ │ ├── node-fts5-search.test.ts │ │ │ ├── node-repository.test.ts │ │ │ ├── performance.test.ts │ │ │ ├── template-node-configs.test.ts │ │ │ ├── template-repository.test.ts │ │ │ ├── test-utils.ts │ │ │ └── transactions.test.ts │ │ ├── database-integration.test.ts │ │ ├── docker │ │ │ ├── docker-config.test.ts │ │ │ ├── docker-entrypoint.test.ts │ │ │ └── test-helpers.ts │ │ ├── flexible-instance-config.test.ts │ │ ├── mcp │ │ │ └── template-examples-e2e.test.ts │ │ ├── mcp-protocol │ │ │ ├── basic-connection.test.ts │ │ │ ├── error-handling.test.ts │ │ │ ├── performance.test.ts │ │ │ ├── protocol-compliance.test.ts │ │ │ ├── README.md │ │ │ ├── session-management.test.ts │ │ │ ├── test-helpers.ts │ │ │ ├── tool-invocation.test.ts │ │ │ └── workflow-error-validation.test.ts │ │ ├── msw-setup.test.ts │ │ ├── n8n-api │ │ │ ├── executions │ │ │ │ ├── delete-execution.test.ts │ │ │ │ ├── get-execution.test.ts │ │ │ │ ├── list-executions.test.ts │ │ │ │ └── trigger-webhook.test.ts │ │ 
│ ├── scripts │ │ │ │ └── cleanup-orphans.ts │ │ │ ├── system │ │ │ │ ├── diagnostic.test.ts │ │ │ │ ├── health-check.test.ts │ │ │ │ └── list-tools.test.ts │ │ │ ├── test-connection.ts │ │ │ ├── types │ │ │ │ └── mcp-responses.ts │ │ │ ├── utils │ │ │ │ ├── cleanup-helpers.ts │ │ │ │ ├── credentials.ts │ │ │ │ ├── factories.ts │ │ │ │ ├── fixtures.ts │ │ │ │ ├── mcp-context.ts │ │ │ │ ├── n8n-client.ts │ │ │ │ ├── node-repository.ts │ │ │ │ ├── response-types.ts │ │ │ │ ├── test-context.ts │ │ │ │ └── webhook-workflows.ts │ │ │ └── workflows │ │ │ ├── autofix-workflow.test.ts │ │ │ ├── create-workflow.test.ts │ │ │ ├── delete-workflow.test.ts │ │ │ ├── get-workflow-details.test.ts │ │ │ ├── get-workflow-minimal.test.ts │ │ │ ├── get-workflow-structure.test.ts │ │ │ ├── get-workflow.test.ts │ │ │ ├── list-workflows.test.ts │ │ │ ├── smart-parameters.test.ts │ │ │ ├── update-partial-workflow.test.ts │ │ │ ├── update-workflow.test.ts │ │ │ └── validate-workflow.test.ts │ │ ├── security │ │ │ ├── command-injection-prevention.test.ts │ │ │ └── rate-limiting.test.ts │ │ ├── session │ │ │ └── test-onSessionCreated-event.ts │ │ ├── session-lifecycle-retry.test.ts │ │ ├── session-persistence.test.ts │ │ ├── session-restoration-warmstart.test.ts │ │ ├── setup │ │ │ ├── integration-setup.ts │ │ │ └── msw-test-server.ts │ │ ├── telemetry │ │ │ ├── docker-user-id-stability.test.ts │ │ │ └── mcp-telemetry.test.ts │ │ ├── templates │ │ │ └── metadata-operations.test.ts │ │ └── workflow-creation-node-type-format.test.ts │ ├── logger.test.ts │ ├── MOCKING_STRATEGY.md │ ├── mocks │ │ ├── n8n-api │ │ │ ├── data │ │ │ │ ├── credentials.ts │ │ │ │ ├── executions.ts │ │ │ │ └── workflows.ts │ │ │ ├── handlers.ts │ │ │ └── index.ts │ │ └── README.md │ ├── node-storage-export.json │ ├── setup │ │ ├── global-setup.ts │ │ ├── msw-setup.ts │ │ ├── TEST_ENV_DOCUMENTATION.md │ │ └── test-env.ts │ ├── test-database-extraction.js │ ├── test-direct-extraction.js │ ├── test-enhanced-documentation.js │ ├── test-enhanced-integration.js │ ├── test-mcp-extraction.js │ ├── test-mcp-server-extraction.js │ ├── test-mcp-tools-integration.js │ ├── test-node-documentation-service.js │ ├── test-node-list.js │ ├── test-package-info.js │ ├── test-parsing-operations.js │ ├── test-slack-node-complete.js │ ├── test-small-rebuild.js │ ├── test-sqlite-search.js │ ├── test-storage-system.js │ ├── unit │ │ ├── __mocks__ │ │ │ ├── n8n-nodes-base.test.ts │ │ │ ├── n8n-nodes-base.ts │ │ │ └── README.md │ │ ├── database │ │ │ ├── __mocks__ │ │ │ │ └── better-sqlite3.ts │ │ │ ├── database-adapter-unit.test.ts │ │ │ ├── node-repository-core.test.ts │ │ │ ├── node-repository-operations.test.ts │ │ │ ├── node-repository-outputs.test.ts │ │ │ ├── README.md │ │ │ └── template-repository-core.test.ts │ │ ├── docker │ │ │ ├── config-security.test.ts │ │ │ ├── edge-cases.test.ts │ │ │ ├── parse-config.test.ts │ │ │ └── serve-command.test.ts │ │ ├── errors │ │ │ └── validation-service-error.test.ts │ │ ├── examples │ │ │ └── using-n8n-nodes-base-mock.test.ts │ │ ├── flexible-instance-security-advanced.test.ts │ │ ├── flexible-instance-security.test.ts │ │ ├── http-server │ │ │ └── multi-tenant-support.test.ts │ │ ├── http-server-n8n-mode.test.ts │ │ ├── http-server-n8n-reinit.test.ts │ │ ├── http-server-session-management.test.ts │ │ ├── loaders │ │ │ └── node-loader.test.ts │ │ ├── mappers │ │ │ └── docs-mapper.test.ts │ │ ├── mcp │ │ │ ├── get-node-essentials-examples.test.ts │ │ │ ├── handlers-n8n-manager-simple.test.ts │ │ │ ├── 
handlers-n8n-manager.test.ts │ │ │ ├── handlers-workflow-diff.test.ts │ │ │ ├── lru-cache-behavior.test.ts │ │ │ ├── multi-tenant-tool-listing.test.ts.disabled │ │ │ ├── parameter-validation.test.ts │ │ │ ├── search-nodes-examples.test.ts │ │ │ ├── tools-documentation.test.ts │ │ │ └── tools.test.ts │ │ ├── monitoring │ │ │ └── cache-metrics.test.ts │ │ ├── MULTI_TENANT_TEST_COVERAGE.md │ │ ├── multi-tenant-integration.test.ts │ │ ├── parsers │ │ │ ├── node-parser-outputs.test.ts │ │ │ ├── node-parser.test.ts │ │ │ ├── property-extractor.test.ts │ │ │ └── simple-parser.test.ts │ │ ├── scripts │ │ │ └── fetch-templates-extraction.test.ts │ │ ├── services │ │ │ ├── ai-node-validator.test.ts │ │ │ ├── ai-tool-validators.test.ts │ │ │ ├── confidence-scorer.test.ts │ │ │ ├── config-validator-basic.test.ts │ │ │ ├── config-validator-edge-cases.test.ts │ │ │ ├── config-validator-node-specific.test.ts │ │ │ ├── config-validator-security.test.ts │ │ │ ├── debug-validator.test.ts │ │ │ ├── enhanced-config-validator-integration.test.ts │ │ │ ├── enhanced-config-validator-operations.test.ts │ │ │ ├── enhanced-config-validator.test.ts │ │ │ ├── example-generator.test.ts │ │ │ ├── execution-processor.test.ts │ │ │ ├── expression-format-validator.test.ts │ │ │ ├── expression-validator-edge-cases.test.ts │ │ │ ├── expression-validator.test.ts │ │ │ ├── fixed-collection-validation.test.ts │ │ │ ├── loop-output-edge-cases.test.ts │ │ │ ├── n8n-api-client.test.ts │ │ │ ├── n8n-validation.test.ts │ │ │ ├── node-similarity-service.test.ts │ │ │ ├── node-specific-validators.test.ts │ │ │ ├── operation-similarity-service-comprehensive.test.ts │ │ │ ├── operation-similarity-service.test.ts │ │ │ ├── property-dependencies.test.ts │ │ │ ├── property-filter-edge-cases.test.ts │ │ │ ├── property-filter.test.ts │ │ │ ├── resource-similarity-service-comprehensive.test.ts │ │ │ ├── resource-similarity-service.test.ts │ │ │ ├── task-templates.test.ts │ │ │ ├── template-service.test.ts │ │ │ ├── universal-expression-validator.test.ts │ │ │ ├── validation-fixes.test.ts │ │ │ ├── workflow-auto-fixer.test.ts │ │ │ ├── workflow-diff-engine.test.ts │ │ │ ├── workflow-fixed-collection-validation.test.ts │ │ │ ├── workflow-validator-comprehensive.test.ts │ │ │ ├── workflow-validator-edge-cases.test.ts │ │ │ ├── workflow-validator-error-outputs.test.ts │ │ │ ├── workflow-validator-expression-format.test.ts │ │ │ ├── workflow-validator-loops-simple.test.ts │ │ │ ├── workflow-validator-loops.test.ts │ │ │ ├── workflow-validator-mocks.test.ts │ │ │ ├── workflow-validator-performance.test.ts │ │ │ ├── workflow-validator-with-mocks.test.ts │ │ │ └── workflow-validator.test.ts │ │ ├── session-lifecycle-events.test.ts │ │ ├── session-management-api.test.ts │ │ ├── session-restoration-retry.test.ts │ │ ├── session-restoration.test.ts │ │ ├── telemetry │ │ │ ├── batch-processor.test.ts │ │ │ ├── config-manager.test.ts │ │ │ ├── event-tracker.test.ts │ │ │ ├── event-validator.test.ts │ │ │ ├── rate-limiter.test.ts │ │ │ ├── telemetry-error.test.ts │ │ │ ├── telemetry-manager.test.ts │ │ │ ├── v2.18.3-fixes-verification.test.ts │ │ │ └── workflow-sanitizer.test.ts │ │ ├── templates │ │ │ ├── batch-processor.test.ts │ │ │ ├── metadata-generator.test.ts │ │ │ ├── template-repository-metadata.test.ts │ │ │ └── template-repository-security.test.ts │ │ ├── test-env-example.test.ts │ │ ├── test-infrastructure.test.ts │ │ ├── types │ │ │ ├── instance-context-coverage.test.ts │ │ │ └── instance-context-multi-tenant.test.ts │ │ ├── utils │ │ │ ├── 
auth-timing-safe.test.ts │ │ │ ├── cache-utils.test.ts │ │ │ ├── console-manager.test.ts │ │ │ ├── database-utils.test.ts │ │ │ ├── fixed-collection-validator.test.ts │ │ │ ├── n8n-errors.test.ts │ │ │ ├── node-type-normalizer.test.ts │ │ │ ├── node-type-utils.test.ts │ │ │ ├── node-utils.test.ts │ │ │ ├── simple-cache-memory-leak-fix.test.ts │ │ │ ├── ssrf-protection.test.ts │ │ │ └── template-node-resolver.test.ts │ │ └── validation-fixes.test.ts │ └── utils │ ├── assertions.ts │ ├── builders │ │ └── workflow.builder.ts │ ├── data-generators.ts │ ├── database-utils.ts │ ├── README.md │ └── test-helpers.ts ├── thumbnail.png ├── tsconfig.build.json ├── tsconfig.json ├── types │ ├── mcp.d.ts │ └── test-env.d.ts ├── verify-telemetry-fix.js ├── versioned-nodes.md ├── vitest.config.benchmark.ts ├── vitest.config.integration.ts └── vitest.config.ts ``` # Files -------------------------------------------------------------------------------- /scripts/test-webhook-validation.ts: -------------------------------------------------------------------------------- ```typescript 1 | #!/usr/bin/env npx tsx 2 | 3 | import { EnhancedConfigValidator } from '../src/services/enhanced-config-validator.js'; 4 | 5 | console.log('🧪 Testing Webhook Data Access Validation\n'); 6 | 7 | const testCases = [ 8 | { 9 | name: 'Direct webhook data access (incorrect)', 10 | config: { 11 | language: 'javaScript', 12 | jsCode: `// Processing data from Webhook node 13 | const prevWebhook = $('Webhook').first(); 14 | const command = items[0].json.testCommand; 15 | const data = items[0].json.payload; 16 | return [{json: {command, data}}];` 17 | }, 18 | expectWarning: true 19 | }, 20 | { 21 | name: 'Correct webhook data access through body', 22 | config: { 23 | language: 'javaScript', 24 | jsCode: `// Processing data from Webhook node 25 | const webhookData = items[0].json.body; 26 | const command = webhookData.testCommand; 27 | const data = webhookData.payload; 28 | return [{json: {command, data}}];` 29 | }, 30 | expectWarning: false 31 | }, 32 | { 33 | name: 'Common webhook field names without body', 34 | config: { 35 | language: 'javaScript', 36 | jsCode: `// Processing webhook 37 | const command = items[0].json.command; 38 | const action = items[0].json.action; 39 | const payload = items[0].json.payload; 40 | return [{json: {command, action, payload}}];` 41 | }, 42 | expectWarning: true 43 | }, 44 | { 45 | name: 'Non-webhook data access (should not warn)', 46 | config: { 47 | language: 'javaScript', 48 | jsCode: `// Processing data from HTTP Request node 49 | const data = items[0].json.results; 50 | const status = items[0].json.status; 51 | return [{json: {data, status}}];` 52 | }, 53 | expectWarning: false 54 | }, 55 | { 56 | name: 'Mixed correct and incorrect access', 57 | config: { 58 | language: 'javaScript', 59 | jsCode: `// Mixed access patterns 60 | const webhookBody = items[0].json.body; // Correct 61 | const directAccess = items[0].json.command; // Incorrect if webhook 62 | return [{json: {webhookBody, directAccess}}];` 63 | }, 64 | expectWarning: false // If user already uses .body, we assume they know the pattern 65 | } 66 | ]; 67 | 68 | let passCount = 0; 69 | let failCount = 0; 70 | 71 | for (const test of testCases) { 72 | console.log(`Test: ${test.name}`); 73 | const result = EnhancedConfigValidator.validateWithMode( 74 | 'nodes-base.code', 75 | test.config, 76 | [ 77 | { name: 'language', type: 'options', options: ['javaScript', 'python'] }, 78 | { name: 'jsCode', type: 'string' } 79 | ], 80 | 'operation', 81 
| 'ai-friendly' 82 | ); 83 | 84 | const hasWebhookWarning = result.warnings.some(w => 85 | w.message.includes('Webhook data is nested under .body') || 86 | w.message.includes('webhook data, remember it\'s nested under .body') 87 | ); 88 | 89 | const passed = hasWebhookWarning === test.expectWarning; 90 | 91 | console.log(` Expected warning: ${test.expectWarning}`); 92 | console.log(` Has webhook warning: ${hasWebhookWarning}`); 93 | console.log(` Result: ${passed ? '✅ PASS' : '❌ FAIL'}`); 94 | 95 | if (result.warnings.length > 0) { 96 | const relevantWarnings = result.warnings 97 | .filter(w => w.message.includes('webhook') || w.message.includes('Webhook')) 98 | .map(w => w.message); 99 | if (relevantWarnings.length > 0) { 100 | console.log(` Webhook warnings: ${relevantWarnings.join(', ')}`); 101 | } 102 | } 103 | 104 | if (passed) passCount++; 105 | else failCount++; 106 | 107 | console.log(); 108 | } 109 | 110 | console.log(`\n📊 Results: ${passCount} passed, ${failCount} failed`); 111 | console.log(failCount === 0 ? '✅ All webhook validation tests passed!' : '❌ Some tests failed'); ``` -------------------------------------------------------------------------------- /src/mcp/tool-docs/discovery/search-nodes.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { ToolDocumentation } from '../types'; 2 | 3 | export const searchNodesDoc: ToolDocumentation = { 4 | name: 'search_nodes', 5 | category: 'discovery', 6 | essentials: { 7 | description: 'Text search across node names and descriptions. Returns most relevant nodes first, with frequently-used nodes (HTTP Request, Webhook, Set, Code, Slack) prioritized in results. Searches all 525 nodes in the database.', 8 | keyParameters: ['query', 'mode', 'limit'], 9 | example: 'search_nodes({query: "webhook"})', 10 | performance: '<20ms even for complex queries', 11 | tips: [ 12 | 'OR mode (default): Matches any search word', 13 | 'AND mode: Requires all words present', 14 | 'FUZZY mode: Handles typos and spelling errors', 15 | 'Use quotes for exact phrases: "google sheets"' 16 | ] 17 | }, 18 | full: { 19 | description: 'Full-text search engine for n8n nodes using SQLite FTS5. Searches across node names, descriptions, and aliases. Results are ranked by relevance with commonly-used nodes given priority. Common nodes include: HTTP Request, Webhook, Set, Code, IF, Switch, Merge, SplitInBatches, Slack, Google Sheets.', 20 | parameters: { 21 | query: { type: 'string', description: 'Search keywords. Use quotes for exact phrases like "google sheets"', required: true }, 22 | limit: { type: 'number', description: 'Maximum results to return. Default: 20, Max: 100', required: false }, 23 | mode: { type: 'string', description: 'Search mode: "OR" (any word matches, default), "AND" (all words required), "FUZZY" (typo-tolerant)', required: false } 24 | }, 25 | returns: 'Array of node objects sorted by relevance score. Each object contains: nodeType, displayName, description, category, relevance score. 
Common nodes appear first when relevance is similar.', 26 | examples: [ 27 | 'search_nodes({query: "webhook"}) - Returns Webhook node as top result', 28 | 'search_nodes({query: "database"}) - Returns MySQL, Postgres, MongoDB, Redis, etc.', 29 | 'search_nodes({query: "google sheets", mode: "AND"}) - Requires both words', 30 | 'search_nodes({query: "slak", mode: "FUZZY"}) - Finds Slack despite typo', 31 | 'search_nodes({query: "http api"}) - Finds HTTP Request, GraphQL, REST nodes', 32 | 'search_nodes({query: "transform data"}) - Finds Set, Code, Function, Item Lists nodes' 33 | ], 34 | useCases: [ 35 | 'Finding nodes when you know partial names', 36 | 'Discovering nodes by functionality (e.g., "email", "database", "transform")', 37 | 'Handling user typos in node names', 38 | 'Finding all nodes related to a service (e.g., "google", "aws", "microsoft")' 39 | ], 40 | performance: '<20ms for simple queries, <50ms for complex FUZZY searches. Uses FTS5 index for speed', 41 | bestPractices: [ 42 | 'Start with single keywords for broadest results', 43 | 'Use FUZZY mode when users might misspell node names', 44 | 'AND mode works best for 2-3 word searches', 45 | 'Combine with get_node_essentials after finding the right node' 46 | ], 47 | pitfalls: [ 48 | 'AND mode searches all fields (name, description) not just node names', 49 | 'FUZZY mode with very short queries (1-2 chars) may return unexpected results', 50 | 'Exact matches in quotes are case-sensitive' 51 | ], 52 | relatedTools: ['list_nodes for browsing by category', 'get_node_essentials to configure found nodes', 'list_ai_tools for AI-specific search'] 53 | } 54 | }; ``` -------------------------------------------------------------------------------- /scripts/mcp-http-client.js: -------------------------------------------------------------------------------- ```javascript 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * Minimal MCP HTTP Client for Node.js v16 compatibility 5 | * This bypasses mcp-remote and its TransformStream dependency 6 | */ 7 | 8 | const http = require('http'); 9 | const https = require('https'); 10 | const readline = require('readline'); 11 | 12 | // Get configuration from command line arguments 13 | const url = process.argv[2]; 14 | const authToken = process.env.MCP_AUTH_TOKEN; 15 | 16 | if (!url) { 17 | console.error('Usage: node mcp-http-client.js <server-url>'); 18 | process.exit(1); 19 | } 20 | 21 | if (!authToken) { 22 | console.error('Error: MCP_AUTH_TOKEN environment variable is required'); 23 | process.exit(1); 24 | } 25 | 26 | // Parse URL 27 | const parsedUrl = new URL(url); 28 | const isHttps = parsedUrl.protocol === 'https:'; 29 | const httpModule = isHttps ? https : http; 30 | 31 | // Create readline interface for stdio 32 | const rl = readline.createInterface({ 33 | input: process.stdin, 34 | output: process.stdout, 35 | terminal: false 36 | }); 37 | 38 | // Buffer for incomplete JSON messages 39 | let buffer = ''; 40 | 41 | // Function to send JSON-RPC request 42 | function sendRequest(request) { 43 | const requestBody = JSON.stringify(request); 44 | 45 | const options = { 46 | hostname: parsedUrl.hostname, 47 | port: parsedUrl.port || (isHttps ? 
443 : 80), 48 | path: parsedUrl.pathname, 49 | method: 'POST', 50 | headers: { 51 | 'Content-Type': 'application/json', 52 | 'Content-Length': Buffer.byteLength(requestBody), 53 | 'Authorization': `Bearer ${authToken}` 54 | } 55 | }; 56 | 57 | const req = httpModule.request(options, (res) => { 58 | let responseData = ''; 59 | 60 | res.on('data', (chunk) => { 61 | responseData += chunk; 62 | }); 63 | 64 | res.on('end', () => { 65 | try { 66 | const response = JSON.parse(responseData); 67 | // Ensure the response has the correct structure 68 | if (response.jsonrpc && (response.result !== undefined || response.error !== undefined)) { 69 | console.log(JSON.stringify(response)); 70 | } else { 71 | // Wrap non-JSON-RPC responses 72 | console.log(JSON.stringify({ 73 | jsonrpc: '2.0', 74 | id: request.id || null, 75 | error: { 76 | code: -32603, 77 | message: 'Internal error', 78 | data: response 79 | } 80 | })); 81 | } 82 | } catch (err) { 83 | console.log(JSON.stringify({ 84 | jsonrpc: '2.0', 85 | id: request.id || null, 86 | error: { 87 | code: -32700, 88 | message: 'Parse error', 89 | data: err.message 90 | } 91 | })); 92 | } 93 | }); 94 | }); 95 | 96 | req.on('error', (err) => { 97 | console.log(JSON.stringify({ 98 | jsonrpc: '2.0', 99 | id: request.id || null, 100 | error: { 101 | code: -32000, 102 | message: 'Transport error', 103 | data: err.message 104 | } 105 | })); 106 | }); 107 | 108 | req.write(requestBody); 109 | req.end(); 110 | } 111 | 112 | // Process incoming JSON-RPC messages from stdin 113 | rl.on('line', (line) => { 114 | // Try to parse each line as a complete JSON-RPC message 115 | try { 116 | const request = JSON.parse(line); 117 | 118 | // Forward the request to the HTTP server 119 | sendRequest(request); 120 | } catch (err) { 121 | // Log parse errors to stdout in JSON-RPC format 122 | console.log(JSON.stringify({ 123 | jsonrpc: '2.0', 124 | id: null, 125 | error: { 126 | code: -32700, 127 | message: 'Parse error', 128 | data: err.message 129 | } 130 | })); 131 | } 132 | }); 133 | 134 | // Handle process termination 135 | process.on('SIGINT', () => { 136 | process.exit(0); 137 | }); 138 | 139 | process.on('SIGTERM', () => { 140 | process.exit(0); 141 | }); ``` -------------------------------------------------------------------------------- /tests/test-node-documentation-service.js: -------------------------------------------------------------------------------- ```javascript 1 | #!/usr/bin/env node 2 | 3 | const { NodeDocumentationService } = require('../dist/services/node-documentation-service'); 4 | 5 | async function testService() { 6 | console.log('=== Testing Node Documentation Service ===\n'); 7 | 8 | // Use the main database 9 | const service = new NodeDocumentationService('./data/nodes.db'); 10 | 11 | try { 12 | // Test 1: List nodes 13 | console.log('1️⃣ Testing list nodes...'); 14 | const nodes = await service.listNodes(); 15 | console.log(` Found ${nodes.length} nodes in database`); 16 | 17 | if (nodes.length === 0) { 18 | console.log('\n⚠️ No nodes found. 
Running rebuild...'); 19 | const stats = await service.rebuildDatabase(); 20 | console.log(` Rebuild complete: ${stats.successful} nodes stored`); 21 | } 22 | 23 | // Test 2: Get specific node info (IF node) 24 | console.log('\n2️⃣ Testing get node info for "If" node...'); 25 | const ifNode = await service.getNodeInfo('n8n-nodes-base.if'); 26 | 27 | if (ifNode) { 28 | console.log(' ✅ Found IF node:'); 29 | console.log(` Name: ${ifNode.displayName}`); 30 | console.log(` Description: ${ifNode.description}`); 31 | console.log(` Has source code: ${!!ifNode.sourceCode}`); 32 | console.log(` Source code length: ${ifNode.sourceCode?.length || 0} bytes`); 33 | console.log(` Has documentation: ${!!ifNode.documentation}`); 34 | console.log(` Has example: ${!!ifNode.exampleWorkflow}`); 35 | 36 | if (ifNode.exampleWorkflow) { 37 | console.log('\n 📋 Example workflow:'); 38 | console.log(JSON.stringify(ifNode.exampleWorkflow, null, 2).substring(0, 500) + '...'); 39 | } 40 | } else { 41 | console.log(' ❌ IF node not found'); 42 | } 43 | 44 | // Test 3: Search nodes 45 | console.log('\n3️⃣ Testing search functionality...'); 46 | 47 | // Search for webhook nodes 48 | const webhookNodes = await service.searchNodes({ query: 'webhook' }); 49 | console.log(`\n 🔍 Search for "webhook": ${webhookNodes.length} results`); 50 | webhookNodes.slice(0, 3).forEach(node => { 51 | console.log(` - ${node.displayName} (${node.nodeType})`); 52 | }); 53 | 54 | // Search for HTTP nodes 55 | const httpNodes = await service.searchNodes({ query: 'http' }); 56 | console.log(`\n 🔍 Search for "http": ${httpNodes.length} results`); 57 | httpNodes.slice(0, 3).forEach(node => { 58 | console.log(` - ${node.displayName} (${node.nodeType})`); 59 | }); 60 | 61 | // Test 4: Get statistics 62 | console.log('\n4️⃣ Testing database statistics...'); 63 | const stats = service.getStatistics(); 64 | console.log(' 📊 Database stats:'); 65 | console.log(` Total nodes: ${stats.totalNodes}`); 66 | console.log(` Nodes with docs: ${stats.nodesWithDocs}`); 67 | console.log(` Nodes with examples: ${stats.nodesWithExamples}`); 68 | console.log(` Trigger nodes: ${stats.triggerNodes}`); 69 | console.log(` Webhook nodes: ${stats.webhookNodes}`); 70 | console.log(` Total packages: ${stats.totalPackages}`); 71 | 72 | // Test 5: Category filtering 73 | console.log('\n5️⃣ Testing category filtering...'); 74 | const coreNodes = await service.searchNodes({ category: 'Core Nodes' }); 75 | console.log(` Found ${coreNodes.length} core nodes`); 76 | 77 | console.log('\n✅ All tests completed!'); 78 | 79 | } catch (error) { 80 | console.error('\n❌ Test failed:', error); 81 | process.exit(1); 82 | } finally { 83 | service.close(); 84 | } 85 | } 86 | 87 | // Run tests 88 | testService().catch(console.error); ``` -------------------------------------------------------------------------------- /scripts/prebuild-fts5.ts: -------------------------------------------------------------------------------- ```typescript 1 | #!/usr/bin/env npx tsx 2 | /** 3 | * Pre-build FTS5 indexes for the database 4 | * This ensures FTS5 tables are created before the database is deployed to Docker 5 | */ 6 | import { createDatabaseAdapter } from '../src/database/database-adapter'; 7 | import { logger } from '../src/utils/logger'; 8 | import * as fs from 'fs'; 9 | 10 | async function prebuildFTS5() { 11 | console.log('🔍 Pre-building FTS5 indexes...\n'); 12 | 13 | const dbPath = './data/nodes.db'; 14 | 15 | if (!fs.existsSync(dbPath)) { 16 | console.error('❌ Database not found at', dbPath); 17 | 
console.error(' Please run npm run rebuild first'); 18 | process.exit(1); 19 | } 20 | 21 | const db = await createDatabaseAdapter(dbPath); 22 | 23 | // Check FTS5 support 24 | const hasFTS5 = db.checkFTS5Support(); 25 | 26 | if (!hasFTS5) { 27 | console.log('ℹ️ FTS5 not supported in this SQLite build'); 28 | console.log(' Skipping FTS5 pre-build'); 29 | db.close(); 30 | return; 31 | } 32 | 33 | console.log('✅ FTS5 is supported'); 34 | 35 | try { 36 | // Create FTS5 virtual table for templates 37 | console.log('\n📋 Creating FTS5 table for templates...'); 38 | db.exec(` 39 | CREATE VIRTUAL TABLE IF NOT EXISTS templates_fts USING fts5( 40 | name, description, content=templates 41 | ); 42 | `); 43 | 44 | // Create triggers to keep FTS5 in sync 45 | console.log('🔗 Creating synchronization triggers...'); 46 | 47 | db.exec(` 48 | CREATE TRIGGER IF NOT EXISTS templates_ai AFTER INSERT ON templates BEGIN 49 | INSERT INTO templates_fts(rowid, name, description) 50 | VALUES (new.id, new.name, new.description); 51 | END; 52 | `); 53 | 54 | db.exec(` 55 | CREATE TRIGGER IF NOT EXISTS templates_au AFTER UPDATE ON templates BEGIN 56 | UPDATE templates_fts SET name = new.name, description = new.description 57 | WHERE rowid = new.id; 58 | END; 59 | `); 60 | 61 | db.exec(` 62 | CREATE TRIGGER IF NOT EXISTS templates_ad AFTER DELETE ON templates BEGIN 63 | DELETE FROM templates_fts WHERE rowid = old.id; 64 | END; 65 | `); 66 | 67 | // Rebuild FTS5 index from existing data 68 | console.log('🔄 Rebuilding FTS5 index from existing templates...'); 69 | 70 | // Clear existing FTS data 71 | db.exec('DELETE FROM templates_fts'); 72 | 73 | // Repopulate from templates table 74 | db.exec(` 75 | INSERT INTO templates_fts(rowid, name, description) 76 | SELECT id, name, description FROM templates 77 | `); 78 | 79 | // Get counts 80 | const templateCount = db.prepare('SELECT COUNT(*) as count FROM templates').get() as { count: number }; 81 | const ftsCount = db.prepare('SELECT COUNT(*) as count FROM templates_fts').get() as { count: number }; 82 | 83 | console.log(`\n✅ FTS5 pre-build complete!`); 84 | console.log(` Templates: ${templateCount.count}`); 85 | console.log(` FTS5 entries: ${ftsCount.count}`); 86 | 87 | // Test FTS5 search 88 | console.log('\n🧪 Testing FTS5 search...'); 89 | const testResults = db.prepare(` 90 | SELECT COUNT(*) as count FROM templates t 91 | JOIN templates_fts ON t.id = templates_fts.rowid 92 | WHERE templates_fts MATCH 'webhook' 93 | `).get() as { count: number }; 94 | 95 | console.log(` Found ${testResults.count} templates matching "webhook"`); 96 | 97 | } catch (error) { 98 | console.error('❌ Error pre-building FTS5:', error); 99 | process.exit(1); 100 | } 101 | 102 | db.close(); 103 | console.log('\n✅ Database is ready for Docker deployment!'); 104 | } 105 | 106 | // Run if called directly 107 | if (require.main === module) { 108 | prebuildFTS5().catch(console.error); 109 | } 110 | 111 | export { prebuildFTS5 }; ``` -------------------------------------------------------------------------------- /scripts/test-telemetry-debug.ts: -------------------------------------------------------------------------------- ```typescript 1 | #!/usr/bin/env npx tsx 2 | /** 3 | * Debug script for telemetry integration 4 | * Tests direct Supabase connection 5 | */ 6 | 7 | import { createClient } from '@supabase/supabase-js'; 8 | import dotenv from 'dotenv'; 9 | 10 | // Load environment variables 11 | dotenv.config(); 12 | 13 | async function debugTelemetry() { 14 | console.log('🔍 Debugging Telemetry 
Integration\n'); 15 | 16 | const supabaseUrl = process.env.SUPABASE_URL; 17 | const supabaseAnonKey = process.env.SUPABASE_ANON_KEY; 18 | 19 | if (!supabaseUrl || !supabaseAnonKey) { 20 | console.error('❌ Missing SUPABASE_URL or SUPABASE_ANON_KEY'); 21 | process.exit(1); 22 | } 23 | 24 | console.log('Environment:'); 25 | console.log(' URL:', supabaseUrl); 26 | console.log(' Key:', supabaseAnonKey.substring(0, 30) + '...'); 27 | 28 | // Create Supabase client 29 | const supabase = createClient(supabaseUrl, supabaseAnonKey, { 30 | auth: { 31 | persistSession: false, 32 | autoRefreshToken: false, 33 | } 34 | }); 35 | 36 | // Test 1: Direct insert to telemetry_events 37 | console.log('\n📝 Test 1: Direct insert to telemetry_events...'); 38 | const testEvent = { 39 | user_id: 'test-user-123', 40 | event: 'test_event', 41 | properties: { 42 | test: true, 43 | timestamp: new Date().toISOString() 44 | } 45 | }; 46 | 47 | const { data: eventData, error: eventError } = await supabase 48 | .from('telemetry_events') 49 | .insert([testEvent]) 50 | .select(); 51 | 52 | if (eventError) { 53 | console.error('❌ Event insert failed:', eventError); 54 | } else { 55 | console.log('✅ Event inserted successfully:', eventData); 56 | } 57 | 58 | // Test 2: Direct insert to telemetry_workflows 59 | console.log('\n📝 Test 2: Direct insert to telemetry_workflows...'); 60 | const testWorkflow = { 61 | user_id: 'test-user-123', 62 | workflow_hash: 'test-hash-' + Date.now(), 63 | node_count: 3, 64 | node_types: ['webhook', 'http', 'slack'], 65 | has_trigger: true, 66 | has_webhook: true, 67 | complexity: 'simple', 68 | sanitized_workflow: { 69 | nodes: [], 70 | connections: {} 71 | } 72 | }; 73 | 74 | const { data: workflowData, error: workflowError } = await supabase 75 | .from('telemetry_workflows') 76 | .insert([testWorkflow]) 77 | .select(); 78 | 79 | if (workflowError) { 80 | console.error('❌ Workflow insert failed:', workflowError); 81 | } else { 82 | console.log('✅ Workflow inserted successfully:', workflowData); 83 | } 84 | 85 | // Test 3: Try to read data (should fail with anon key due to RLS) 86 | console.log('\n📖 Test 3: Attempting to read data (should fail due to RLS)...'); 87 | const { data: readData, error: readError } = await supabase 88 | .from('telemetry_events') 89 | .select('*') 90 | .limit(1); 91 | 92 | if (readError) { 93 | console.log('✅ Read correctly blocked by RLS:', readError.message); 94 | } else { 95 | console.log('⚠️ Unexpected: Read succeeded (RLS may not be working):', readData); 96 | } 97 | 98 | // Test 4: Check table existence 99 | console.log('\n🔍 Test 4: Verifying tables exist...'); 100 | const { data: tables, error: tablesError } = await supabase 101 | .rpc('get_tables', { schema_name: 'public' }) 102 | .select('*'); 103 | 104 | if (tablesError) { 105 | // This is expected - the RPC function might not exist 106 | console.log('ℹ️ Cannot list tables (RPC function not available)'); 107 | } else { 108 | console.log('Tables found:', tables); 109 | } 110 | 111 | console.log('\n✨ Debug completed! 
Check your Supabase dashboard for the test data.'); 112 | console.log('Dashboard: https://supabase.com/dashboard/project/ydyufsohxdfpopqbubwk/editor'); 113 | } 114 | 115 | debugTelemetry().catch(error => { 116 | console.error('❌ Debug failed:', error); 117 | process.exit(1); 118 | }); ``` -------------------------------------------------------------------------------- /src/loaders/node-loader.ts: -------------------------------------------------------------------------------- ```typescript 1 | import path from 'path'; 2 | 3 | export interface LoadedNode { 4 | packageName: string; 5 | nodeName: string; 6 | NodeClass: any; 7 | } 8 | 9 | export class N8nNodeLoader { 10 | private readonly CORE_PACKAGES = [ 11 | { name: 'n8n-nodes-base', path: 'n8n-nodes-base' }, 12 | { name: '@n8n/n8n-nodes-langchain', path: '@n8n/n8n-nodes-langchain' } 13 | ]; 14 | 15 | async loadAllNodes(): Promise<LoadedNode[]> { 16 | const results: LoadedNode[] = []; 17 | 18 | for (const pkg of this.CORE_PACKAGES) { 19 | try { 20 | console.log(`\n📦 Loading package: ${pkg.name} from ${pkg.path}`); 21 | // Use the path property to locate the package 22 | const packageJson = require(`${pkg.path}/package.json`); 23 | console.log(` Found ${Object.keys(packageJson.n8n?.nodes || {}).length} nodes in package.json`); 24 | const nodes = await this.loadPackageNodes(pkg.name, pkg.path, packageJson); 25 | results.push(...nodes); 26 | } catch (error) { 27 | console.error(`Failed to load ${pkg.name}:`, error); 28 | } 29 | } 30 | 31 | return results; 32 | } 33 | 34 | private async loadPackageNodes(packageName: string, packagePath: string, packageJson: any): Promise<LoadedNode[]> { 35 | const n8nConfig = packageJson.n8n || {}; 36 | const nodes: LoadedNode[] = []; 37 | 38 | // Check if nodes is an array or object 39 | const nodesList = n8nConfig.nodes || []; 40 | 41 | if (Array.isArray(nodesList)) { 42 | // Handle array format (n8n-nodes-base uses this) 43 | for (const nodePath of nodesList) { 44 | try { 45 | const fullPath = require.resolve(`${packagePath}/${nodePath}`); 46 | const nodeModule = require(fullPath); 47 | 48 | // Extract node name from path (e.g., "dist/nodes/Slack/Slack.node.js" -> "Slack") 49 | const nodeNameMatch = nodePath.match(/\/([^\/]+)\.node\.(js|ts)$/); 50 | const nodeName = nodeNameMatch ? 
nodeNameMatch[1] : path.basename(nodePath, '.node.js'); 51 | 52 | // Handle default export and various export patterns 53 | const NodeClass = nodeModule.default || nodeModule[nodeName] || Object.values(nodeModule)[0]; 54 | if (NodeClass) { 55 | nodes.push({ packageName, nodeName, NodeClass }); 56 | console.log(` ✓ Loaded ${nodeName} from ${packageName}`); 57 | } else { 58 | console.warn(` ⚠ No valid export found for ${nodeName} in ${packageName}`); 59 | } 60 | } catch (error) { 61 | console.error(` ✗ Failed to load node from ${packageName}/${nodePath}:`, (error as Error).message); 62 | } 63 | } 64 | } else { 65 | // Handle object format (for other packages) 66 | for (const [nodeName, nodePath] of Object.entries(nodesList)) { 67 | try { 68 | const fullPath = require.resolve(`${packagePath}/${nodePath as string}`); 69 | const nodeModule = require(fullPath); 70 | 71 | // Handle default export and various export patterns 72 | const NodeClass = nodeModule.default || nodeModule[nodeName] || Object.values(nodeModule)[0]; 73 | if (NodeClass) { 74 | nodes.push({ packageName, nodeName, NodeClass }); 75 | console.log(` ✓ Loaded ${nodeName} from ${packageName}`); 76 | } else { 77 | console.warn(` ⚠ No valid export found for ${nodeName} in ${packageName}`); 78 | } 79 | } catch (error) { 80 | console.error(` ✗ Failed to load node ${nodeName} from ${packageName}:`, (error as Error).message); 81 | } 82 | } 83 | } 84 | 85 | return nodes; 86 | } 87 | } ``` -------------------------------------------------------------------------------- /src/mcp/tool-docs/workflow_management/n8n-create-workflow.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { ToolDocumentation } from '../types'; 2 | 3 | export const n8nCreateWorkflowDoc: ToolDocumentation = { 4 | name: 'n8n_create_workflow', 5 | category: 'workflow_management', 6 | essentials: { 7 | description: 'Create workflow. Requires: name, nodes[], connections{}. Created inactive. Returns workflow with ID.', 8 | keyParameters: ['name', 'nodes', 'connections'], 9 | example: 'n8n_create_workflow({name: "My Flow", nodes: [...], connections: {...}})', 10 | performance: 'Network-dependent', 11 | tips: [ 12 | 'Workflow created inactive', 13 | 'Returns ID for future updates', 14 | 'Validate first with validate_workflow' 15 | ] 16 | }, 17 | full: { 18 | description: 'Creates a new workflow in n8n with specified nodes and connections. Workflow is created in inactive state. Each node requires: id, name, type, typeVersion, position, and parameters.', 19 | parameters: { 20 | name: { type: 'string', required: true, description: 'Workflow name' }, 21 | nodes: { type: 'array', required: true, description: 'Array of nodes with id, name, type, typeVersion, position, parameters' }, 22 | connections: { type: 'object', required: true, description: 'Node connections. 
Keys are source node IDs' }, 23 | settings: { type: 'object', description: 'Optional workflow settings (timezone, error handling, etc.)' } 24 | }, 25 | returns: 'Created workflow object with id, name, nodes, connections, active status', 26 | examples: [ 27 | `// Basic webhook to Slack workflow 28 | n8n_create_workflow({ 29 | name: "Webhook to Slack", 30 | nodes: [ 31 | { 32 | id: "webhook_1", 33 | name: "Webhook", 34 | type: "n8n-nodes-base.webhook", 35 | typeVersion: 1, 36 | position: [250, 300], 37 | parameters: { 38 | httpMethod: "POST", 39 | path: "slack-notify" 40 | } 41 | }, 42 | { 43 | id: "slack_1", 44 | name: "Slack", 45 | type: "n8n-nodes-base.slack", 46 | typeVersion: 1, 47 | position: [450, 300], 48 | parameters: { 49 | resource: "message", 50 | operation: "post", 51 | channel: "#general", 52 | text: "={{$json.message}}" 53 | } 54 | } 55 | ], 56 | connections: { 57 | "webhook_1": { 58 | "main": [[{node: "slack_1", type: "main", index: 0}]] 59 | } 60 | } 61 | })`, 62 | `// Workflow with settings and error handling 63 | n8n_create_workflow({ 64 | name: "Data Processing", 65 | nodes: [...], 66 | connections: {...}, 67 | settings: { 68 | timezone: "America/New_York", 69 | errorWorkflow: "error_handler_workflow_id", 70 | saveDataSuccessExecution: "all", 71 | saveDataErrorExecution: "all" 72 | } 73 | })` 74 | ], 75 | useCases: [ 76 | 'Deploy validated workflows', 77 | 'Automate workflow creation', 78 | 'Clone workflow structures', 79 | 'Template deployment' 80 | ], 81 | performance: 'Network-dependent - Typically 100-500ms depending on workflow size', 82 | bestPractices: [ 83 | 'Validate with validate_workflow first', 84 | 'Use unique node IDs', 85 | 'Position nodes for readability', 86 | 'Test with n8n_trigger_webhook_workflow' 87 | ], 88 | pitfalls: [ 89 | '**REQUIRES N8N_API_URL and N8N_API_KEY environment variables** - tool unavailable without n8n API access', 90 | 'Workflows created in INACTIVE state - must activate separately', 91 | 'Node IDs must be unique within workflow', 92 | 'Credentials must be configured separately in n8n', 93 | 'Node type names must include package prefix (e.g., "n8n-nodes-base.slack")' 94 | ], 95 | relatedTools: ['validate_workflow', 'n8n_update_partial_workflow', 'n8n_trigger_webhook_workflow'] 96 | } 97 | }; ``` -------------------------------------------------------------------------------- /src/utils/url-detector.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { Request } from 'express'; 2 | import { logger } from './logger'; 3 | 4 | /** 5 | * Validates a hostname to prevent header injection attacks 6 | */ 7 | function isValidHostname(host: string): boolean { 8 | // Allow alphanumeric, dots, hyphens, and optional port 9 | return /^[a-zA-Z0-9.-]+(:[0-9]+)?$/.test(host) && host.length < 256; 10 | } 11 | 12 | /** 13 | * Validates a URL string 14 | */ 15 | function isValidUrl(url: string): boolean { 16 | try { 17 | const parsed = new URL(url); 18 | // Only allow http and https protocols 19 | return parsed.protocol === 'http:' || parsed.protocol === 'https:'; 20 | } catch { 21 | return false; 22 | } 23 | } 24 | 25 | /** 26 | * Detects the base URL for the server, considering: 27 | * 1. Explicitly configured BASE_URL or PUBLIC_URL 28 | * 2. Proxy headers (X-Forwarded-Proto, X-Forwarded-Host) 29 | * 3. Host and port configuration 30 | */ 31 | export function detectBaseUrl(req: Request | null, host: string, port: number): string { 32 | try { 33 | // 1. 
Check for explicitly configured URL 34 | const configuredUrl = process.env.BASE_URL || process.env.PUBLIC_URL; 35 | if (configuredUrl) { 36 | if (isValidUrl(configuredUrl)) { 37 | logger.debug('Using configured BASE_URL/PUBLIC_URL', { url: configuredUrl }); 38 | return configuredUrl.replace(/\/$/, ''); // Remove trailing slash 39 | } else { 40 | logger.warn('Invalid BASE_URL/PUBLIC_URL configured, falling back to auto-detection', { url: configuredUrl }); 41 | } 42 | } 43 | 44 | // 2. If we have a request, try to detect from proxy headers 45 | if (req && process.env.TRUST_PROXY && Number(process.env.TRUST_PROXY) > 0) { 46 | const proto = req.get('X-Forwarded-Proto') || req.protocol || 'http'; 47 | const forwardedHost = req.get('X-Forwarded-Host'); 48 | const hostHeader = req.get('Host'); 49 | 50 | const detectedHost = forwardedHost || hostHeader; 51 | if (detectedHost && isValidHostname(detectedHost)) { 52 | const baseUrl = `${proto}://${detectedHost}`; 53 | logger.debug('Detected URL from proxy headers', { 54 | proto, 55 | forwardedHost, 56 | hostHeader, 57 | baseUrl 58 | }); 59 | return baseUrl; 60 | } else if (detectedHost) { 61 | logger.warn('Invalid hostname detected in proxy headers, using fallback', { detectedHost }); 62 | } 63 | } 64 | 65 | // 3. Fall back to configured host and port 66 | const displayHost = host === '0.0.0.0' ? 'localhost' : host; 67 | const protocol = 'http'; // Default to http for local bindings 68 | 69 | // Don't show standard ports (for http only in this fallback case) 70 | const needsPort = port !== 80; 71 | const baseUrl = needsPort ? 72 | `${protocol}://${displayHost}:${port}` : 73 | `${protocol}://${displayHost}`; 74 | 75 | logger.debug('Using fallback URL from host/port', { 76 | host, 77 | displayHost, 78 | port, 79 | baseUrl 80 | }); 81 | 82 | return baseUrl; 83 | } catch (error) { 84 | logger.error('Error detecting base URL, using fallback', error); 85 | // Safe fallback 86 | return `http://localhost:${port}`; 87 | } 88 | } 89 | 90 | /** 91 | * Gets the base URL for console display during startup 92 | * This is used when we don't have a request object yet 93 | */ 94 | export function getStartupBaseUrl(host: string, port: number): string { 95 | return detectBaseUrl(null, host, port); 96 | } 97 | 98 | /** 99 | * Formats endpoint URLs for display 100 | */ 101 | export function formatEndpointUrls(baseUrl: string): { 102 | health: string; 103 | mcp: string; 104 | root: string; 105 | } { 106 | return { 107 | health: `${baseUrl}/health`, 108 | mcp: `${baseUrl}/mcp`, 109 | root: baseUrl 110 | }; 111 | } ``` -------------------------------------------------------------------------------- /src/mcp/tool-docs/system/tools-documentation.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { ToolDocumentation } from '../types'; 2 | 3 | export const toolsDocumentationDoc: ToolDocumentation = { 4 | name: 'tools_documentation', 5 | category: 'system', 6 | essentials: { 7 | description: 'The meta-documentation tool. Returns documentation for any MCP tool, including itself. Call without parameters for a comprehensive overview of all available tools. 
This is your starting point for discovering n8n MCP capabilities.', 8 | keyParameters: ['topic', 'depth'], 9 | example: 'tools_documentation({topic: "search_nodes"})', 10 | performance: 'Instant (static content)', 11 | tips: [ 12 | 'Call without parameters first to see all tools', 13 | 'Can document itself: tools_documentation({topic: "tools_documentation"})', 14 | 'Use depth:"full" for comprehensive details' 15 | ] 16 | }, 17 | full: { 18 | description: 'The self-referential documentation system for all MCP tools. This tool can document any other tool, including itself. It\'s the primary discovery mechanism for understanding what tools are available and how to use them. Returns utilitarian documentation optimized for AI agent consumption.', 19 | parameters: { 20 | topic: { type: 'string', description: 'Tool name (e.g., "search_nodes"), special topic ("javascript_code_node_guide", "python_code_node_guide"), or "overview". Leave empty for quick reference.', required: false }, 21 | depth: { type: 'string', description: 'Level of detail: "essentials" (default, concise) or "full" (comprehensive with examples)', required: false } 22 | }, 23 | returns: 'Markdown-formatted documentation tailored for the requested tool and depth. For essentials: key info, parameters, example, tips. For full: complete details, all examples, use cases, best practices.', 24 | examples: [ 25 | '// Get started - see all available tools', 26 | 'tools_documentation()', 27 | '', 28 | '// Learn about a specific tool', 29 | 'tools_documentation({topic: "search_nodes"})', 30 | '', 31 | '// Get comprehensive details', 32 | 'tools_documentation({topic: "validate_workflow", depth: "full"})', 33 | '', 34 | '// Self-referential example - document this tool', 35 | 'tools_documentation({topic: "tools_documentation", depth: "full"})', 36 | '', 37 | '// Code node guides', 38 | 'tools_documentation({topic: "javascript_code_node_guide"})', 39 | 'tools_documentation({topic: "python_code_node_guide"})' 40 | ], 41 | useCases: [ 42 | 'Initial discovery of available MCP tools', 43 | 'Learning how to use specific tools', 44 | 'Finding required and optional parameters', 45 | 'Getting working examples to copy', 46 | 'Understanding tool performance characteristics', 47 | 'Discovering related tools for workflows' 48 | ], 49 | performance: 'Instant - all documentation is pre-loaded in memory', 50 | bestPractices: [ 51 | 'Always start with tools_documentation() to see available tools', 52 | 'Use essentials for quick parameter reference during coding', 53 | 'Switch to full depth when debugging or learning new tools', 54 | 'Check Code node guides when working with Code nodes' 55 | ], 56 | pitfalls: [ 57 | 'Tool names must match exactly - use the overview to find correct names', 58 | 'Not all internal functions are documented', 59 | 'Special topics (code guides) require exact names' 60 | ], 61 | relatedTools: ['n8n_list_available_tools for dynamic tool discovery', 'list_tasks for common configurations', 'get_database_statistics to verify MCP connection'] 62 | } 63 | }; ``` -------------------------------------------------------------------------------- /src/mcp/tool-docs/templates/get-template.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { ToolDocumentation } from '../types'; 2 | 3 | export const getTemplateDoc: ToolDocumentation = { 4 | name: 'get_template', 5 | category: 'templates', 6 | essentials: { 7 | description: 'Get complete workflow JSON by ID. Ready to import. 
IDs from list_node_templates or search_templates.', 8 | keyParameters: ['templateId'], 9 | example: 'get_template({templateId: 1234})', 10 | performance: 'Fast (<100ms) - single database lookup', 11 | tips: [ 12 | 'Get template IDs from list_node_templates or search_templates first', 13 | 'Returns complete workflow JSON ready for import into n8n', 14 | 'Includes all nodes, connections, and settings' 15 | ] 16 | }, 17 | full: { 18 | description: `Retrieves the complete workflow JSON for a specific template by its ID. The returned workflow can be directly imported into n8n through the UI or API. This tool fetches pre-built workflows from the community template library containing 399+ curated workflows.`, 19 | parameters: { 20 | templateId: { 21 | type: 'number', 22 | required: true, 23 | description: 'The numeric ID of the template to retrieve. Get IDs from list_node_templates or search_templates' 24 | } 25 | }, 26 | returns: `Returns an object containing: 27 | - template: Complete template information including workflow JSON 28 | - id: Template ID 29 | - name: Template name 30 | - description: What the workflow does 31 | - author: Creator information (name, username, verified status) 32 | - nodes: Array of node types used 33 | - views: Number of times viewed 34 | - created: Creation date 35 | - url: Link to template on n8n.io 36 | - workflow: Complete workflow JSON with structure: 37 | - nodes: Array of node objects (id, name, type, typeVersion, position, parameters) 38 | - connections: Object mapping source nodes to targets 39 | - settings: Workflow configuration (timezone, error handling, etc.) 40 | - usage: Instructions for using the workflow`, 41 | examples: [ 42 | 'get_template({templateId: 1234}) - Get Slack notification workflow', 43 | 'get_template({templateId: 5678}) - Get data sync workflow', 44 | 'get_template({templateId: 9012}) - Get AI chatbot workflow' 45 | ], 46 | useCases: [ 47 | 'Download workflows for direct import into n8n', 48 | 'Study workflow patterns and best practices', 49 | 'Get complete workflow JSON for customization', 50 | 'Clone popular workflows for your use case', 51 | 'Learn how complex automations are built' 52 | ], 53 | performance: `Fast performance with single database lookup: 54 | - Query time: <10ms for template retrieval 55 | - Workflow JSON parsing: <50ms 56 | - Total response time: <100ms 57 | - No network calls (uses local cache)`, 58 | bestPractices: [ 59 | 'Always check if template exists before attempting modifications', 60 | 'Review workflow nodes before importing to ensure compatibility', 61 | 'Save template JSON locally if planning multiple customizations', 62 | 'Check template creation date for most recent patterns', 63 | 'Verify all required credentials are configured before import' 64 | ], 65 | pitfalls: [ 66 | 'Template IDs change when database is refreshed', 67 | 'Some templates may use deprecated node versions', 68 | 'Credentials in templates are placeholders - configure your own', 69 | 'Not all templates work with all n8n versions', 70 | 'Template may reference external services you don\'t have access to' 71 | ], 72 | relatedTools: ['list_node_templates', 'search_templates', 'get_templates_for_task', 'n8n_create_workflow'] 73 | } 74 | }; ``` -------------------------------------------------------------------------------- /tests/auth.test.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { describe, it, expect, vi, beforeEach } from 'vitest'; 2 | import { AuthManager } from 
'../src/utils/auth'; 3 | 4 | describe('AuthManager', () => { 5 | let authManager: AuthManager; 6 | 7 | beforeEach(() => { 8 | authManager = new AuthManager(); 9 | }); 10 | 11 | describe('validateToken', () => { 12 | it('should return true when no authentication is required', () => { 13 | expect(authManager.validateToken('any-token')).toBe(true); 14 | expect(authManager.validateToken(undefined)).toBe(true); 15 | }); 16 | 17 | it('should validate static token correctly', () => { 18 | const expectedToken = 'secret-token'; 19 | 20 | expect(authManager.validateToken('secret-token', expectedToken)).toBe(true); 21 | expect(authManager.validateToken('wrong-token', expectedToken)).toBe(false); 22 | expect(authManager.validateToken(undefined, expectedToken)).toBe(false); 23 | }); 24 | 25 | it('should validate generated tokens', () => { 26 | const token = authManager.generateToken(1); 27 | 28 | expect(authManager.validateToken(token, 'expected-token')).toBe(true); 29 | }); 30 | 31 | it('should reject expired tokens', () => { 32 | vi.useFakeTimers(); 33 | 34 | const token = authManager.generateToken(1); // 1 hour expiry 35 | 36 | // Token should be valid initially 37 | expect(authManager.validateToken(token, 'expected-token')).toBe(true); 38 | 39 | // Fast forward 2 hours 40 | vi.advanceTimersByTime(2 * 60 * 60 * 1000); 41 | 42 | // Token should be expired 43 | expect(authManager.validateToken(token, 'expected-token')).toBe(false); 44 | 45 | vi.useRealTimers(); 46 | }); 47 | }); 48 | 49 | describe('generateToken', () => { 50 | it('should generate unique tokens', () => { 51 | const token1 = authManager.generateToken(); 52 | const token2 = authManager.generateToken(); 53 | 54 | expect(token1).not.toBe(token2); 55 | expect(token1).toHaveLength(64); // 32 bytes hex = 64 chars 56 | }); 57 | 58 | it('should set custom expiry time', () => { 59 | vi.useFakeTimers(); 60 | 61 | const token = authManager.generateToken(24); // 24 hours 62 | 63 | // Token should be valid after 23 hours 64 | vi.advanceTimersByTime(23 * 60 * 60 * 1000); 65 | expect(authManager.validateToken(token, 'expected')).toBe(true); 66 | 67 | // Token should expire after 25 hours 68 | vi.advanceTimersByTime(2 * 60 * 60 * 1000); 69 | expect(authManager.validateToken(token, 'expected')).toBe(false); 70 | 71 | vi.useRealTimers(); 72 | }); 73 | }); 74 | 75 | describe('revokeToken', () => { 76 | it('should revoke a generated token', () => { 77 | const token = authManager.generateToken(); 78 | 79 | expect(authManager.validateToken(token, 'expected')).toBe(true); 80 | 81 | authManager.revokeToken(token); 82 | 83 | expect(authManager.validateToken(token, 'expected')).toBe(false); 84 | }); 85 | }); 86 | 87 | describe('static methods', () => { 88 | it('should hash tokens consistently', () => { 89 | const token = 'my-secret-token'; 90 | const hash1 = AuthManager.hashToken(token); 91 | const hash2 = AuthManager.hashToken(token); 92 | 93 | expect(hash1).toBe(hash2); 94 | expect(hash1).toHaveLength(64); // SHA256 hex = 64 chars 95 | }); 96 | 97 | it('should compare tokens securely', () => { 98 | const token = 'my-secret-token'; 99 | const hashedToken = AuthManager.hashToken(token); 100 | 101 | expect(AuthManager.compareTokens(token, hashedToken)).toBe(true); 102 | expect(AuthManager.compareTokens('wrong-token', hashedToken)).toBe(false); 103 | }); 104 | }); 105 | }); ``` -------------------------------------------------------------------------------- /tests/unit/utils/simple-cache-memory-leak-fix.test.ts: 
-------------------------------------------------------------------------------- ```typescript 1 | import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest'; 2 | import { SimpleCache } from '../../../src/utils/simple-cache'; 3 | 4 | describe('SimpleCache Memory Leak Fix', () => { 5 | let cache: SimpleCache; 6 | 7 | beforeEach(() => { 8 | vi.useFakeTimers(); 9 | }); 10 | 11 | afterEach(() => { 12 | if (cache && typeof cache.destroy === 'function') { 13 | cache.destroy(); 14 | } 15 | vi.restoreAllMocks(); 16 | }); 17 | 18 | it('should track cleanup timer', () => { 19 | cache = new SimpleCache(); 20 | // Access private property for testing 21 | expect((cache as any).cleanupTimer).toBeDefined(); 22 | expect((cache as any).cleanupTimer).not.toBeNull(); 23 | }); 24 | 25 | it('should clear timer on destroy', () => { 26 | cache = new SimpleCache(); 27 | const timer = (cache as any).cleanupTimer; 28 | 29 | cache.destroy(); 30 | 31 | expect((cache as any).cleanupTimer).toBeNull(); 32 | // Verify timer was cleared 33 | expect(() => clearInterval(timer)).not.toThrow(); 34 | }); 35 | 36 | it('should clear cache on destroy', () => { 37 | cache = new SimpleCache(); 38 | cache.set('test-key', 'test-value', 300); 39 | 40 | expect(cache.get('test-key')).toBe('test-value'); 41 | 42 | cache.destroy(); 43 | 44 | expect(cache.get('test-key')).toBeNull(); 45 | }); 46 | 47 | it('should handle multiple destroy calls safely', () => { 48 | cache = new SimpleCache(); 49 | 50 | expect(() => { 51 | cache.destroy(); 52 | cache.destroy(); 53 | cache.destroy(); 54 | }).not.toThrow(); 55 | 56 | expect((cache as any).cleanupTimer).toBeNull(); 57 | }); 58 | 59 | it('should not create new timers after destroy', () => { 60 | cache = new SimpleCache(); 61 | const originalTimer = (cache as any).cleanupTimer; 62 | 63 | cache.destroy(); 64 | 65 | // Try to use the cache after destroy 66 | cache.set('key', 'value'); 67 | cache.get('key'); 68 | cache.clear(); 69 | 70 | // Timer should still be null 71 | expect((cache as any).cleanupTimer).toBeNull(); 72 | expect((cache as any).cleanupTimer).not.toBe(originalTimer); 73 | }); 74 | 75 | it('should clean up expired entries periodically', () => { 76 | cache = new SimpleCache(); 77 | 78 | // Set items with different TTLs 79 | cache.set('short', 'value1', 1); // 1 second 80 | cache.set('long', 'value2', 300); // 300 seconds 81 | 82 | // Advance time by 2 seconds 83 | vi.advanceTimersByTime(2000); 84 | 85 | // Advance time to trigger cleanup (60 seconds) 86 | vi.advanceTimersByTime(58000); 87 | 88 | // Short-lived item should be gone 89 | expect(cache.get('short')).toBeNull(); 90 | // Long-lived item should still exist 91 | expect(cache.get('long')).toBe('value2'); 92 | }); 93 | 94 | it('should prevent memory leak by clearing timer', () => { 95 | const timers: NodeJS.Timeout[] = []; 96 | const originalSetInterval = global.setInterval; 97 | 98 | // Mock setInterval to track created timers 99 | global.setInterval = vi.fn((callback, delay) => { 100 | const timer = originalSetInterval(callback, delay); 101 | timers.push(timer); 102 | return timer; 103 | }); 104 | 105 | // Create and destroy multiple caches 106 | for (let i = 0; i < 5; i++) { 107 | const tempCache = new SimpleCache(); 108 | tempCache.set(`key${i}`, `value${i}`); 109 | tempCache.destroy(); 110 | } 111 | 112 | // All timers should have been cleared 113 | expect(timers.length).toBe(5); 114 | 115 | // Restore original setInterval 116 | global.setInterval = originalSetInterval; 117 | }); 118 | 119 | it('should have 
destroy method defined', () => { 120 | cache = new SimpleCache(); 121 | expect(typeof cache.destroy).toBe('function'); 122 | }); 123 | }); ``` -------------------------------------------------------------------------------- /src/telemetry/telemetry-types.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Telemetry Types and Interfaces 3 | * Centralized type definitions for the telemetry system 4 | */ 5 | 6 | import { StartupCheckpoint } from './startup-checkpoints'; 7 | 8 | export interface TelemetryEvent { 9 | user_id: string; 10 | event: string; 11 | properties: Record<string, any>; 12 | created_at?: string; 13 | } 14 | 15 | /** 16 | * Startup error event - captures pre-handshake failures 17 | */ 18 | export interface StartupErrorEvent extends TelemetryEvent { 19 | event: 'startup_error'; 20 | properties: { 21 | checkpoint: StartupCheckpoint; 22 | errorMessage: string; 23 | errorType: string; 24 | checkpointsPassed: StartupCheckpoint[]; 25 | checkpointsPassedCount: number; 26 | startupDuration: number; 27 | platform: string; 28 | arch: string; 29 | nodeVersion: string; 30 | isDocker: boolean; 31 | }; 32 | } 33 | 34 | /** 35 | * Startup completed event - confirms server is functional 36 | */ 37 | export interface StartupCompletedEvent extends TelemetryEvent { 38 | event: 'startup_completed'; 39 | properties: { 40 | version: string; 41 | }; 42 | } 43 | 44 | /** 45 | * Enhanced session start properties with startup tracking 46 | */ 47 | export interface SessionStartProperties { 48 | version: string; 49 | platform: string; 50 | arch: string; 51 | nodeVersion: string; 52 | isDocker: boolean; 53 | cloudPlatform: string | null; 54 | // NEW: Startup tracking fields (v2.18.2) 55 | startupDurationMs?: number; 56 | checkpointsPassed?: StartupCheckpoint[]; 57 | startupErrorCount?: number; 58 | } 59 | 60 | export interface WorkflowTelemetry { 61 | user_id: string; 62 | workflow_hash: string; 63 | node_count: number; 64 | node_types: string[]; 65 | has_trigger: boolean; 66 | has_webhook: boolean; 67 | complexity: 'simple' | 'medium' | 'complex'; 68 | sanitized_workflow: any; 69 | created_at?: string; 70 | } 71 | 72 | export interface SanitizedWorkflow { 73 | nodes: any[]; 74 | connections: any; 75 | nodeCount: number; 76 | nodeTypes: string[]; 77 | hasTrigger: boolean; 78 | hasWebhook: boolean; 79 | complexity: 'simple' | 'medium' | 'complex'; 80 | workflowHash: string; 81 | } 82 | 83 | export const TELEMETRY_CONFIG = { 84 | // Batch processing 85 | BATCH_FLUSH_INTERVAL: 5000, // 5 seconds 86 | EVENT_QUEUE_THRESHOLD: 10, // Batch events for efficiency 87 | WORKFLOW_QUEUE_THRESHOLD: 5, // Batch workflows 88 | 89 | // Retry logic 90 | MAX_RETRIES: 3, 91 | RETRY_DELAY: 1000, // 1 second base delay 92 | OPERATION_TIMEOUT: 5000, // 5 seconds 93 | 94 | // Rate limiting 95 | RATE_LIMIT_WINDOW: 60000, // 1 minute 96 | RATE_LIMIT_MAX_EVENTS: 100, // Max events per window 97 | 98 | // Queue limits 99 | MAX_QUEUE_SIZE: 1000, // Maximum events to queue 100 | MAX_BATCH_SIZE: 50, // Maximum events per batch 101 | } as const; 102 | 103 | export const TELEMETRY_BACKEND = { 104 | URL: 'https://ydyufsohxdfpopqbubwk.supabase.co', 105 | ANON_KEY: 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InlkeXVmc29oeGRmcG9wcWJ1YndrIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NTg3OTYyMDAsImV4cCI6MjA3NDM3MjIwMH0.xESphg6h5ozaDsm4Vla3QnDJGc6Nc_cpfoqTHRynkCk' 106 | } as const; 107 | 108 | export interface TelemetryMetrics { 109 | eventsTracked: number; 110 | 
eventsDropped: number; 111 | eventsFailed: number; 112 | batchesSent: number; 113 | batchesFailed: number; 114 | averageFlushTime: number; 115 | lastFlushTime?: number; 116 | rateLimitHits: number; 117 | } 118 | 119 | export enum TelemetryErrorType { 120 | VALIDATION_ERROR = 'VALIDATION_ERROR', 121 | NETWORK_ERROR = 'NETWORK_ERROR', 122 | RATE_LIMIT_ERROR = 'RATE_LIMIT_ERROR', 123 | QUEUE_OVERFLOW_ERROR = 'QUEUE_OVERFLOW_ERROR', 124 | INITIALIZATION_ERROR = 'INITIALIZATION_ERROR', 125 | UNKNOWN_ERROR = 'UNKNOWN_ERROR' 126 | } 127 | 128 | export interface TelemetryErrorContext { 129 | type: TelemetryErrorType; 130 | message: string; 131 | context?: Record<string, any>; 132 | timestamp: number; 133 | retryable: boolean; 134 | } ``` -------------------------------------------------------------------------------- /examples/enhanced-documentation-demo.js: -------------------------------------------------------------------------------- ```javascript 1 | #!/usr/bin/env node 2 | 3 | const { DocumentationFetcher } = require('../dist/utils/documentation-fetcher'); 4 | 5 | async function demonstrateEnhancedDocumentation() { 6 | console.log('🎯 Enhanced Documentation Demo\n'); 7 | 8 | const fetcher = new DocumentationFetcher(); 9 | const nodeType = 'n8n-nodes-base.slack'; 10 | 11 | console.log(`Fetching enhanced documentation for: ${nodeType}\n`); 12 | 13 | try { 14 | const doc = await fetcher.getEnhancedNodeDocumentation(nodeType); 15 | 16 | if (!doc) { 17 | console.log('No documentation found for this node.'); 18 | return; 19 | } 20 | 21 | // Display title and description 22 | console.log('📄 Basic Information:'); 23 | console.log(`Title: ${doc.title || 'N/A'}`); 24 | console.log(`URL: ${doc.url}`); 25 | console.log(`Description: ${doc.description || 'See documentation for details'}\n`); 26 | 27 | // Display operations 28 | if (doc.operations && doc.operations.length > 0) { 29 | console.log('⚙️ Available Operations:'); 30 | // Group by resource 31 | const resourceMap = new Map(); 32 | doc.operations.forEach(op => { 33 | if (!resourceMap.has(op.resource)) { 34 | resourceMap.set(op.resource, []); 35 | } 36 | resourceMap.get(op.resource).push(op); 37 | }); 38 | 39 | resourceMap.forEach((ops, resource) => { 40 | console.log(`\n ${resource}:`); 41 | ops.forEach(op => { 42 | console.log(` - ${op.operation}: ${op.description}`); 43 | }); 44 | }); 45 | console.log(''); 46 | } 47 | 48 | // Display API methods 49 | if (doc.apiMethods && doc.apiMethods.length > 0) { 50 | console.log('🔌 API Method Mappings (first 5):'); 51 | doc.apiMethods.slice(0, 5).forEach(method => { 52 | console.log(` ${method.resource}.${method.operation} → ${method.apiMethod}`); 53 | if (method.apiUrl) { 54 | console.log(` Documentation: ${method.apiUrl}`); 55 | } 56 | }); 57 | console.log(` ... 
and ${Math.max(0, doc.apiMethods.length - 5)} more\n`); 58 | } 59 | 60 | // Display templates 61 | if (doc.templates && doc.templates.length > 0) { 62 | console.log('📋 Available Templates:'); 63 | doc.templates.forEach(template => { 64 | console.log(` - ${template.name}`); 65 | if (template.description) { 66 | console.log(` ${template.description}`); 67 | } 68 | }); 69 | console.log(''); 70 | } 71 | 72 | // Display related resources 73 | if (doc.relatedResources && doc.relatedResources.length > 0) { 74 | console.log('🔗 Related Resources:'); 75 | doc.relatedResources.forEach(resource => { 76 | console.log(` - ${resource.title} (${resource.type})`); 77 | console.log(` ${resource.url}`); 78 | }); 79 | console.log(''); 80 | } 81 | 82 | // Display required scopes 83 | if (doc.requiredScopes && doc.requiredScopes.length > 0) { 84 | console.log('🔐 Required Scopes:'); 85 | doc.requiredScopes.forEach(scope => { 86 | console.log(` - ${scope}`); 87 | }); 88 | console.log(''); 89 | } 90 | 91 | // Display summary 92 | console.log('📊 Summary:'); 93 | console.log(` - Total operations: ${doc.operations?.length || 0}`); 94 | console.log(` - Total API methods: ${doc.apiMethods?.length || 0}`); 95 | console.log(` - Code examples: ${doc.examples?.length || 0}`); 96 | console.log(` - Templates: ${doc.templates?.length || 0}`); 97 | console.log(` - Related resources: ${doc.relatedResources?.length || 0}`); 98 | 99 | } catch (error) { 100 | console.error('Error:', error.message); 101 | } finally { 102 | await fetcher.cleanup(); 103 | } 104 | } 105 | 106 | // Run demo 107 | demonstrateEnhancedDocumentation().catch(console.error); ``` -------------------------------------------------------------------------------- /scripts/publish-npm.sh: -------------------------------------------------------------------------------- ```bash 1 | #!/bin/bash 2 | # Script to publish n8n-mcp with runtime-only dependencies 3 | 4 | set -e 5 | 6 | # Color codes for output 7 | RED='\033[0;31m' 8 | GREEN='\033[0;32m' 9 | YELLOW='\033[1;33m' 10 | NC='\033[0m' # No Color 11 | 12 | echo "🚀 Preparing n8n-mcp for npm publish..." 13 | 14 | # Skip tests - they already run in CI before merge/publish 15 | echo "⏭️ Skipping tests (already verified in CI)" 16 | 17 | # Sync version to runtime package first 18 | echo "🔄 Syncing version to package.runtime.json..." 19 | npm run sync:runtime-version 20 | 21 | # Get version from main package.json 22 | VERSION=$(node -e "console.log(require('./package.json').version)") 23 | echo -e "${GREEN}📌 Version: $VERSION${NC}" 24 | 25 | # Check if dist directory exists 26 | if [ ! -d "dist" ]; then 27 | echo -e "${RED}❌ Error: dist directory not found. Run 'npm run build' first.${NC}" 28 | exit 1 29 | fi 30 | 31 | # Check if database exists 32 | if [ ! -f "data/nodes.db" ]; then 33 | echo -e "${RED}❌ Error: data/nodes.db not found. Run 'npm run rebuild' first.${NC}" 34 | exit 1 35 | fi 36 | 37 | # Create a temporary publish directory 38 | PUBLISH_DIR="npm-publish-temp" 39 | rm -rf $PUBLISH_DIR 40 | mkdir -p $PUBLISH_DIR 41 | 42 | # Copy necessary files 43 | echo "📦 Copying files..." 44 | cp -r dist $PUBLISH_DIR/ 45 | cp -r data $PUBLISH_DIR/ 46 | cp README.md $PUBLISH_DIR/ 47 | cp LICENSE $PUBLISH_DIR/ 48 | cp .env.example $PUBLISH_DIR/ 49 | cp .npmignore $PUBLISH_DIR/ 2>/dev/null || true 50 | 51 | # Use runtime package.json (already has correct version from sync) 52 | echo "📋 Using runtime-only dependencies..." 
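# Optional sanity check (a sketch, not part of the release flow): once the
# publish directory below is fully assembled, `npm pack --dry-run` prints the
# exact file list and unpacked size that `npm publish` would ship, which is a
# quick way to confirm only dist/, data/nodes.db and the docs made it in.
#   (cd npm-publish-temp && npm pack --dry-run)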
53 | cp package.runtime.json $PUBLISH_DIR/package.json 54 | 55 | cd $PUBLISH_DIR 56 | 57 | # Add required fields from main package.json 58 | node -e " 59 | const pkg = require('./package.json'); 60 | pkg.name = 'n8n-mcp'; 61 | pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)'; 62 | pkg.main = 'dist/index.js'; 63 | pkg.types = 'dist/index.d.ts'; 64 | pkg.exports = { 65 | '.': { 66 | types: './dist/index.d.ts', 67 | require: './dist/index.js', 68 | import: './dist/index.js' 69 | } 70 | }; 71 | pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' }; 72 | pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' }; 73 | pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation']; 74 | pkg.author = 'Romuald Czlonkowski @ www.aiadvisors.pl/en'; 75 | pkg.license = 'MIT'; 76 | pkg.bugs = { url: 'https://github.com/czlonkowski/n8n-mcp/issues' }; 77 | pkg.homepage = 'https://github.com/czlonkowski/n8n-mcp#readme'; 78 | pkg.files = ['dist/**/*', 'data/nodes.db', '.env.example', 'README.md', 'LICENSE']; 79 | // Note: node_modules are automatically included for dependencies 80 | delete pkg.private; // Remove private field so we can publish 81 | require('fs').writeFileSync('./package.json', JSON.stringify(pkg, null, 2)); 82 | " 83 | 84 | echo "" 85 | echo "📋 Package details:" 86 | echo -e "${GREEN}Name:${NC} $(node -e "console.log(require('./package.json').name)")" 87 | echo -e "${GREEN}Version:${NC} $(node -e "console.log(require('./package.json').version)")" 88 | echo -e "${GREEN}Size:${NC} ~50MB (vs 1GB+ with dev dependencies)" 89 | echo -e "${GREEN}Runtime deps:${NC} 8 packages" 90 | 91 | echo "" 92 | echo "✅ Ready to publish!" 93 | echo "" 94 | echo -e "${YELLOW}⚠️ Important: npm publishing requires OTP authentication${NC}" 95 | echo "" 96 | echo "To publish, run:" 97 | echo -e " ${GREEN}cd $PUBLISH_DIR${NC}" 98 | echo -e " ${GREEN}npm publish --otp=YOUR_OTP_CODE${NC}" 99 | echo "" 100 | echo "After publishing, clean up with:" 101 | echo -e " ${GREEN}cd ..${NC}" 102 | echo -e " ${GREEN}rm -rf $PUBLISH_DIR${NC}" 103 | echo "" 104 | echo "📝 Notes:" 105 | echo " - Get your OTP from your authenticator app" 106 | echo " - The package will be available at https://www.npmjs.com/package/n8n-mcp" 107 | echo " - Users can run 'npx n8n-mcp' immediately after publish" ``` -------------------------------------------------------------------------------- /scripts/extract-nodes-docker.sh: -------------------------------------------------------------------------------- ```bash 1 | #!/bin/bash 2 | set -e 3 | 4 | echo "🐳 n8n Node Extraction via Docker" 5 | echo "=================================" 6 | 7 | # Colors for output 8 | GREEN='\033[0;32m' 9 | YELLOW='\033[1;33m' 10 | RED='\033[0;31m' 11 | NC='\033[0m' # No Color 12 | 13 | # Function to print colored output 14 | print_status() { 15 | echo -e "${GREEN}[$(date +'%H:%M:%S')]${NC} $1" 16 | } 17 | 18 | print_warning() { 19 | echo -e "${YELLOW}[$(date +'%H:%M:%S')]${NC} ⚠️ $1" 20 | } 21 | 22 | print_error() { 23 | echo -e "${RED}[$(date +'%H:%M:%S')]${NC} ❌ $1" 24 | } 25 | 26 | # Check if Docker is running 27 | if ! docker info > /dev/null 2>&1; then 28 | print_error "Docker is not running. Please start Docker and try again." 29 | exit 1 30 | fi 31 | 32 | print_status "Docker is running ✅" 33 | 34 | # Clean up any existing containers 35 | print_status "Cleaning up existing containers..." 
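# For reference, the equivalent manual cleanup if this script is interrupted
# mid-run (standard docker-compose commands; --remove-orphans is only needed
# when stray containers from older compose files are left behind):
#   docker-compose -f docker-compose.extract.yml ps
#   docker-compose -f docker-compose.extract.yml down -v --remove-orphans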
36 | docker-compose -f docker-compose.extract.yml down -v 2>/dev/null || true 37 | 38 | # Build the project first 39 | print_status "Building the project..." 40 | npm run build 41 | 42 | # Start the extraction process 43 | print_status "Starting n8n container to extract latest nodes..." 44 | docker-compose -f docker-compose.extract.yml up -d n8n-latest 45 | 46 | # Wait for n8n container to be healthy 47 | print_status "Waiting for n8n container to initialize..." 48 | ATTEMPTS=0 49 | MAX_ATTEMPTS=60 50 | 51 | while [ $ATTEMPTS -lt $MAX_ATTEMPTS ]; do 52 | if docker-compose -f docker-compose.extract.yml ps | grep -q "healthy"; then 53 | print_status "n8n container is ready ✅" 54 | break 55 | fi 56 | 57 | ATTEMPTS=$((ATTEMPTS + 1)) 58 | echo -n "." 59 | sleep 2 60 | done 61 | 62 | if [ $ATTEMPTS -eq $MAX_ATTEMPTS ]; then 63 | print_error "n8n container failed to become healthy" 64 | docker-compose -f docker-compose.extract.yml logs n8n-latest 65 | docker-compose -f docker-compose.extract.yml down -v 66 | exit 1 67 | fi 68 | 69 | # Run the extraction 70 | print_status "Running node extraction..." 71 | docker-compose -f docker-compose.extract.yml run --rm node-extractor 72 | 73 | # Check the results 74 | print_status "Checking extraction results..." 75 | if [ -f "./data/nodes-fresh.db" ]; then 76 | NODE_COUNT=$(sqlite3 ./data/nodes-fresh.db "SELECT COUNT(*) FROM nodes;" 2>/dev/null || echo "0") 77 | IF_VERSION=$(sqlite3 ./data/nodes-fresh.db "SELECT version FROM nodes WHERE name='n8n-nodes-base.If' LIMIT 1;" 2>/dev/null || echo "not found") 78 | 79 | print_status "Extracted $NODE_COUNT nodes" 80 | print_status "If node version: $IF_VERSION" 81 | 82 | # Check if we got the If node source code and look for version 83 | IF_SOURCE=$(sqlite3 ./data/nodes-fresh.db "SELECT source_code FROM nodes WHERE name='n8n-nodes-base.If' LIMIT 1;" 2>/dev/null || echo "") 84 | if [[ $IF_SOURCE =~ version:[[:space:]]*([0-9]+) ]]; then 85 | IF_CODE_VERSION="${BASH_REMATCH[1]}" 86 | print_status "If node version from source code: v$IF_CODE_VERSION" 87 | 88 | if [ "$IF_CODE_VERSION" -ge "2" ]; then 89 | print_status "✅ Successfully extracted latest If node (v$IF_CODE_VERSION)!" 90 | else 91 | print_warning "If node is still v$IF_CODE_VERSION, expected v2 or higher" 92 | fi 93 | fi 94 | else 95 | print_error "Database file not found after extraction" 96 | fi 97 | 98 | # Clean up 99 | print_status "Cleaning up Docker containers..." 100 | docker-compose -f docker-compose.extract.yml down -v 101 | 102 | print_status "✨ Extraction complete!" 103 | 104 | # Offer to restart the MCP server 105 | echo "" 106 | read -p "Would you like to restart the MCP server with the new nodes? (y/n) " -n 1 -r 107 | echo "" 108 | if [[ $REPLY =~ ^[Yy]$ ]]; then 109 | print_status "Restarting MCP server..." 
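# Rough post-restart check (assumes the server is started via `npm start` below;
# adjust the pattern if your entry point differs): confirm the process came back
# before relying on it, e.g. `pgrep -f "node.*dist/index.js"`.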
110 | # Kill any existing server process 111 | pkill -f "node.*dist/index.js" || true 112 | 113 | # Start the server 114 | npm start & 115 | print_status "MCP server restarted with fresh node database" 116 | fi ``` -------------------------------------------------------------------------------- /src/mcp/tool-docs/templates/get-templates-for-task.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { ToolDocumentation } from '../types'; 2 | 3 | export const getTemplatesForTaskDoc: ToolDocumentation = { 4 | name: 'get_templates_for_task', 5 | category: 'templates', 6 | essentials: { 7 | description: 'Curated templates by task: ai_automation, data_sync, webhooks, email, slack, data_transform, files, scheduling, api, database.', 8 | keyParameters: ['task'], 9 | example: 'get_templates_for_task({task: "slack_integration"})', 10 | performance: 'Fast (<100ms) - pre-categorized results', 11 | tips: [ 12 | 'Returns hand-picked templates for specific automation tasks', 13 | 'Use list_tasks to see all available task categories', 14 | 'Templates are curated for quality and relevance' 15 | ] 16 | }, 17 | full: { 18 | description: `Retrieves curated workflow templates for specific automation tasks. This tool provides hand-picked templates organized by common use cases, making it easy to find the right workflow for your needs. Each task category contains the most popular and effective templates for that particular automation scenario.`, 19 | parameters: { 20 | task: { 21 | type: 'string', 22 | required: true, 23 | description: 'The type of task to get templates for. Options: ai_automation, data_sync, webhook_processing, email_automation, slack_integration, data_transformation, file_processing, scheduling, api_integration, database_operations' 24 | } 25 | }, 26 | returns: `Returns an object containing: 27 | - task: The requested task type 28 | - templates: Array of curated templates 29 | - id: Template ID 30 | - name: Template name 31 | - description: What the workflow does 32 | - author: Creator information 33 | - nodes: Array of node types used 34 | - views: Popularity metric 35 | - created: Creation date 36 | - url: Link to template 37 | - totalFound: Number of templates in this category 38 | - availableTasks: List of all task categories (if no templates found)`, 39 | examples: [ 40 | 'get_templates_for_task({task: "slack_integration"}) - Get Slack automation workflows', 41 | 'get_templates_for_task({task: "ai_automation"}) - Get AI-powered workflows', 42 | 'get_templates_for_task({task: "data_sync"}) - Get data synchronization workflows', 43 | 'get_templates_for_task({task: "webhook_processing"}) - Get webhook handler workflows', 44 | 'get_templates_for_task({task: "email_automation"}) - Get email automation workflows' 45 | ], 46 | useCases: [ 47 | 'Find workflows for specific business needs', 48 | 'Discover best practices for common automations', 49 | 'Get started quickly with pre-built solutions', 50 | 'Learn patterns for specific integration types', 51 | 'Browse curated collections of quality workflows' 52 | ], 53 | performance: `Excellent performance with pre-categorized templates: 54 | - Query time: <10ms (indexed by task) 55 | - No filtering needed (pre-curated) 56 | - Returns 5-20 templates per category 57 | - Total response time: <100ms`, 58 | bestPractices: [ 59 | 'Start with task-based search for faster results', 60 | 'Review multiple templates to find best patterns', 61 | 'Check template age for most current approaches', 62 | 'Combine 
templates from same category for complex workflows', 63 | 'Use returned node lists to understand requirements' 64 | ], 65 | pitfalls: [ 66 | 'Not all tasks have many templates available', 67 | 'Task categories are predefined - no custom categories', 68 | 'Some templates may overlap between categories', 69 | 'Curation is subjective - browse all results', 70 | 'Templates may need updates for latest n8n features' 71 | ], 72 | relatedTools: ['search_templates', 'list_node_templates', 'get_template', 'list_tasks'] 73 | } 74 | }; ``` -------------------------------------------------------------------------------- /scripts/test-jmespath-validation.ts: -------------------------------------------------------------------------------- ```typescript 1 | #!/usr/bin/env npx tsx 2 | 3 | import { EnhancedConfigValidator } from '../src/services/enhanced-config-validator.js'; 4 | 5 | console.log('🧪 Testing JMESPath Validation\n'); 6 | 7 | const testCases = [ 8 | { 9 | name: 'JMESPath with unquoted numeric literal', 10 | config: { 11 | language: 'javaScript', 12 | jsCode: `const data = { users: [{ name: 'John', age: 30 }, { name: 'Jane', age: 25 }] }; 13 | const adults = $jmespath(data, 'users[?age >= 18]'); 14 | return [{json: {adults}}];` 15 | }, 16 | expectError: true 17 | }, 18 | { 19 | name: 'JMESPath with properly quoted numeric literal', 20 | config: { 21 | language: 'javaScript', 22 | jsCode: `const data = { users: [{ name: 'John', age: 30 }, { name: 'Jane', age: 25 }] }; 23 | const adults = $jmespath(data, 'users[?age >= \`18\`]'); 24 | return [{json: {adults}}];` 25 | }, 26 | expectError: false 27 | }, 28 | { 29 | name: 'Multiple JMESPath filters with unquoted numbers', 30 | config: { 31 | language: 'javaScript', 32 | jsCode: `const products = items.map(item => item.json); 33 | const expensive = $jmespath(products, '[?price > 100]'); 34 | const lowStock = $jmespath(products, '[?quantity < 10]'); 35 | const highPriority = $jmespath(products, '[?priority == 1]'); 36 | return [{json: {expensive, lowStock, highPriority}}];` 37 | }, 38 | expectError: true 39 | }, 40 | { 41 | name: 'JMESPath with string comparison (no backticks needed)', 42 | config: { 43 | language: 'javaScript', 44 | jsCode: `const data = { users: [{ name: 'John', status: 'active' }, { name: 'Jane', status: 'inactive' }] }; 45 | const activeUsers = $jmespath(data, 'users[?status == "active"]'); 46 | return [{json: {activeUsers}}];` 47 | }, 48 | expectError: false 49 | }, 50 | { 51 | name: 'Python JMESPath with unquoted numeric literal', 52 | config: { 53 | language: 'python', 54 | pythonCode: `data = { 'users': [{ 'name': 'John', 'age': 30 }, { 'name': 'Jane', 'age': 25 }] } 55 | adults = _jmespath(data, 'users[?age >= 18]') 56 | return [{'json': {'adults': adults}}]` 57 | }, 58 | expectError: true 59 | }, 60 | { 61 | name: 'Complex filter with decimal numbers', 62 | config: { 63 | language: 'javaScript', 64 | jsCode: `const items = [{ price: 99.99 }, { price: 150.50 }, { price: 200 }]; 65 | const expensive = $jmespath(items, '[?price >= 99.95]'); 66 | return [{json: {expensive}}];` 67 | }, 68 | expectError: true 69 | } 70 | ]; 71 | 72 | let passCount = 0; 73 | let failCount = 0; 74 | 75 | for (const test of testCases) { 76 | console.log(`Test: ${test.name}`); 77 | const result = EnhancedConfigValidator.validateWithMode( 78 | 'nodes-base.code', 79 | test.config, 80 | [ 81 | { name: 'language', type: 'options', options: ['javaScript', 'python'] }, 82 | { name: 'jsCode', type: 'string' }, 83 | { name: 'pythonCode', type: 'string' } 84 | 
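      // Minimal stand-in for the Code node's real property schema: only the
      // fields this script exercises (language, jsCode, pythonCode) are listed,
      // and the full schema returned by get_node_info has many more properties.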
], 85 | 'operation', 86 | 'strict' 87 | ); 88 | 89 | const hasJMESPathError = result.errors.some(e => 90 | e.message.includes('JMESPath numeric literal') || 91 | e.message.includes('must be wrapped in backticks') 92 | ); 93 | 94 | const passed = hasJMESPathError === test.expectError; 95 | 96 | console.log(` Expected error: ${test.expectError}`); 97 | console.log(` Has JMESPath error: ${hasJMESPathError}`); 98 | console.log(` Result: ${passed ? '✅ PASS' : '❌ FAIL'}`); 99 | 100 | if (result.errors.length > 0) { 101 | console.log(` Errors: ${result.errors.map(e => e.message).join(', ')}`); 102 | } 103 | if (result.warnings.length > 0) { 104 | console.log(` Warnings: ${result.warnings.slice(0, 2).map(w => w.message).join(', ')}`); 105 | } 106 | 107 | if (passed) passCount++; 108 | else failCount++; 109 | 110 | console.log(); 111 | } 112 | 113 | console.log(`\n📊 Results: ${passCount} passed, ${failCount} failed`); 114 | console.log(failCount === 0 ? '✅ All JMESPath validation tests passed!' : '❌ Some tests failed'); ``` -------------------------------------------------------------------------------- /tests/mocks/n8n-api/data/executions.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Mock execution data for MSW handlers 3 | */ 4 | 5 | export interface MockExecution { 6 | id: string; 7 | workflowId: string; 8 | status: 'success' | 'error' | 'waiting' | 'running'; 9 | mode: 'manual' | 'trigger' | 'webhook' | 'internal'; 10 | startedAt: string; 11 | stoppedAt?: string; 12 | data?: any; 13 | error?: any; 14 | } 15 | 16 | export const mockExecutions: MockExecution[] = [ 17 | { 18 | id: 'exec_1', 19 | workflowId: 'workflow_1', 20 | status: 'success', 21 | mode: 'manual', 22 | startedAt: '2024-01-01T10:00:00.000Z', 23 | stoppedAt: '2024-01-01T10:00:05.000Z', 24 | data: { 25 | resultData: { 26 | runData: { 27 | 'node_2': [ 28 | { 29 | startTime: 1704106800000, 30 | executionTime: 234, 31 | data: { 32 | main: [[{ 33 | json: { 34 | status: 200, 35 | data: { message: 'Success' } 36 | } 37 | }]] 38 | } 39 | } 40 | ] 41 | } 42 | } 43 | } 44 | }, 45 | { 46 | id: 'exec_2', 47 | workflowId: 'workflow_2', 48 | status: 'error', 49 | mode: 'webhook', 50 | startedAt: '2024-01-01T11:00:00.000Z', 51 | stoppedAt: '2024-01-01T11:00:02.000Z', 52 | error: { 53 | message: 'Could not send message to Slack', 54 | stack: 'Error: Could not send message to Slack\n at SlackNode.execute', 55 | node: 'slack_1' 56 | }, 57 | data: { 58 | resultData: { 59 | runData: { 60 | 'webhook_1': [ 61 | { 62 | startTime: 1704110400000, 63 | executionTime: 10, 64 | data: { 65 | main: [[{ 66 | json: { 67 | headers: { 'content-type': 'application/json' }, 68 | body: { message: 'Test webhook' } 69 | } 70 | }]] 71 | } 72 | } 73 | ] 74 | } 75 | } 76 | } 77 | }, 78 | { 79 | id: 'exec_3', 80 | workflowId: 'workflow_3', 81 | status: 'waiting', 82 | mode: 'trigger', 83 | startedAt: '2024-01-01T12:00:00.000Z', 84 | data: { 85 | resultData: { 86 | runData: {} 87 | }, 88 | waitingExecutions: { 89 | 'agent_1': { 90 | reason: 'Waiting for user input' 91 | } 92 | } 93 | } 94 | } 95 | ]; 96 | 97 | /** 98 | * Factory functions for creating mock executions 99 | */ 100 | export const executionFactory = { 101 | /** 102 | * Create a successful execution 103 | */ 104 | success: (workflowId: string, data?: any): MockExecution => ({ 105 | id: `exec_${Date.now()}`, 106 | workflowId, 107 | status: 'success', 108 | mode: 'manual', 109 | startedAt: new Date().toISOString(), 110 | stoppedAt: new 
Date(Date.now() + 5000).toISOString(), 111 | data: data || { 112 | resultData: { 113 | runData: { 114 | 'node_1': [{ 115 | startTime: Date.now(), 116 | executionTime: 100, 117 | data: { 118 | main: [[{ json: { success: true } }]] 119 | } 120 | }] 121 | } 122 | } 123 | } 124 | }), 125 | 126 | /** 127 | * Create a failed execution 128 | */ 129 | error: (workflowId: string, error: { message: string; node?: string }): MockExecution => ({ 130 | id: `exec_${Date.now()}`, 131 | workflowId, 132 | status: 'error', 133 | mode: 'manual', 134 | startedAt: new Date().toISOString(), 135 | stoppedAt: new Date(Date.now() + 2000).toISOString(), 136 | error: { 137 | message: error.message, 138 | stack: `Error: ${error.message}\n at Node.execute`, 139 | node: error.node 140 | }, 141 | data: { 142 | resultData: { 143 | runData: {} 144 | } 145 | } 146 | }), 147 | 148 | /** 149 | * Create a custom execution 150 | */ 151 | custom: (config: Partial<MockExecution>): MockExecution => ({ 152 | id: `exec_${Date.now()}`, 153 | workflowId: 'workflow_1', 154 | status: 'success', 155 | mode: 'manual', 156 | startedAt: new Date().toISOString(), 157 | ...config 158 | }) 159 | }; ``` -------------------------------------------------------------------------------- /src/mcp/tool-docs/index.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { ToolDocumentation } from './types'; 2 | 3 | // Import all tool documentations 4 | import { searchNodesDoc, listNodesDoc, listAiToolsDoc, getDatabaseStatisticsDoc } from './discovery'; 5 | import { 6 | getNodeEssentialsDoc, 7 | getNodeInfoDoc, 8 | getNodeDocumentationDoc, 9 | searchNodePropertiesDoc, 10 | getNodeAsToolInfoDoc, 11 | getPropertyDependenciesDoc 12 | } from './configuration'; 13 | import { 14 | validateNodeMinimalDoc, 15 | validateNodeOperationDoc, 16 | validateWorkflowDoc, 17 | validateWorkflowConnectionsDoc, 18 | validateWorkflowExpressionsDoc 19 | } from './validation'; 20 | import { 21 | listTasksDoc, 22 | listNodeTemplatesDoc, 23 | getTemplateDoc, 24 | searchTemplatesDoc, 25 | searchTemplatesByMetadataDoc, 26 | getTemplatesForTaskDoc 27 | } from './templates'; 28 | import { 29 | toolsDocumentationDoc, 30 | n8nDiagnosticDoc, 31 | n8nHealthCheckDoc, 32 | n8nListAvailableToolsDoc 33 | } from './system'; 34 | import { 35 | aiAgentsGuide 36 | } from './guides'; 37 | import { 38 | n8nCreateWorkflowDoc, 39 | n8nGetWorkflowDoc, 40 | n8nGetWorkflowDetailsDoc, 41 | n8nGetWorkflowStructureDoc, 42 | n8nGetWorkflowMinimalDoc, 43 | n8nUpdateFullWorkflowDoc, 44 | n8nUpdatePartialWorkflowDoc, 45 | n8nDeleteWorkflowDoc, 46 | n8nListWorkflowsDoc, 47 | n8nValidateWorkflowDoc, 48 | n8nAutofixWorkflowDoc, 49 | n8nTriggerWebhookWorkflowDoc, 50 | n8nGetExecutionDoc, 51 | n8nListExecutionsDoc, 52 | n8nDeleteExecutionDoc 53 | } from './workflow_management'; 54 | 55 | // Combine all tool documentations into a single object 56 | export const toolsDocumentation: Record<string, ToolDocumentation> = { 57 | // System tools 58 | tools_documentation: toolsDocumentationDoc, 59 | n8n_diagnostic: n8nDiagnosticDoc, 60 | n8n_health_check: n8nHealthCheckDoc, 61 | n8n_list_available_tools: n8nListAvailableToolsDoc, 62 | 63 | // Guides 64 | ai_agents_guide: aiAgentsGuide, 65 | 66 | // Discovery tools 67 | search_nodes: searchNodesDoc, 68 | list_nodes: listNodesDoc, 69 | list_ai_tools: listAiToolsDoc, 70 | get_database_statistics: getDatabaseStatisticsDoc, 71 | 72 | // Configuration tools 73 | get_node_essentials: getNodeEssentialsDoc, 74 | get_node_info: 
getNodeInfoDoc, 75 | get_node_documentation: getNodeDocumentationDoc, 76 | search_node_properties: searchNodePropertiesDoc, 77 | get_node_as_tool_info: getNodeAsToolInfoDoc, 78 | get_property_dependencies: getPropertyDependenciesDoc, 79 | 80 | // Validation tools 81 | validate_node_minimal: validateNodeMinimalDoc, 82 | validate_node_operation: validateNodeOperationDoc, 83 | validate_workflow: validateWorkflowDoc, 84 | validate_workflow_connections: validateWorkflowConnectionsDoc, 85 | validate_workflow_expressions: validateWorkflowExpressionsDoc, 86 | 87 | // Template tools 88 | list_tasks: listTasksDoc, 89 | list_node_templates: listNodeTemplatesDoc, 90 | get_template: getTemplateDoc, 91 | search_templates: searchTemplatesDoc, 92 | search_templates_by_metadata: searchTemplatesByMetadataDoc, 93 | get_templates_for_task: getTemplatesForTaskDoc, 94 | 95 | // Workflow Management tools (n8n API) 96 | n8n_create_workflow: n8nCreateWorkflowDoc, 97 | n8n_get_workflow: n8nGetWorkflowDoc, 98 | n8n_get_workflow_details: n8nGetWorkflowDetailsDoc, 99 | n8n_get_workflow_structure: n8nGetWorkflowStructureDoc, 100 | n8n_get_workflow_minimal: n8nGetWorkflowMinimalDoc, 101 | n8n_update_full_workflow: n8nUpdateFullWorkflowDoc, 102 | n8n_update_partial_workflow: n8nUpdatePartialWorkflowDoc, 103 | n8n_delete_workflow: n8nDeleteWorkflowDoc, 104 | n8n_list_workflows: n8nListWorkflowsDoc, 105 | n8n_validate_workflow: n8nValidateWorkflowDoc, 106 | n8n_autofix_workflow: n8nAutofixWorkflowDoc, 107 | n8n_trigger_webhook_workflow: n8nTriggerWebhookWorkflowDoc, 108 | n8n_get_execution: n8nGetExecutionDoc, 109 | n8n_list_executions: n8nListExecutionsDoc, 110 | n8n_delete_execution: n8nDeleteExecutionDoc 111 | }; 112 | 113 | // Re-export types 114 | export type { ToolDocumentation } from './types'; ``` -------------------------------------------------------------------------------- /src/utils/mcp-client.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { Client } from '@modelcontextprotocol/sdk/client/index.js'; 2 | import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js'; 3 | import { WebSocketClientTransport } from '@modelcontextprotocol/sdk/client/websocket.js'; 4 | import { 5 | CallToolRequest, 6 | ListToolsRequest, 7 | ListResourcesRequest, 8 | ReadResourceRequest, 9 | ListPromptsRequest, 10 | GetPromptRequest, 11 | CallToolResultSchema, 12 | ListToolsResultSchema, 13 | ListResourcesResultSchema, 14 | ReadResourceResultSchema, 15 | ListPromptsResultSchema, 16 | GetPromptResultSchema, 17 | } from '@modelcontextprotocol/sdk/types.js'; 18 | 19 | export interface MCPClientConfig { 20 | serverUrl: string; 21 | authToken?: string; 22 | connectionType: 'http' | 'websocket' | 'stdio'; 23 | } 24 | 25 | export class MCPClient { 26 | private client: Client; 27 | private config: MCPClientConfig; 28 | private connected: boolean = false; 29 | 30 | constructor(config: MCPClientConfig) { 31 | this.config = config; 32 | this.client = new Client( 33 | { 34 | name: 'n8n-mcp-client', 35 | version: '1.0.0', 36 | }, 37 | { 38 | capabilities: {}, 39 | } 40 | ); 41 | } 42 | 43 | async connect(): Promise<void> { 44 | if (this.connected) { 45 | return; 46 | } 47 | 48 | let transport; 49 | 50 | switch (this.config.connectionType) { 51 | case 'websocket': 52 | const wsUrl = this.config.serverUrl.replace(/^http/, 'ws'); 53 | transport = new WebSocketClientTransport(new URL(wsUrl)); 54 | break; 55 | 56 | case 'stdio': 57 | // For stdio, the serverUrl should be 
the command to execute 58 | const [command, ...args] = this.config.serverUrl.split(' '); 59 | transport = new StdioClientTransport({ 60 | command, 61 | args, 62 | }); 63 | break; 64 | 65 | default: 66 | throw new Error(`HTTP transport is not yet supported for MCP clients`); 67 | } 68 | 69 | await this.client.connect(transport); 70 | this.connected = true; 71 | } 72 | 73 | async disconnect(): Promise<void> { 74 | if (this.connected) { 75 | await this.client.close(); 76 | this.connected = false; 77 | } 78 | } 79 | 80 | async listTools(): Promise<any> { 81 | await this.ensureConnected(); 82 | return await this.client.request( 83 | { method: 'tools/list' } as ListToolsRequest, 84 | ListToolsResultSchema 85 | ); 86 | } 87 | 88 | async callTool(name: string, args: any): Promise<any> { 89 | await this.ensureConnected(); 90 | return await this.client.request( 91 | { 92 | method: 'tools/call', 93 | params: { 94 | name, 95 | arguments: args, 96 | }, 97 | } as CallToolRequest, 98 | CallToolResultSchema 99 | ); 100 | } 101 | 102 | async listResources(): Promise<any> { 103 | await this.ensureConnected(); 104 | return await this.client.request( 105 | { method: 'resources/list' } as ListResourcesRequest, 106 | ListResourcesResultSchema 107 | ); 108 | } 109 | 110 | async readResource(uri: string): Promise<any> { 111 | await this.ensureConnected(); 112 | return await this.client.request( 113 | { 114 | method: 'resources/read', 115 | params: { 116 | uri, 117 | }, 118 | } as ReadResourceRequest, 119 | ReadResourceResultSchema 120 | ); 121 | } 122 | 123 | async listPrompts(): Promise<any> { 124 | await this.ensureConnected(); 125 | return await this.client.request( 126 | { method: 'prompts/list' } as ListPromptsRequest, 127 | ListPromptsResultSchema 128 | ); 129 | } 130 | 131 | async getPrompt(name: string, args?: any): Promise<any> { 132 | await this.ensureConnected(); 133 | return await this.client.request( 134 | { 135 | method: 'prompts/get', 136 | params: { 137 | name, 138 | arguments: args, 139 | }, 140 | } as GetPromptRequest, 141 | GetPromptResultSchema 142 | ); 143 | } 144 | 145 | private async ensureConnected(): Promise<void> { 146 | if (!this.connected) { 147 | await this.connect(); 148 | } 149 | } 150 | } ``` -------------------------------------------------------------------------------- /src/mcp/tool-docs/workflow_management/n8n-validate-workflow.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { ToolDocumentation } from '../types'; 2 | 3 | export const n8nValidateWorkflowDoc: ToolDocumentation = { 4 | name: 'n8n_validate_workflow', 5 | category: 'workflow_management', 6 | essentials: { 7 | description: 'Validate workflow from n8n instance by ID - checks nodes, connections, expressions, and returns errors/warnings', 8 | keyParameters: ['id'], 9 | example: 'n8n_validate_workflow({id: "wf_abc123"})', 10 | performance: 'Network-dependent (100-500ms) - fetches and validates workflow', 11 | tips: [ 12 | 'Use options.profile to control validation strictness (minimal/runtime/ai-friendly/strict)', 13 | 'Validation includes node configs, connections, and n8n expression syntax', 14 | 'Returns categorized errors, warnings, and actionable fix suggestions' 15 | ] 16 | }, 17 | full: { 18 | description: `Validates a workflow stored in your n8n instance by fetching it via API and running comprehensive validation checks. 
This tool: 19 | 20 | - Fetches the workflow from n8n using the workflow ID 21 | - Validates all node configurations based on their schemas 22 | - Checks workflow connections and data flow 23 | - Validates n8n expression syntax in all fields 24 | - Returns categorized issues with fix suggestions 25 | 26 | The validation uses the same engine as validate_workflow but works with workflows already in n8n, making it perfect for validating existing workflows before execution. 27 | 28 | Requires N8N_API_URL and N8N_API_KEY environment variables to be configured.`, 29 | parameters: { 30 | id: { 31 | type: 'string', 32 | required: true, 33 | description: 'The workflow ID to validate from your n8n instance' 34 | }, 35 | options: { 36 | type: 'object', 37 | required: false, 38 | description: 'Validation options: {validateNodes: bool (default true), validateConnections: bool (default true), validateExpressions: bool (default true), profile: "minimal"|"runtime"|"ai-friendly"|"strict" (default "runtime")}' 39 | } 40 | }, 41 | returns: 'ValidationResult object containing isValid boolean, arrays of errors/warnings, and suggestions for fixes', 42 | examples: [ 43 | 'n8n_validate_workflow({id: "wf_abc123"}) - Validate with default settings', 44 | 'n8n_validate_workflow({id: "wf_abc123", options: {profile: "strict"}}) - Strict validation', 45 | 'n8n_validate_workflow({id: "wf_abc123", options: {validateExpressions: false}}) - Skip expression validation' 46 | ], 47 | useCases: [ 48 | 'Validating workflows before running them in production', 49 | 'Checking imported workflows for compatibility', 50 | 'Debugging workflow execution failures', 51 | 'Ensuring workflows follow best practices', 52 | 'Pre-deployment validation in CI/CD pipelines' 53 | ], 54 | performance: 'Depends on workflow size and API latency. Typically 100-500ms for medium workflows.', 55 | bestPractices: [ 56 | 'Run validation before activating workflows in production', 57 | 'Use "runtime" profile for pre-execution checks', 58 | 'Use "strict" profile for code review and best practices', 59 | 'Fix errors before warnings - errors will likely cause execution failures', 60 | 'Pay attention to expression validation - syntax errors are common' 61 | ], 62 | pitfalls: [ 63 | 'Requires valid API credentials - check n8n_health_check first', 64 | 'Large workflows may take longer to validate', 65 | 'Some warnings may be intentional (e.g., optional parameters)', 66 | 'Profile affects validation time - strict is slower but more thorough', 67 | 'Expression validation may flag working but non-standard syntax' 68 | ], 69 | relatedTools: ['validate_workflow', 'n8n_get_workflow', 'validate_workflow_expressions', 'n8n_health_check', 'n8n_autofix_workflow'] 70 | } 71 | }; ``` -------------------------------------------------------------------------------- /N8N_HTTP_STREAMABLE_SETUP.md: -------------------------------------------------------------------------------- ```markdown 1 | # n8n MCP HTTP Streamable Configuration Guide 2 | 3 | ## Overview 4 | 5 | This guide shows how to configure the n8n-nodes-mcp community node to connect to n8n-mcp using the **recommended HTTP Streamable transport**. 6 | 7 | ## Prerequisites 8 | 9 | 1. Install n8n-nodes-mcp community node: 10 | - Go to n8n Settings → Community Nodes 11 | - Install: `n8n-nodes-mcp` 12 | - Restart n8n if prompted 13 | 14 | 2. 
Ensure environment variable is set: 15 | ```bash 16 | N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true 17 | ``` 18 | 19 | ## Quick Start 20 | 21 | ### Step 1: Start Services 22 | 23 | ```bash 24 | # Stop any existing containers 25 | docker stop n8n n8n-mcp && docker rm n8n n8n-mcp 26 | 27 | # Start with HTTP Streamable configuration 28 | docker-compose -f docker-compose.n8n.yml up -d 29 | 30 | # Services will be available at: 31 | # - n8n: http://localhost:5678 32 | # - n8n-mcp: http://localhost:3000 33 | ``` 34 | 35 | ### Step 2: Create MCP Credentials in n8n 36 | 37 | 1. Open n8n at http://localhost:5678 38 | 2. Go to Credentials → Add credential 39 | 3. Search for "MCP" and select "MCP API" 40 | 4. Configure the fields as follows: 41 | - **Credential Name**: `n8n MCP Server` 42 | - **HTTP Stream URL**: ` 43 | - **Messages Post Endpoint**: (leave empty) 44 | - **Additional Headers**: 45 | ```json 46 | { 47 | "Authorization": "Bearer test-secure-token-123456789" 48 | } 49 | ``` 50 | 5. Save the credential 51 | 52 | ### Step 3: Configure MCP Client Node 53 | 54 | Add an MCP Client node to your workflow with these settings: 55 | 56 | - **Connection Type**: `HTTP Streamable` 57 | - **HTTP Streamable URL**: `http://n8n-mcp:3000/mcp` 58 | - **Authentication**: `Bearer Auth` 59 | - **Credentials**: Select the credential you created 60 | - **Operation**: Choose your operation (e.g., "List Tools", "Call Tool") 61 | 62 | ### Step 4: Test the Connection 63 | 64 | 1. Execute the workflow 65 | 2. The MCP Client should successfully connect and return results 66 | 67 | ## Available Operations 68 | 69 | ### List Tools 70 | Shows all available MCP tools: 71 | - `tools_documentation` 72 | - `list_nodes` 73 | - `get_node_info` 74 | - `search_nodes` 75 | - `get_node_essentials` 76 | - `validate_node_config` 77 | - And many more... 78 | 79 | ### Call Tool 80 | Execute specific tools with arguments: 81 | 82 | **Example: Get Node Info** 83 | - Tool Name: `get_node_info` 84 | - Arguments: `{ "nodeType": "n8n-nodes-base.httpRequest" }` 85 | 86 | **Example: Search Nodes** 87 | - Tool Name: `search_nodes` 88 | - Arguments: `{ "query": "webhook", "limit": 5 }` 89 | 90 | ## Import Example Workflow 91 | 92 | Import the pre-configured workflow: 93 | 1. Go to Workflows → Add workflow → Import from File 94 | 2. Select: `examples/n8n-mcp-streamable-workflow.json` 95 | 3. Update the credentials with your bearer token 96 | 97 | ## Troubleshooting 98 | 99 | ### Connection Refused 100 | - Verify services are running: `docker ps` 101 | - Check logs: `docker logs n8n-mcp` 102 | - Ensure you're using `http://n8n-mcp:3000/mcp` (container name) not `localhost` 103 | 104 | ### Authentication Failed 105 | - Verify bearer token matches exactly 106 | - Check CORS settings allow n8n origin 107 | 108 | ### Test Endpoint Manually 109 | ```bash 110 | # Test health check 111 | curl http://localhost:3000/health 112 | 113 | # Test MCP endpoint (should return error without proper JSON-RPC body) 114 | curl -X POST http://localhost:3000/mcp \ 115 | -H "Authorization: Bearer test-secure-token-123456789" \ 116 | -H "Content-Type: application/json" 117 | ``` 118 | 119 | ## Architecture Notes 120 | 121 | - **Transport**: HTTP Streamable (StreamableHTTPServerTransport) 122 | - **Protocol**: JSON-RPC 2.0 over HTTP POST 123 | - **Authentication**: Bearer token in Authorization header 124 | - **Endpoint**: Single `/mcp` endpoint handles all operations 125 | - **Stateless**: Each request creates a new MCP server instance 126 | 127 | ## Why HTTP Streamable? 
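Before the reasons below, here is what a complete call looks like in practice. This is a sketch that reuses the placeholder endpoint and bearer token from this guide; substitute your own values, and note that depending on the server version the transport may also expect an `initialize` request before other calls:

```bash
# Single JSON-RPC POST to the /mcp endpoint listing all available tools
curl -X POST http://localhost:3000/mcp \
  -H "Authorization: Bearer test-secure-token-123456789" \
  -H "Content-Type: application/json" \
  -H "Accept: application/json, text/event-stream" \
  -d '{"jsonrpc":"2.0","id":1,"method":"tools/list","params":{}}'
```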
128 | 129 | 1. **Recommended by MCP**: The official recommended transport method 130 | 2. **Better Performance**: More efficient than SSE 131 | 3. **Simpler Implementation**: Single POST endpoint 132 | 4. **Future Proof**: SSE is deprecated in MCP spec ``` -------------------------------------------------------------------------------- /src/scripts/sanitize-templates.ts: -------------------------------------------------------------------------------- ```typescript 1 | #!/usr/bin/env node 2 | import { createDatabaseAdapter } from '../database/database-adapter'; 3 | import { logger } from '../utils/logger'; 4 | import { TemplateSanitizer } from '../utils/template-sanitizer'; 5 | import { gunzipSync, gzipSync } from 'zlib'; 6 | 7 | async function sanitizeTemplates() { 8 | console.log('🧹 Sanitizing workflow templates in database...\n'); 9 | 10 | const db = await createDatabaseAdapter('./data/nodes.db'); 11 | const sanitizer = new TemplateSanitizer(); 12 | 13 | try { 14 | // Get all templates - check both old and new format 15 | const templates = db.prepare('SELECT id, name, workflow_json, workflow_json_compressed FROM templates').all() as any[]; 16 | console.log(`Found ${templates.length} templates to check\n`); 17 | 18 | let sanitizedCount = 0; 19 | const problematicTemplates: any[] = []; 20 | 21 | for (const template of templates) { 22 | let originalWorkflow: any = null; 23 | let useCompressed = false; 24 | 25 | // Try compressed format first (newer format) 26 | if (template.workflow_json_compressed) { 27 | try { 28 | const buffer = Buffer.from(template.workflow_json_compressed, 'base64'); 29 | const decompressed = gunzipSync(buffer).toString('utf-8'); 30 | originalWorkflow = JSON.parse(decompressed); 31 | useCompressed = true; 32 | } catch (e) { 33 | console.log(`⚠️ Failed to decompress template ${template.id}, trying uncompressed`); 34 | } 35 | } 36 | 37 | // Fall back to uncompressed format (deprecated) 38 | if (!originalWorkflow && template.workflow_json) { 39 | try { 40 | originalWorkflow = JSON.parse(template.workflow_json); 41 | } catch (e) { 42 | console.log(`⚠️ Skipping template ${template.id}: Invalid JSON in both formats`); 43 | continue; 44 | } 45 | } 46 | 47 | if (!originalWorkflow) { 48 | continue; // Skip templates without workflow data 49 | } 50 | 51 | const { sanitized: sanitizedWorkflow, wasModified } = sanitizer.sanitizeWorkflow(originalWorkflow); 52 | 53 | if (wasModified) { 54 | // Get detected tokens for reporting 55 | const detectedTokens = sanitizer.detectTokens(originalWorkflow); 56 | 57 | // Update the template with sanitized version in the same format 58 | if (useCompressed) { 59 | const compressed = gzipSync(JSON.stringify(sanitizedWorkflow)).toString('base64'); 60 | const stmt = db.prepare('UPDATE templates SET workflow_json_compressed = ? WHERE id = ?'); 61 | stmt.run(compressed, template.id); 62 | } else { 63 | const stmt = db.prepare('UPDATE templates SET workflow_json = ? 
WHERE id = ?'); 64 | stmt.run(JSON.stringify(sanitizedWorkflow), template.id); 65 | } 66 | 67 | sanitizedCount++; 68 | problematicTemplates.push({ 69 | id: template.id, 70 | name: template.name, 71 | tokens: detectedTokens 72 | }); 73 | 74 | console.log(`✅ Sanitized template ${template.id}: ${template.name}`); 75 | detectedTokens.forEach(token => { 76 | console.log(` - Found: ${token.substring(0, 20)}...`); 77 | }); 78 | } 79 | } 80 | 81 | console.log(`\n📊 Summary:`); 82 | console.log(` Total templates: ${templates.length}`); 83 | console.log(` Sanitized: ${sanitizedCount}`); 84 | 85 | if (problematicTemplates.length > 0) { 86 | console.log(`\n⚠️ Templates that contained API tokens:`); 87 | problematicTemplates.forEach(t => { 88 | console.log(` - ${t.id}: ${t.name}`); 89 | }); 90 | } 91 | 92 | console.log('\n✨ Sanitization complete!'); 93 | } catch (error) { 94 | console.error('❌ Error sanitizing templates:', error); 95 | process.exit(1); 96 | } finally { 97 | db.close(); 98 | } 99 | } 100 | 101 | // Run if called directly 102 | if (require.main === module) { 103 | sanitizeTemplates().catch(console.error); 104 | } ``` -------------------------------------------------------------------------------- /src/mcp/tool-docs/configuration/get-property-dependencies.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { ToolDocumentation } from '../types'; 2 | 3 | export const getPropertyDependenciesDoc: ToolDocumentation = { 4 | name: 'get_property_dependencies', 5 | category: 'configuration', 6 | essentials: { 7 | description: 'Shows property dependencies and visibility rules - which fields appear when.', 8 | keyParameters: ['nodeType', 'config?'], 9 | example: 'get_property_dependencies({nodeType: "nodes-base.httpRequest"})', 10 | performance: 'Fast - analyzes property conditions', 11 | tips: [ 12 | 'Shows which properties depend on other property values', 13 | 'Test visibility impact with optional config parameter', 14 | 'Helps understand complex conditional property displays' 15 | ] 16 | }, 17 | full: { 18 | description: `Analyzes property dependencies and visibility conditions for a node. Shows which properties control the visibility of other properties (e.g., sendBody=true reveals body-related fields). 
Optionally test how a specific configuration affects property visibility.`, 19 | parameters: { 20 | nodeType: { 21 | type: 'string', 22 | required: true, 23 | description: 'The node type to analyze (e.g., "nodes-base.httpRequest")', 24 | examples: [ 25 | 'nodes-base.httpRequest', 26 | 'nodes-base.slack', 27 | 'nodes-base.if', 28 | 'nodes-base.switch' 29 | ] 30 | }, 31 | config: { 32 | type: 'object', 33 | required: false, 34 | description: 'Optional partial configuration to check visibility impact', 35 | examples: [ 36 | '{ method: "POST", sendBody: true }', 37 | '{ operation: "create", resource: "contact" }', 38 | '{ mode: "rules" }' 39 | ] 40 | } 41 | }, 42 | returns: `Object containing: 43 | - nodeType: The analyzed node type 44 | - displayName: Human-readable node name 45 | - controllingProperties: Properties that control visibility of others 46 | - dependentProperties: Properties whose visibility depends on others 47 | - complexDependencies: Multi-condition dependencies 48 | - currentConfig: If config provided, shows: 49 | - providedValues: The configuration you passed 50 | - visibilityImpact: Which properties are visible/hidden`, 51 | examples: [ 52 | 'get_property_dependencies({nodeType: "nodes-base.httpRequest"}) - Analyze HTTP Request dependencies', 53 | 'get_property_dependencies({nodeType: "nodes-base.httpRequest", config: {sendBody: true}}) - Test visibility with sendBody enabled', 54 | 'get_property_dependencies({nodeType: "nodes-base.if", config: {mode: "rules"}}) - Check If node in rules mode' 55 | ], 56 | useCases: [ 57 | 'Understanding which properties control others', 58 | 'Debugging why certain fields are not visible', 59 | 'Building dynamic UIs that match n8n behavior', 60 | 'Testing configurations before applying them', 61 | 'Understanding complex node property relationships' 62 | ], 63 | performance: 'Fast - analyzes property metadata without database queries', 64 | bestPractices: [ 65 | 'Use before configuring complex nodes with many conditional fields', 66 | 'Test different config values to understand visibility rules', 67 | 'Check dependencies when properties seem to be missing', 68 | 'Use for nodes with multiple operation modes (Slack, Google Sheets)', 69 | 'Combine with search_node_properties to find specific fields' 70 | ], 71 | pitfalls: [ 72 | 'Some properties have complex multi-condition dependencies', 73 | 'Visibility rules can be nested (property A controls B which controls C)', 74 | 'Not all hidden properties are due to dependencies (some are deprecated)', 75 | 'Config parameter only tests visibility, does not validate values' 76 | ], 77 | relatedTools: ['search_node_properties', 'get_node_essentials', 'validate_node_operation'] 78 | } 79 | }; ``` -------------------------------------------------------------------------------- /.claude/agents/code-reviewer.md: -------------------------------------------------------------------------------- ```markdown 1 | --- 2 | name: code-reviewer 3 | description: Use this agent when you need to review recently written or modified code for quality, security, and maintainability. This agent should be invoked immediately after code changes are made to ensure high standards are maintained. 
Examples:\n\n<example>\nContext: The user has created a code-review agent that should be called after writing code.\nuser: "Please write a function that validates email addresses"\nassistant: "I'll create an email validation function for you:"\n<function implementation>\nassistant: "Now let me use the code-reviewer agent to review this code for quality and security"\n<commentary>\nSince new code was just written, the code-reviewer agent should be invoked to ensure it meets quality standards.\n</commentary>\n</example>\n\n<example>\nContext: The user has modified existing code and wants it reviewed.\nuser: "I've updated the authentication logic, can you check if it's secure?"\nassistant: "I'll use the code-reviewer agent to analyze your authentication changes for security and best practices"\n<commentary>\nThe user has made changes to security-critical code, so the code-reviewer agent is the appropriate tool to ensure the modifications are secure and well-implemented.\n</commentary>\n</example> 4 | --- 5 | 6 | You are a senior code reviewer with extensive experience in software engineering, security, and best practices. Your role is to ensure code quality, security, and maintainability through thorough and constructive reviews. 7 | 8 | When invoked, you will: 9 | 10 | 1. **Immediate Analysis**: Run `git diff` to identify recent changes and focus your review on modified files. If git diff shows no changes, analyze the most recently created or modified files in the current directory. 11 | 12 | 2. **Comprehensive Review**: Evaluate code against these critical criteria: 13 | - **Readability**: Code is simple, clear, and self-documenting 14 | - **Naming**: Functions, variables, and classes have descriptive, meaningful names 15 | - **DRY Principle**: No duplicated code; common logic is properly abstracted 16 | - **Error Handling**: All edge cases handled; errors are caught and logged appropriately 17 | - **Security**: No hardcoded secrets, API keys, or sensitive data; proper authentication/authorization 18 | - **Input Validation**: All user inputs are validated and sanitized 19 | - **Testing**: Adequate test coverage for critical paths and edge cases 20 | - **Performance**: No obvious bottlenecks; efficient algorithms and data structures used 21 | 22 | 3. **Structured Feedback**: Organize your review into three priority levels: 23 | - **🚨 Critical Issues (Must Fix)**: Security vulnerabilities, bugs that will cause failures, or severe performance problems 24 | - **⚠️ Warnings (Should Fix)**: Code smells, missing error handling, or practices that could lead to future issues 25 | - **💡 Suggestions (Consider Improving)**: Opportunities for better readability, performance optimizations, or architectural improvements 26 | 27 | 4. **Actionable Recommendations**: For each issue identified: 28 | - Explain why it's a problem 29 | - Provide a specific code example showing how to fix it 30 | - Reference relevant best practices or documentation when applicable 31 | 32 | 5. **Positive Reinforcement**: Acknowledge well-written code sections and good practices observed 33 | 34 | Your review style should be: 35 | - Constructive and educational, not critical or harsh 36 | - Specific with line numbers and code snippets 37 | - Focused on the most impactful improvements 38 | - Considerate of the project's context and constraints 39 | 40 | Begin each review with a brief summary of what was reviewed and your overall assessment, then dive into the detailed findings organized by priority. 
41 | ``` -------------------------------------------------------------------------------- /scripts/deploy-to-vm.sh: -------------------------------------------------------------------------------- ```bash 1 | #!/bin/bash 2 | 3 | # Deployment script for n8n Documentation MCP Server 4 | # Target: n8ndocumentation.aiservices.pl 5 | 6 | set -e 7 | 8 | echo "🚀 n8n Documentation MCP Server - VM Deployment" 9 | echo "==============================================" 10 | 11 | # Configuration 12 | SERVER_USER=${SERVER_USER:-root} 13 | SERVER_HOST=${SERVER_HOST:-n8ndocumentation.aiservices.pl} 14 | APP_DIR="/opt/n8n-mcp" 15 | SERVICE_NAME="n8n-docs-mcp" 16 | 17 | # Colors 18 | GREEN='\033[0;32m' 19 | YELLOW='\033[1;33m' 20 | RED='\033[0;31m' 21 | NC='\033[0m' # No Color 22 | 23 | # Check if .env exists 24 | if [ ! -f .env ]; then 25 | echo -e "${RED}❌ .env file not found. Please create it from .env.example${NC}" 26 | exit 1 27 | fi 28 | 29 | # Check required environment variables 30 | source .env 31 | if [ "$MCP_DOMAIN" != "n8ndocumentation.aiservices.pl" ]; then 32 | echo -e "${YELLOW}⚠️ Warning: MCP_DOMAIN is not set to n8ndocumentation.aiservices.pl${NC}" 33 | read -p "Continue anyway? (y/N) " -n 1 -r 34 | echo 35 | if [[ ! $REPLY =~ ^[Yy]$ ]]; then 36 | exit 1 37 | fi 38 | fi 39 | 40 | if [ -z "$MCP_AUTH_TOKEN" ] || [ "$MCP_AUTH_TOKEN" == "your-secure-auth-token-here" ]; then 41 | echo -e "${RED}❌ MCP_AUTH_TOKEN not set or using default value${NC}" 42 | echo "Generate a secure token with: openssl rand -hex 32" 43 | exit 1 44 | fi 45 | 46 | echo -e "${GREEN}✅ Configuration validated${NC}" 47 | 48 | # Build the project locally 49 | echo -e "\n${YELLOW}Building project...${NC}" 50 | npm run build 51 | 52 | # Create deployment package 53 | echo -e "\n${YELLOW}Creating deployment package...${NC}" 54 | rm -rf deploy-package 55 | mkdir -p deploy-package 56 | 57 | # Copy necessary files 58 | cp -r dist deploy-package/ 59 | cp -r data deploy-package/ 60 | cp package*.json deploy-package/ 61 | cp .env deploy-package/ 62 | cp ecosystem.config.js deploy-package/ 2>/dev/null || true 63 | 64 | # Create tarball 65 | tar -czf deploy-package.tar.gz deploy-package 66 | 67 | echo -e "${GREEN}✅ Deployment package created${NC}" 68 | 69 | # Upload to server 70 | echo -e "\n${YELLOW}Uploading to server...${NC}" 71 | scp deploy-package.tar.gz $SERVER_USER@$SERVER_HOST:/tmp/ 72 | 73 | # Deploy on server 74 | echo -e "\n${YELLOW}Deploying on server...${NC}" 75 | ssh $SERVER_USER@$SERVER_HOST << 'ENDSSH' 76 | set -e 77 | 78 | # Create app directory 79 | mkdir -p /opt/n8n-mcp 80 | cd /opt/n8n-mcp 81 | 82 | # Stop existing service if running 83 | pm2 stop n8n-docs-mcp 2>/dev/null || true 84 | 85 | # Extract deployment package 86 | tar -xzf /tmp/deploy-package.tar.gz --strip-components=1 87 | rm /tmp/deploy-package.tar.gz 88 | 89 | # Install production dependencies 90 | npm ci --only=production 91 | 92 | # Create PM2 ecosystem file if not exists 93 | if [ ! 
-f ecosystem.config.js ]; then 94 | cat > ecosystem.config.js << 'EOF' 95 | module.exports = { 96 | apps: [{ 97 | name: 'n8n-docs-mcp', 98 | script: './dist/index-http.js', 99 | instances: 1, 100 | autorestart: true, 101 | watch: false, 102 | max_memory_restart: '1G', 103 | env: { 104 | NODE_ENV: 'production' 105 | }, 106 | error_file: './logs/error.log', 107 | out_file: './logs/out.log', 108 | log_file: './logs/combined.log', 109 | time: true 110 | }] 111 | }; 112 | EOF 113 | fi 114 | 115 | # Create logs directory 116 | mkdir -p logs 117 | 118 | # Start with PM2 119 | pm2 start ecosystem.config.js 120 | pm2 save 121 | 122 | echo "✅ Deployment complete!" 123 | echo "" 124 | echo "Service status:" 125 | pm2 status n8n-docs-mcp 126 | ENDSSH 127 | 128 | # Clean up local files 129 | rm -rf deploy-package deploy-package.tar.gz 130 | 131 | echo -e "\n${GREEN}🎉 Deployment successful!${NC}" 132 | echo -e "\nServer endpoints:" 133 | echo -e " Health: https://$SERVER_HOST/health" 134 | echo -e " Stats: https://$SERVER_HOST/stats" 135 | echo -e " MCP: https://$SERVER_HOST/mcp" 136 | echo -e "\nClaude Desktop configuration:" 137 | echo -e " { 138 | \"mcpServers\": { 139 | \"n8n-nodes-remote\": { 140 | \"command\": \"npx\", 141 | \"args\": [ 142 | \"-y\", 143 | \"@modelcontextprotocol/client-http\", 144 | \"https://$SERVER_HOST/mcp\" 145 | ], 146 | \"env\": { 147 | \"MCP_AUTH_TOKEN\": \"$MCP_AUTH_TOKEN\" 148 | } 149 | } 150 | } 151 | }" ``` -------------------------------------------------------------------------------- /scripts/migrate-nodes-fts.ts: -------------------------------------------------------------------------------- ```typescript 1 | #!/usr/bin/env node 2 | 3 | import * as path from 'path'; 4 | import { createDatabaseAdapter } from '../src/database/database-adapter'; 5 | import { logger } from '../src/utils/logger'; 6 | 7 | /** 8 | * Migrate existing database to add FTS5 support for nodes 9 | */ 10 | async function migrateNodesFTS() { 11 | logger.info('Starting nodes FTS5 migration...'); 12 | 13 | const dbPath = path.join(process.cwd(), 'data', 'nodes.db'); 14 | const db = await createDatabaseAdapter(dbPath); 15 | 16 | try { 17 | // Check if nodes_fts already exists 18 | const tableExists = db.prepare(` 19 | SELECT name FROM sqlite_master 20 | WHERE type='table' AND name='nodes_fts' 21 | `).get(); 22 | 23 | if (tableExists) { 24 | logger.info('nodes_fts table already exists, skipping migration'); 25 | return; 26 | } 27 | 28 | logger.info('Creating nodes_fts virtual table...'); 29 | 30 | // Create the FTS5 virtual table 31 | db.prepare(` 32 | CREATE VIRTUAL TABLE IF NOT EXISTS nodes_fts USING fts5( 33 | node_type, 34 | display_name, 35 | description, 36 | documentation, 37 | operations, 38 | content=nodes, 39 | content_rowid=rowid, 40 | tokenize='porter' 41 | ) 42 | `).run(); 43 | 44 | // Populate the FTS table with existing data 45 | logger.info('Populating nodes_fts with existing data...'); 46 | 47 | const nodes = db.prepare('SELECT rowid, * FROM nodes').all() as any[]; 48 | logger.info(`Migrating ${nodes.length} nodes to FTS index...`); 49 | 50 | const insertStmt = db.prepare(` 51 | INSERT INTO nodes_fts(rowid, node_type, display_name, description, documentation, operations) 52 | VALUES (?, ?, ?, ?, ?, ?) 
53 | `); 54 | 55 | for (const node of nodes) { 56 | insertStmt.run( 57 | node.rowid, 58 | node.node_type, 59 | node.display_name, 60 | node.description || '', 61 | node.documentation || '', 62 | node.operations || '' 63 | ); 64 | } 65 | 66 | // Create triggers to keep FTS in sync 67 | logger.info('Creating synchronization triggers...'); 68 | 69 | db.prepare(` 70 | CREATE TRIGGER IF NOT EXISTS nodes_fts_insert AFTER INSERT ON nodes 71 | BEGIN 72 | INSERT INTO nodes_fts(rowid, node_type, display_name, description, documentation, operations) 73 | VALUES (new.rowid, new.node_type, new.display_name, new.description, new.documentation, new.operations); 74 | END 75 | `).run(); 76 | 77 | db.prepare(` 78 | CREATE TRIGGER IF NOT EXISTS nodes_fts_update AFTER UPDATE ON nodes 79 | BEGIN 80 | UPDATE nodes_fts 81 | SET node_type = new.node_type, 82 | display_name = new.display_name, 83 | description = new.description, 84 | documentation = new.documentation, 85 | operations = new.operations 86 | WHERE rowid = new.rowid; 87 | END 88 | `).run(); 89 | 90 | db.prepare(` 91 | CREATE TRIGGER IF NOT EXISTS nodes_fts_delete AFTER DELETE ON nodes 92 | BEGIN 93 | DELETE FROM nodes_fts WHERE rowid = old.rowid; 94 | END 95 | `).run(); 96 | 97 | // Test the FTS search 98 | logger.info('Testing FTS search...'); 99 | 100 | const testResults = db.prepare(` 101 | SELECT n.* FROM nodes n 102 | JOIN nodes_fts ON n.rowid = nodes_fts.rowid 103 | WHERE nodes_fts MATCH 'webhook' 104 | ORDER BY rank 105 | LIMIT 5 106 | `).all(); 107 | 108 | logger.info(`FTS test search found ${testResults.length} results for 'webhook'`); 109 | 110 | // Persist if using sql.js 111 | if ('persist' in db) { 112 | logger.info('Persisting database changes...'); 113 | (db as any).persist(); 114 | } 115 | 116 | logger.info('✅ FTS5 migration completed successfully!'); 117 | 118 | } catch (error) { 119 | logger.error('Migration failed:', error); 120 | throw error; 121 | } finally { 122 | db.close(); 123 | } 124 | } 125 | 126 | // Run migration 127 | migrateNodesFTS().catch(error => { 128 | logger.error('Migration error:', error); 129 | process.exit(1); 130 | }); ``` -------------------------------------------------------------------------------- /src/utils/example-generator.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Generates example workflows and parameters for n8n nodes 3 | */ 4 | export class ExampleGenerator { 5 | /** 6 | * Generate an example workflow from node definition 7 | */ 8 | static generateFromNodeDefinition(nodeDefinition: any): any { 9 | const nodeName = nodeDefinition.displayName || 'Example Node'; 10 | const nodeType = nodeDefinition.name || 'n8n-nodes-base.exampleNode'; 11 | 12 | return { 13 | name: `${nodeName} Example Workflow`, 14 | nodes: [ 15 | { 16 | parameters: this.generateExampleParameters(nodeDefinition), 17 | id: this.generateNodeId(), 18 | name: nodeName, 19 | type: nodeType, 20 | typeVersion: nodeDefinition.version || 1, 21 | position: [250, 300], 22 | }, 23 | ], 24 | connections: {}, 25 | active: false, 26 | settings: {}, 27 | tags: ['example', 'generated'], 28 | }; 29 | } 30 | 31 | /** 32 | * Generate example parameters based on node properties 33 | */ 34 | static generateExampleParameters(nodeDefinition: any): any { 35 | const params: any = {}; 36 | 37 | // If properties are available, generate examples based on them 38 | if (Array.isArray(nodeDefinition.properties)) { 39 | for (const prop of nodeDefinition.properties) { 40 | if (prop.name && 
prop.type) { 41 | params[prop.name] = this.generateExampleValue(prop); 42 | } 43 | } 44 | } 45 | 46 | // Add common parameters based on node type 47 | if (nodeDefinition.displayName?.toLowerCase().includes('trigger')) { 48 | params.pollTimes = { 49 | item: [ 50 | { 51 | mode: 'everyMinute', 52 | }, 53 | ], 54 | }; 55 | } 56 | 57 | return params; 58 | } 59 | 60 | /** 61 | * Generate example value based on property definition 62 | */ 63 | private static generateExampleValue(property: any): any { 64 | switch (property.type) { 65 | case 'string': 66 | if (property.name.toLowerCase().includes('url')) { 67 | return 'https://example.com'; 68 | } 69 | if (property.name.toLowerCase().includes('email')) { 70 | return '[email protected]'; 71 | } 72 | if (property.name.toLowerCase().includes('name')) { 73 | return 'Example Name'; 74 | } 75 | return property.default || 'example-value'; 76 | 77 | case 'number': 78 | return property.default || 10; 79 | 80 | case 'boolean': 81 | return property.default !== undefined ? property.default : true; 82 | 83 | case 'options': 84 | if (property.options && property.options.length > 0) { 85 | return property.options[0].value; 86 | } 87 | return property.default || ''; 88 | 89 | case 'collection': 90 | case 'fixedCollection': 91 | return {}; 92 | 93 | default: 94 | return property.default || null; 95 | } 96 | } 97 | 98 | /** 99 | * Generate a unique node ID 100 | */ 101 | private static generateNodeId(): string { 102 | return Math.random().toString(36).substring(2, 15) + 103 | Math.random().toString(36).substring(2, 15); 104 | } 105 | 106 | /** 107 | * Generate example based on node operations 108 | */ 109 | static generateFromOperations(operations: any[]): any { 110 | const examples: any[] = []; 111 | 112 | if (!operations || operations.length === 0) { 113 | return examples; 114 | } 115 | 116 | // Group operations by resource 117 | const resourceMap = new Map<string, any[]>(); 118 | for (const op of operations) { 119 | if (!resourceMap.has(op.resource)) { 120 | resourceMap.set(op.resource, []); 121 | } 122 | resourceMap.get(op.resource)!.push(op); 123 | } 124 | 125 | // Generate example for each resource 126 | for (const [resource, ops] of resourceMap) { 127 | examples.push({ 128 | resource, 129 | operation: ops[0].operation, 130 | description: `Example: ${ops[0].description}`, 131 | parameters: { 132 | resource, 133 | operation: ops[0].operation, 134 | }, 135 | }); 136 | } 137 | 138 | return examples; 139 | } 140 | } ``` -------------------------------------------------------------------------------- /verify-telemetry-fix.js: -------------------------------------------------------------------------------- ```javascript 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * Verification script to test that telemetry permissions are fixed 5 | * Run this AFTER applying the GRANT permissions fix 6 | */ 7 | 8 | const { createClient } = require('@supabase/supabase-js'); 9 | const crypto = require('crypto'); 10 | 11 | const TELEMETRY_BACKEND = { 12 | URL: 'https://ydyufsohxdfpopqbubwk.supabase.co', 13 | ANON_KEY: 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InlkeXVmc29oeGRmcG9wcWJ1YndrIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NTg3OTYyMDAsImV4cCI6MjA3NDM3MjIwMH0.xESphg6h5ozaDsm4Vla3QnDJGc6Nc_cpfoqTHRynkCk' 14 | }; 15 | 16 | async function verifyTelemetryFix() { 17 | console.log('🔍 VERIFYING TELEMETRY PERMISSIONS FIX'); 18 | console.log('====================================\n'); 19 | 20 | const supabase = createClient(TELEMETRY_BACKEND.URL, TELEMETRY_BACKEND.ANON_KEY, { 
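    // Session persistence and token auto-refresh are disabled below because this is a
    // one-shot verification script rather than a long-lived client session.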
21 | auth: { 22 | persistSession: false, 23 | autoRefreshToken: false, 24 | } 25 | }); 26 | 27 | const testUserId = 'verify-' + crypto.randomBytes(4).toString('hex'); 28 | 29 | // Test 1: Event insert 30 | console.log('📝 Test 1: Event insert'); 31 | try { 32 | const { data, error } = await supabase 33 | .from('telemetry_events') 34 | .insert([{ 35 | user_id: testUserId, 36 | event: 'verification_test', 37 | properties: { fixed: true } 38 | }]); 39 | 40 | if (error) { 41 | console.error('❌ Event insert failed:', error.message); 42 | return false; 43 | } else { 44 | console.log('✅ Event insert successful'); 45 | } 46 | } catch (e) { 47 | console.error('❌ Event insert exception:', e.message); 48 | return false; 49 | } 50 | 51 | // Test 2: Workflow insert 52 | console.log('📝 Test 2: Workflow insert'); 53 | try { 54 | const { data, error } = await supabase 55 | .from('telemetry_workflows') 56 | .insert([{ 57 | user_id: testUserId, 58 | workflow_hash: 'verify-' + crypto.randomBytes(4).toString('hex'), 59 | node_count: 2, 60 | node_types: ['n8n-nodes-base.webhook', 'n8n-nodes-base.set'], 61 | has_trigger: true, 62 | has_webhook: true, 63 | complexity: 'simple', 64 | sanitized_workflow: { 65 | nodes: [{ 66 | id: 'test-node', 67 | type: 'n8n-nodes-base.webhook', 68 | position: [100, 100], 69 | parameters: {} 70 | }], 71 | connections: {} 72 | } 73 | }]); 74 | 75 | if (error) { 76 | console.error('❌ Workflow insert failed:', error.message); 77 | return false; 78 | } else { 79 | console.log('✅ Workflow insert successful'); 80 | } 81 | } catch (e) { 82 | console.error('❌ Workflow insert exception:', e.message); 83 | return false; 84 | } 85 | 86 | // Test 3: Upsert operation (like real telemetry) 87 | console.log('📝 Test 3: Upsert operation'); 88 | try { 89 | const workflowHash = 'upsert-verify-' + crypto.randomBytes(4).toString('hex'); 90 | 91 | const { data, error } = await supabase 92 | .from('telemetry_workflows') 93 | .upsert([{ 94 | user_id: testUserId, 95 | workflow_hash: workflowHash, 96 | node_count: 3, 97 | node_types: ['n8n-nodes-base.webhook', 'n8n-nodes-base.set', 'n8n-nodes-base.if'], 98 | has_trigger: true, 99 | has_webhook: true, 100 | complexity: 'medium', 101 | sanitized_workflow: { 102 | nodes: [], 103 | connections: {} 104 | } 105 | }], { 106 | onConflict: 'workflow_hash', 107 | ignoreDuplicates: true, 108 | }); 109 | 110 | if (error) { 111 | console.error('❌ Upsert failed:', error.message); 112 | return false; 113 | } else { 114 | console.log('✅ Upsert successful'); 115 | } 116 | } catch (e) { 117 | console.error('❌ Upsert exception:', e.message); 118 | return false; 119 | } 120 | 121 | console.log('\n🎉 All tests passed! Telemetry permissions are fixed.'); 122 | console.log('👍 Workflow telemetry should now work in the actual application.'); 123 | 124 | return true; 125 | } 126 | 127 | async function main() { 128 | const success = await verifyTelemetryFix(); 129 | process.exit(success ? 0 : 1); 130 | } 131 | 132 | main().catch(console.error); ``` -------------------------------------------------------------------------------- /src/mcp/tool-docs/templates/list-node-templates.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { ToolDocumentation } from '../types'; 2 | 3 | export const listNodeTemplatesDoc: ToolDocumentation = { 4 | name: 'list_node_templates', 5 | category: 'templates', 6 | essentials: { 7 | description: 'Find templates using specific nodes. 399 community workflows. 
Use FULL types: "n8n-nodes-base.httpRequest".', 8 | keyParameters: ['nodeTypes', 'limit'], 9 | example: 'list_node_templates({nodeTypes: ["n8n-nodes-base.slack"]})', 10 | performance: 'Fast (<100ms) - indexed node search', 11 | tips: [ 12 | 'Must use FULL node type with package prefix: "n8n-nodes-base.slack"', 13 | 'Can search for multiple nodes to find workflows using all of them', 14 | 'Returns templates sorted by popularity (view count)' 15 | ] 16 | }, 17 | full: { 18 | description: `Finds workflow templates that use specific n8n nodes. This is the best way to discover how particular nodes are used in real workflows. Search the community library of 399+ templates by specifying which nodes you want to see in action. Templates are sorted by popularity to show the most useful examples first.`, 19 | parameters: { 20 | nodeTypes: { 21 | type: 'array', 22 | required: true, 23 | description: 'Array of node types to search for. Must use full type names with package prefix (e.g., ["n8n-nodes-base.httpRequest", "n8n-nodes-base.openAi"])' 24 | }, 25 | limit: { 26 | type: 'number', 27 | required: false, 28 | description: 'Maximum number of templates to return. Default 10, max 100' 29 | } 30 | }, 31 | returns: `Returns an object containing: 32 | - templates: Array of matching templates 33 | - id: Template ID for retrieval 34 | - name: Template name 35 | - description: What the workflow does 36 | - author: Creator details (name, username, verified) 37 | - nodes: Complete list of nodes used 38 | - views: View count (popularity metric) 39 | - created: Creation date 40 | - url: Link to template on n8n.io 41 | - totalFound: Total number of matching templates 42 | - tip: Usage hints if no results`, 43 | examples: [ 44 | 'list_node_templates({nodeTypes: ["n8n-nodes-base.slack"]}) - Find all Slack workflows', 45 | 'list_node_templates({nodeTypes: ["n8n-nodes-base.httpRequest", "n8n-nodes-base.postgres"]}) - Find workflows using both HTTP and Postgres', 46 | 'list_node_templates({nodeTypes: ["@n8n/n8n-nodes-langchain.openAi"], limit: 20}) - Find AI workflows with OpenAI', 47 | 'list_node_templates({nodeTypes: ["n8n-nodes-base.webhook", "n8n-nodes-base.respondToWebhook"]}) - Find webhook examples' 48 | ], 49 | useCases: [ 50 | 'Learn how to use specific nodes through examples', 51 | 'Find workflows combining particular integrations', 52 | 'Discover patterns for node combinations', 53 | 'See real-world usage of complex nodes', 54 | 'Find templates for your exact tech stack' 55 | ], 56 | performance: `Optimized for node-based searches: 57 | - Indexed by node type for fast lookups 58 | - Query time: <50ms for single node 59 | - Multiple nodes: <100ms (uses AND logic) 60 | - Returns pre-sorted by popularity 61 | - No full-text search needed`, 62 | bestPractices: [ 63 | 'Always use full node type with package prefix', 64 | 'Search for core nodes that define the workflow purpose', 65 | 'Start with single node searches, then refine', 66 | 'Check node types with list_nodes if unsure of names', 67 | 'Review multiple templates to learn different approaches' 68 | ], 69 | pitfalls: [ 70 | 'Node types must match exactly - no partial matches', 71 | 'Package prefix required: "slack" won\'t work, use "n8n-nodes-base.slack"', 72 | 'Some nodes have version numbers: "n8n-nodes-base.httpRequestV3"', 73 | 'Templates may use old node versions not in current n8n', 74 | 'AND logic means all specified nodes must be present' 75 | ], 76 | relatedTools: ['get_template', 'search_templates', 'get_templates_for_task', 'list_nodes'] 77 | } 
78 | }; ``` -------------------------------------------------------------------------------- /src/mcp-tools-engine.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * MCPEngine - A simplified interface for benchmarking MCP tool execution 3 | * This directly implements the MCP tool functionality without server dependencies 4 | */ 5 | import { NodeRepository } from './database/node-repository'; 6 | import { PropertyFilter } from './services/property-filter'; 7 | import { TaskTemplates } from './services/task-templates'; 8 | import { ConfigValidator } from './services/config-validator'; 9 | import { EnhancedConfigValidator } from './services/enhanced-config-validator'; 10 | import { WorkflowValidator, WorkflowValidationResult } from './services/workflow-validator'; 11 | 12 | export class MCPEngine { 13 | private workflowValidator: WorkflowValidator; 14 | 15 | constructor(private repository: NodeRepository) { 16 | this.workflowValidator = new WorkflowValidator(repository, EnhancedConfigValidator); 17 | } 18 | 19 | async listNodes(args: any = {}) { 20 | return this.repository.getAllNodes(args.limit); 21 | } 22 | 23 | async searchNodes(args: any) { 24 | return this.repository.searchNodes(args.query, args.mode || 'OR', args.limit || 20); 25 | } 26 | 27 | async getNodeInfo(args: any) { 28 | return this.repository.getNodeByType(args.nodeType); 29 | } 30 | 31 | async getNodeEssentials(args: any) { 32 | const node = await this.repository.getNodeByType(args.nodeType); 33 | if (!node) return null; 34 | 35 | // Filter to essentials using static method 36 | const essentials = PropertyFilter.getEssentials(node.properties || [], args.nodeType); 37 | return { 38 | nodeType: node.nodeType, 39 | displayName: node.displayName, 40 | description: node.description, 41 | category: node.category, 42 | required: essentials.required, 43 | common: essentials.common 44 | }; 45 | } 46 | 47 | async getNodeDocumentation(args: any) { 48 | const node = await this.repository.getNodeByType(args.nodeType); 49 | return node?.documentation || null; 50 | } 51 | 52 | async validateNodeOperation(args: any) { 53 | // Get node properties and validate 54 | const node = await this.repository.getNodeByType(args.nodeType); 55 | if (!node) { 56 | return { 57 | valid: false, 58 | errors: [{ type: 'invalid_configuration', property: '', message: 'Node type not found' }], 59 | warnings: [], 60 | suggestions: [], 61 | visibleProperties: [], 62 | hiddenProperties: [] 63 | }; 64 | } 65 | 66 | // CRITICAL FIX: Extract user-provided keys before validation 67 | // This prevents false warnings about default values 68 | const userProvidedKeys = new Set(Object.keys(args.config || {})); 69 | 70 | return ConfigValidator.validate(args.nodeType, args.config, node.properties || [], userProvidedKeys); 71 | } 72 | 73 | async validateNodeMinimal(args: any) { 74 | // Get node and check minimal requirements 75 | const node = await this.repository.getNodeByType(args.nodeType); 76 | if (!node) { 77 | return { missingFields: [], error: 'Node type not found' }; 78 | } 79 | 80 | const missingFields: string[] = []; 81 | const requiredFields = PropertyFilter.getEssentials(node.properties || [], args.nodeType).required; 82 | 83 | for (const field of requiredFields) { 84 | if (!args.config[field.name]) { 85 | missingFields.push(field.name); 86 | } 87 | } 88 | 89 | return { missingFields }; 90 | } 91 | 92 | async searchNodeProperties(args: any) { 93 | return this.repository.searchNodeProperties(args.nodeType, 
args.query, args.maxResults || 20); 94 | } 95 | 96 | async listAITools(args: any) { 97 | return this.repository.getAIToolNodes(); 98 | } 99 | 100 | async getDatabaseStatistics(args: any) { 101 | const count = await this.repository.getNodeCount(); 102 | const aiTools = await this.repository.getAIToolNodes(); 103 | return { 104 | totalNodes: count, 105 | aiToolsCount: aiTools.length, 106 | categories: ['trigger', 'transform', 'output', 'input'] 107 | }; 108 | } 109 | 110 | async validateWorkflow(args: any): Promise<WorkflowValidationResult> { 111 | return this.workflowValidator.validateWorkflow(args.workflow, args.options); 112 | } 113 | } ``` -------------------------------------------------------------------------------- /docs/tools-documentation-usage.md: -------------------------------------------------------------------------------- ```markdown 1 | # MCP Tools Documentation Usage Guide 2 | 3 | The `tools_documentation` tool provides comprehensive documentation for all MCP tools, making it easy for LLMs to understand how to use the tools effectively. 4 | 5 | ## Basic Usage 6 | 7 | ### 1. Get Documentation for Specific Tools 8 | 9 | ```json 10 | { 11 | "name": "tools_documentation", 12 | "arguments": { 13 | "tools": ["search_nodes", "get_node_essentials"] 14 | } 15 | } 16 | ``` 17 | 18 | Returns detailed documentation including parameters, examples, and best practices for the specified tools. 19 | 20 | ### 2. Search Tools by Keyword 21 | 22 | ```json 23 | { 24 | "name": "tools_documentation", 25 | "arguments": { 26 | "search": "validation" 27 | } 28 | } 29 | ``` 30 | 31 | Finds all tools related to validation, including their descriptions and use cases. 32 | 33 | ### 3. Browse Tools by Category 34 | 35 | ```json 36 | { 37 | "name": "tools_documentation", 38 | "arguments": { 39 | "category": "workflow_management" 40 | } 41 | } 42 | ``` 43 | 44 | Available categories: 45 | - **discovery**: Tools for finding and exploring nodes 46 | - **configuration**: Tools for configuring nodes 47 | - **validation**: Tools for validating configurations 48 | - **workflow_management**: Tools for creating and updating workflows 49 | - **execution**: Tools for running workflows 50 | - **templates**: Tools for working with workflow templates 51 | 52 | ### 4. Get All Categories 53 | 54 | ```json 55 | { 56 | "name": "tools_documentation", 57 | "arguments": {} 58 | } 59 | ``` 60 | 61 | Returns a list of all categories and the tools in each category. 62 | 63 | ### 5. Include Quick Reference Guide 64 | 65 | ```json 66 | { 67 | "name": "tools_documentation", 68 | "arguments": { 69 | "tools": ["n8n_create_workflow"], 70 | "includeQuickReference": true 71 | } 72 | } 73 | ``` 74 | 75 | Includes a quick reference guide with workflow building process, performance tips, and common patterns. 
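Under the hood, each `arguments` object shown above is simply the `arguments` field of a standard MCP `tools/call` request. As a rough sketch (the JSON-RPC `id` and the chosen arguments are illustrative), a client invocation travels as:

```json
{
  "jsonrpc": "2.0",
  "id": 1,
  "method": "tools/call",
  "params": {
    "name": "tools_documentation",
    "arguments": {
      "search": "validation"
    }
  }
}
```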
76 | 77 | ## Response Format 78 | 79 | The tool returns structured documentation with: 80 | 81 | - **Parameters**: Complete parameter descriptions with types, requirements, and defaults 82 | - **Return Format**: Example of what the tool returns 83 | - **Common Use Cases**: Real-world scenarios where the tool is useful 84 | - **Examples**: Working examples with input and expected output 85 | - **Performance Notes**: Speed and efficiency considerations 86 | - **Best Practices**: Recommended usage patterns 87 | - **Common Pitfalls**: Mistakes to avoid 88 | - **Related Tools**: Other tools that work well together 89 | 90 | ## Example: Learning About search_nodes 91 | 92 | Request: 93 | ```json 94 | { 95 | "name": "tools_documentation", 96 | "arguments": { 97 | "tools": ["search_nodes"] 98 | } 99 | } 100 | ``` 101 | 102 | Response includes: 103 | - How to search effectively (single words work best) 104 | - Performance characteristics (fast, cached) 105 | - Common searches (http, webhook, email, database, slack) 106 | - Pitfalls to avoid (multi-word searches use OR logic) 107 | - Related tools for next steps 108 | 109 | ## Tips for LLMs 110 | 111 | 1. **Start with categories**: Browse available tools by category to understand what's possible 112 | 2. **Search by task**: Use search to find tools for specific tasks like "validation" or "workflow" 113 | 3. **Learn tool combinations**: Check "Related Tools" to understand workflow patterns 114 | 4. **Check examples**: Every tool has working examples to copy and modify 115 | 5. **Avoid pitfalls**: Pay attention to "Common Pitfalls" to prevent errors 116 | 117 | ## Integration with Workflow Building 118 | 119 | The documentation helps build workflows efficiently: 120 | 121 | 1. **Discovery Phase**: Use `search_nodes` and `list_nodes` documentation 122 | 2. **Configuration Phase**: Learn from `get_node_essentials` examples 123 | 3. **Validation Phase**: Understand validation tool options and profiles 124 | 4. **Creation Phase**: Follow `n8n_create_workflow` best practices 125 | 5. **Update Phase**: Master `n8n_update_partial_workflow` operations 126 | 127 | ## Performance Optimization 128 | 129 | The documentation emphasizes performance: 130 | - Which tools are fast (essentials) vs slow (full info) 131 | - Optimal parameters (e.g., limit: 200+ for list_nodes) 132 | - Caching behavior 133 | - Token savings with partial updates 134 | 135 | This documentation system ensures LLMs can use the MCP tools effectively without trial and error. ``` -------------------------------------------------------------------------------- /tests/test-storage-system.js: -------------------------------------------------------------------------------- ```javascript 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * Test the node storage and search system 5 | */ 6 | 7 | const { NodeSourceExtractor } = require('../dist/utils/node-source-extractor'); 8 | const { NodeStorageService } = require('../dist/services/node-storage-service'); 9 | 10 | async function testStorageSystem() { 11 | console.log('=== Node Storage System Test ===\n'); 12 | 13 | const extractor = new NodeSourceExtractor(); 14 | const storage = new NodeStorageService(); 15 | 16 | // 1. Extract and store some nodes 17 | console.log('1. 
Extracting and storing nodes...\n'); 18 | 19 | const testNodes = [ 20 | 'n8n-nodes-base.Function', 21 | 'n8n-nodes-base.Webhook', 22 | 'n8n-nodes-base.HttpRequest', 23 | '@n8n/n8n-nodes-langchain.Agent' 24 | ]; 25 | 26 | let stored = 0; 27 | for (const nodeType of testNodes) { 28 | try { 29 | console.log(` Extracting ${nodeType}...`); 30 | const nodeInfo = await extractor.extractNodeSource(nodeType); 31 | await storage.storeNode(nodeInfo); 32 | stored++; 33 | console.log(` ✅ Stored successfully`); 34 | } catch (error) { 35 | console.log(` ❌ Failed: ${error.message}`); 36 | } 37 | } 38 | 39 | console.log(`\n Total stored: ${stored}/${testNodes.length}\n`); 40 | 41 | // 2. Test search functionality 42 | console.log('2. Testing search functionality...\n'); 43 | 44 | const searchTests = [ 45 | { query: 'function', desc: 'Search for "function"' }, 46 | { query: 'webhook', desc: 'Search for "webhook"' }, 47 | { packageName: 'n8n-nodes-base', desc: 'Filter by package' }, 48 | { hasCredentials: false, desc: 'Nodes without credentials' } 49 | ]; 50 | 51 | for (const test of searchTests) { 52 | console.log(` ${test.desc}:`); 53 | const results = await storage.searchNodes(test); 54 | console.log(` Found ${results.length} nodes`); 55 | if (results.length > 0) { 56 | console.log(` First result: ${results[0].nodeType}`); 57 | } 58 | } 59 | 60 | // 3. Get statistics 61 | console.log('\n3. Storage statistics:\n'); 62 | 63 | const stats = await storage.getStatistics(); 64 | console.log(` Total nodes: ${stats.totalNodes}`); 65 | console.log(` Total packages: ${stats.totalPackages}`); 66 | console.log(` Total code size: ${(stats.totalCodeSize / 1024).toFixed(2)} KB`); 67 | console.log(` Average node size: ${(stats.averageNodeSize / 1024).toFixed(2)} KB`); 68 | console.log(` Nodes with credentials: ${stats.nodesWithCredentials}`); 69 | 70 | console.log('\n Package distribution:'); 71 | stats.packageDistribution.forEach(pkg => { 72 | console.log(` ${pkg.package}: ${pkg.count} nodes`); 73 | }); 74 | 75 | // 4. Test bulk extraction 76 | console.log('\n4. Testing bulk extraction (first 10 nodes)...\n'); 77 | 78 | const allNodes = await extractor.listAvailableNodes(); 79 | const nodesToExtract = allNodes.slice(0, 10); 80 | 81 | const nodeInfos = []; 82 | for (const node of nodesToExtract) { 83 | try { 84 | const nodeType = node.packageName ? `${node.packageName}.${node.name}` : node.name; 85 | const nodeInfo = await extractor.extractNodeSource(nodeType); 86 | nodeInfos.push(nodeInfo); 87 | } catch (error) { 88 | // Skip failed extractions 89 | } 90 | } 91 | 92 | if (nodeInfos.length > 0) { 93 | const bulkResult = await storage.bulkStoreNodes(nodeInfos); 94 | console.log(` Bulk stored: ${bulkResult.stored}`); 95 | console.log(` Failed: ${bulkResult.failed}`); 96 | } 97 | 98 | // 5. Export for database 99 | console.log('\n5. 
Exporting for database...\n'); 100 | 101 | const dbExport = await storage.exportForDatabase(); 102 | console.log(` Exported ${dbExport.nodes.length} nodes`); 103 | console.log(` Total packages: ${dbExport.metadata.totalPackages}`); 104 | console.log(` Export timestamp: ${dbExport.metadata.exportedAt}`); 105 | 106 | // Save export to file 107 | const fs = require('fs').promises; 108 | const exportFile = path.join(__dirname, 'node-storage-export.json'); 109 | await fs.writeFile(exportFile, JSON.stringify(dbExport, null, 2)); 110 | console.log(` Saved to: ${exportFile}`); 111 | 112 | console.log('\n✅ Storage system test completed!'); 113 | } 114 | 115 | const path = require('path'); 116 | testStorageSystem().catch(console.error); ```