This is page 4 of 67. Use http://codebase.md/czlonkowski/n8n-mcp?lines=true&page={x} to view the full context.
# Directory Structure
```
├── _config.yml
├── .claude
│ └── agents
│ ├── code-reviewer.md
│ ├── context-manager.md
│ ├── debugger.md
│ ├── deployment-engineer.md
│ ├── mcp-backend-engineer.md
│ ├── n8n-mcp-tester.md
│ ├── technical-researcher.md
│ └── test-automator.md
├── .dockerignore
├── .env.docker
├── .env.example
├── .env.n8n.example
├── .env.test
├── .env.test.example
├── .github
│ ├── ABOUT.md
│ ├── BENCHMARK_THRESHOLDS.md
│ ├── FUNDING.yml
│ ├── gh-pages.yml
│ ├── secret_scanning.yml
│ └── workflows
│ ├── benchmark-pr.yml
│ ├── benchmark.yml
│ ├── docker-build-fast.yml
│ ├── docker-build-n8n.yml
│ ├── docker-build.yml
│ ├── release.yml
│ ├── test.yml
│ └── update-n8n-deps.yml
├── .gitignore
├── .npmignore
├── ANALYSIS_QUICK_REFERENCE.md
├── ATTRIBUTION.md
├── CHANGELOG.md
├── CLAUDE.md
├── codecov.yml
├── coverage.json
├── data
│ ├── .gitkeep
│ ├── nodes.db
│ ├── nodes.db-shm
│ ├── nodes.db-wal
│ └── templates.db
├── deploy
│ └── quick-deploy-n8n.sh
├── docker
│ ├── docker-entrypoint.sh
│ ├── n8n-mcp
│ ├── parse-config.js
│ └── README.md
├── docker-compose.buildkit.yml
├── docker-compose.extract.yml
├── docker-compose.n8n.yml
├── docker-compose.override.yml.example
├── docker-compose.test-n8n.yml
├── docker-compose.yml
├── Dockerfile
├── Dockerfile.railway
├── Dockerfile.test
├── docs
│ ├── AUTOMATED_RELEASES.md
│ ├── BENCHMARKS.md
│ ├── CHANGELOG.md
│ ├── CI_TEST_INFRASTRUCTURE.md
│ ├── CLAUDE_CODE_SETUP.md
│ ├── CLAUDE_INTERVIEW.md
│ ├── CODECOV_SETUP.md
│ ├── CODEX_SETUP.md
│ ├── CURSOR_SETUP.md
│ ├── DEPENDENCY_UPDATES.md
│ ├── DOCKER_README.md
│ ├── DOCKER_TROUBLESHOOTING.md
│ ├── FINAL_AI_VALIDATION_SPEC.md
│ ├── FLEXIBLE_INSTANCE_CONFIGURATION.md
│ ├── HTTP_DEPLOYMENT.md
│ ├── img
│ │ ├── cc_command.png
│ │ ├── cc_connected.png
│ │ ├── codex_connected.png
│ │ ├── cursor_tut.png
│ │ ├── Railway_api.png
│ │ ├── Railway_server_address.png
│ │ ├── skills.png
│ │ ├── vsc_ghcp_chat_agent_mode.png
│ │ ├── vsc_ghcp_chat_instruction_files.png
│ │ ├── vsc_ghcp_chat_thinking_tool.png
│ │ └── windsurf_tut.png
│ ├── INSTALLATION.md
│ ├── LIBRARY_USAGE.md
│ ├── local
│ │ ├── DEEP_DIVE_ANALYSIS_2025-10-02.md
│ │ ├── DEEP_DIVE_ANALYSIS_README.md
│ │ ├── Deep_dive_p1_p2.md
│ │ ├── integration-testing-plan.md
│ │ ├── integration-tests-phase1-summary.md
│ │ ├── N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md
│ │ ├── P0_IMPLEMENTATION_PLAN.md
│ │ └── TEMPLATE_MINING_ANALYSIS.md
│ ├── MCP_ESSENTIALS_README.md
│ ├── MCP_QUICK_START_GUIDE.md
│ ├── N8N_DEPLOYMENT.md
│ ├── RAILWAY_DEPLOYMENT.md
│ ├── README_CLAUDE_SETUP.md
│ ├── README.md
│ ├── SESSION_PERSISTENCE.md
│ ├── tools-documentation-usage.md
│ ├── TYPE_STRUCTURE_VALIDATION.md
│ ├── VS_CODE_PROJECT_SETUP.md
│ ├── WINDSURF_SETUP.md
│ └── workflow-diff-examples.md
├── examples
│ └── enhanced-documentation-demo.js
├── fetch_log.txt
├── LICENSE
├── MEMORY_N8N_UPDATE.md
├── MEMORY_TEMPLATE_UPDATE.md
├── monitor_fetch.sh
├── N8N_HTTP_STREAMABLE_SETUP.md
├── n8n-nodes.db
├── P0-R3-TEST-PLAN.md
├── package-lock.json
├── package.json
├── package.runtime.json
├── PRIVACY.md
├── railway.json
├── README_ANALYSIS.md
├── README.md
├── renovate.json
├── scripts
│ ├── analyze-optimization.sh
│ ├── audit-schema-coverage.ts
│ ├── backfill-mutation-hashes.ts
│ ├── build-optimized.sh
│ ├── compare-benchmarks.js
│ ├── demo-optimization.sh
│ ├── deploy-http.sh
│ ├── deploy-to-vm.sh
│ ├── export-webhook-workflows.ts
│ ├── extract-changelog.js
│ ├── extract-from-docker.js
│ ├── extract-nodes-docker.sh
│ ├── extract-nodes-simple.sh
│ ├── format-benchmark-results.js
│ ├── generate-benchmark-stub.js
│ ├── generate-detailed-reports.js
│ ├── generate-initial-release-notes.js
│ ├── generate-release-notes.js
│ ├── generate-test-summary.js
│ ├── http-bridge.js
│ ├── mcp-http-client.js
│ ├── migrate-nodes-fts.ts
│ ├── migrate-tool-docs.ts
│ ├── n8n-docs-mcp.service
│ ├── nginx-n8n-mcp.conf
│ ├── prebuild-fts5.ts
│ ├── prepare-release.js
│ ├── process-batch-metadata.ts
│ ├── publish-npm-quick.sh
│ ├── publish-npm.sh
│ ├── quick-test.ts
│ ├── run-benchmarks-ci.js
│ ├── sync-runtime-version.js
│ ├── test-ai-validation-debug.ts
│ ├── test-code-node-enhancements.ts
│ ├── test-code-node-fixes.ts
│ ├── test-docker-config.sh
│ ├── test-docker-fingerprint.ts
│ ├── test-docker-optimization.sh
│ ├── test-docker.sh
│ ├── test-empty-connection-validation.ts
│ ├── test-error-message-tracking.ts
│ ├── test-error-output-validation.ts
│ ├── test-error-validation.js
│ ├── test-essentials.ts
│ ├── test-expression-code-validation.ts
│ ├── test-expression-format-validation.js
│ ├── test-fts5-search.ts
│ ├── test-fuzzy-fix.ts
│ ├── test-fuzzy-simple.ts
│ ├── test-helpers-validation.ts
│ ├── test-http-search.ts
│ ├── test-http.sh
│ ├── test-jmespath-validation.ts
│ ├── test-multi-tenant-simple.ts
│ ├── test-multi-tenant.ts
│ ├── test-n8n-integration.sh
│ ├── test-node-info.js
│ ├── test-node-type-validation.ts
│ ├── test-nodes-base-prefix.ts
│ ├── test-operation-validation.ts
│ ├── test-optimized-docker.sh
│ ├── test-release-automation.js
│ ├── test-search-improvements.ts
│ ├── test-security.ts
│ ├── test-single-session.sh
│ ├── test-sqljs-triggers.ts
│ ├── test-structure-validation.ts
│ ├── test-telemetry-debug.ts
│ ├── test-telemetry-direct.ts
│ ├── test-telemetry-env.ts
│ ├── test-telemetry-integration.ts
│ ├── test-telemetry-no-select.ts
│ ├── test-telemetry-security.ts
│ ├── test-telemetry-simple.ts
│ ├── test-typeversion-validation.ts
│ ├── test-url-configuration.ts
│ ├── test-user-id-persistence.ts
│ ├── test-webhook-validation.ts
│ ├── test-workflow-insert.ts
│ ├── test-workflow-sanitizer.ts
│ ├── test-workflow-tracking-debug.ts
│ ├── test-workflow-versioning.ts
│ ├── update-and-publish-prep.sh
│ ├── update-n8n-deps.js
│ ├── update-readme-version.js
│ ├── vitest-benchmark-json-reporter.js
│ └── vitest-benchmark-reporter.ts
├── SECURITY.md
├── src
│ ├── config
│ │ └── n8n-api.ts
│ ├── constants
│ │ └── type-structures.ts
│ ├── data
│ │ └── canonical-ai-tool-examples.json
│ ├── database
│ │ ├── database-adapter.ts
│ │ ├── migrations
│ │ │ └── add-template-node-configs.sql
│ │ ├── node-repository.ts
│ │ ├── nodes.db
│ │ ├── schema-optimized.sql
│ │ └── schema.sql
│ ├── errors
│ │ └── validation-service-error.ts
│ ├── http-server-single-session.ts
│ ├── http-server.ts
│ ├── index.ts
│ ├── loaders
│ │ └── node-loader.ts
│ ├── mappers
│ │ └── docs-mapper.ts
│ ├── mcp
│ │ ├── handlers-n8n-manager.ts
│ │ ├── handlers-workflow-diff.ts
│ │ ├── index.ts
│ │ ├── server.ts
│ │ ├── stdio-wrapper.ts
│ │ ├── tool-docs
│ │ │ ├── configuration
│ │ │ │ ├── get-node.ts
│ │ │ │ └── index.ts
│ │ │ ├── discovery
│ │ │ │ ├── index.ts
│ │ │ │ └── search-nodes.ts
│ │ │ ├── guides
│ │ │ │ ├── ai-agents-guide.ts
│ │ │ │ └── index.ts
│ │ │ ├── index.ts
│ │ │ ├── system
│ │ │ │ ├── index.ts
│ │ │ │ ├── n8n-diagnostic.ts
│ │ │ │ ├── n8n-health-check.ts
│ │ │ │ ├── n8n-list-available-tools.ts
│ │ │ │ └── tools-documentation.ts
│ │ │ ├── templates
│ │ │ │ ├── get-template.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── search-templates.ts
│ │ │ ├── types.ts
│ │ │ ├── validation
│ │ │ │ ├── index.ts
│ │ │ │ ├── validate-node.ts
│ │ │ │ └── validate-workflow.ts
│ │ │ └── workflow_management
│ │ │ ├── index.ts
│ │ │ ├── n8n-autofix-workflow.ts
│ │ │ ├── n8n-create-workflow.ts
│ │ │ ├── n8n-delete-workflow.ts
│ │ │ ├── n8n-executions.ts
│ │ │ ├── n8n-get-workflow.ts
│ │ │ ├── n8n-list-workflows.ts
│ │ │ ├── n8n-trigger-webhook-workflow.ts
│ │ │ ├── n8n-update-full-workflow.ts
│ │ │ ├── n8n-update-partial-workflow.ts
│ │ │ ├── n8n-validate-workflow.ts
│ │ │ └── n8n-workflow-versions.ts
│ │ ├── tools-documentation.ts
│ │ ├── tools-n8n-friendly.ts
│ │ ├── tools-n8n-manager.ts
│ │ ├── tools.ts
│ │ └── workflow-examples.ts
│ ├── mcp-engine.ts
│ ├── mcp-tools-engine.ts
│ ├── n8n
│ │ ├── MCPApi.credentials.ts
│ │ └── MCPNode.node.ts
│ ├── parsers
│ │ ├── node-parser.ts
│ │ ├── property-extractor.ts
│ │ └── simple-parser.ts
│ ├── scripts
│ │ ├── debug-http-search.ts
│ │ ├── extract-from-docker.ts
│ │ ├── fetch-templates-robust.ts
│ │ ├── fetch-templates.ts
│ │ ├── rebuild-database.ts
│ │ ├── rebuild-optimized.ts
│ │ ├── rebuild.ts
│ │ ├── sanitize-templates.ts
│ │ ├── seed-canonical-ai-examples.ts
│ │ ├── test-autofix-documentation.ts
│ │ ├── test-autofix-workflow.ts
│ │ ├── test-execution-filtering.ts
│ │ ├── test-node-suggestions.ts
│ │ ├── test-protocol-negotiation.ts
│ │ ├── test-summary.ts
│ │ ├── test-telemetry-mutations-verbose.ts
│ │ ├── test-telemetry-mutations.ts
│ │ ├── test-webhook-autofix.ts
│ │ ├── validate.ts
│ │ └── validation-summary.ts
│ ├── services
│ │ ├── ai-node-validator.ts
│ │ ├── ai-tool-validators.ts
│ │ ├── breaking-change-detector.ts
│ │ ├── breaking-changes-registry.ts
│ │ ├── confidence-scorer.ts
│ │ ├── config-validator.ts
│ │ ├── enhanced-config-validator.ts
│ │ ├── example-generator.ts
│ │ ├── execution-processor.ts
│ │ ├── expression-format-validator.ts
│ │ ├── expression-validator.ts
│ │ ├── n8n-api-client.ts
│ │ ├── n8n-validation.ts
│ │ ├── node-documentation-service.ts
│ │ ├── node-migration-service.ts
│ │ ├── node-sanitizer.ts
│ │ ├── node-similarity-service.ts
│ │ ├── node-specific-validators.ts
│ │ ├── node-version-service.ts
│ │ ├── operation-similarity-service.ts
│ │ ├── post-update-validator.ts
│ │ ├── property-dependencies.ts
│ │ ├── property-filter.ts
│ │ ├── resource-similarity-service.ts
│ │ ├── sqlite-storage-service.ts
│ │ ├── task-templates.ts
│ │ ├── type-structure-service.ts
│ │ ├── universal-expression-validator.ts
│ │ ├── workflow-auto-fixer.ts
│ │ ├── workflow-diff-engine.ts
│ │ ├── workflow-validator.ts
│ │ └── workflow-versioning-service.ts
│ ├── telemetry
│ │ ├── batch-processor.ts
│ │ ├── config-manager.ts
│ │ ├── early-error-logger.ts
│ │ ├── error-sanitization-utils.ts
│ │ ├── error-sanitizer.ts
│ │ ├── event-tracker.ts
│ │ ├── event-validator.ts
│ │ ├── index.ts
│ │ ├── intent-classifier.ts
│ │ ├── intent-sanitizer.ts
│ │ ├── mutation-tracker.ts
│ │ ├── mutation-types.ts
│ │ ├── mutation-validator.ts
│ │ ├── performance-monitor.ts
│ │ ├── rate-limiter.ts
│ │ ├── startup-checkpoints.ts
│ │ ├── telemetry-error.ts
│ │ ├── telemetry-manager.ts
│ │ ├── telemetry-types.ts
│ │ └── workflow-sanitizer.ts
│ ├── templates
│ │ ├── batch-processor.ts
│ │ ├── metadata-generator.ts
│ │ ├── README.md
│ │ ├── template-fetcher.ts
│ │ ├── template-repository.ts
│ │ └── template-service.ts
│ ├── types
│ │ ├── index.ts
│ │ ├── instance-context.ts
│ │ ├── n8n-api.ts
│ │ ├── node-types.ts
│ │ ├── session-state.ts
│ │ ├── type-structures.ts
│ │ └── workflow-diff.ts
│ └── utils
│ ├── auth.ts
│ ├── bridge.ts
│ ├── cache-utils.ts
│ ├── console-manager.ts
│ ├── documentation-fetcher.ts
│ ├── enhanced-documentation-fetcher.ts
│ ├── error-handler.ts
│ ├── example-generator.ts
│ ├── expression-utils.ts
│ ├── fixed-collection-validator.ts
│ ├── logger.ts
│ ├── mcp-client.ts
│ ├── n8n-errors.ts
│ ├── node-classification.ts
│ ├── node-source-extractor.ts
│ ├── node-type-normalizer.ts
│ ├── node-type-utils.ts
│ ├── node-utils.ts
│ ├── npm-version-checker.ts
│ ├── protocol-version.ts
│ ├── simple-cache.ts
│ ├── ssrf-protection.ts
│ ├── template-node-resolver.ts
│ ├── template-sanitizer.ts
│ ├── url-detector.ts
│ ├── validation-schemas.ts
│ └── version.ts
├── test-output.txt
├── test-reinit-fix.sh
├── tests
│ ├── __snapshots__
│ │ └── .gitkeep
│ ├── auth.test.ts
│ ├── benchmarks
│ │ ├── database-queries.bench.ts
│ │ ├── index.ts
│ │ ├── mcp-tools.bench.ts
│ │ ├── mcp-tools.bench.ts.disabled
│ │ ├── mcp-tools.bench.ts.skip
│ │ ├── node-loading.bench.ts.disabled
│ │ ├── README.md
│ │ ├── search-operations.bench.ts.disabled
│ │ └── validation-performance.bench.ts.disabled
│ ├── bridge.test.ts
│ ├── comprehensive-extraction-test.js
│ ├── data
│ │ └── .gitkeep
│ ├── debug-slack-doc.js
│ ├── demo-enhanced-documentation.js
│ ├── docker-tests-README.md
│ ├── error-handler.test.ts
│ ├── examples
│ │ └── using-database-utils.test.ts
│ ├── extracted-nodes-db
│ │ ├── database-import.json
│ │ ├── extraction-report.json
│ │ ├── insert-nodes.sql
│ │ ├── n8n-nodes-base__Airtable.json
│ │ ├── n8n-nodes-base__Discord.json
│ │ ├── n8n-nodes-base__Function.json
│ │ ├── n8n-nodes-base__HttpRequest.json
│ │ ├── n8n-nodes-base__If.json
│ │ ├── n8n-nodes-base__Slack.json
│ │ ├── n8n-nodes-base__SplitInBatches.json
│ │ └── n8n-nodes-base__Webhook.json
│ ├── factories
│ │ ├── node-factory.ts
│ │ └── property-definition-factory.ts
│ ├── fixtures
│ │ ├── .gitkeep
│ │ ├── database
│ │ │ └── test-nodes.json
│ │ ├── factories
│ │ │ ├── node.factory.ts
│ │ │ └── parser-node.factory.ts
│ │ └── template-configs.ts
│ ├── helpers
│ │ └── env-helpers.ts
│ ├── http-server-auth.test.ts
│ ├── integration
│ │ ├── ai-validation
│ │ │ ├── ai-agent-validation.test.ts
│ │ │ ├── ai-tool-validation.test.ts
│ │ │ ├── chat-trigger-validation.test.ts
│ │ │ ├── e2e-validation.test.ts
│ │ │ ├── helpers.ts
│ │ │ ├── llm-chain-validation.test.ts
│ │ │ ├── README.md
│ │ │ └── TEST_REPORT.md
│ │ ├── ci
│ │ │ └── database-population.test.ts
│ │ ├── database
│ │ │ ├── connection-management.test.ts
│ │ │ ├── empty-database.test.ts
│ │ │ ├── fts5-search.test.ts
│ │ │ ├── node-fts5-search.test.ts
│ │ │ ├── node-repository.test.ts
│ │ │ ├── performance.test.ts
│ │ │ ├── sqljs-memory-leak.test.ts
│ │ │ ├── template-node-configs.test.ts
│ │ │ ├── template-repository.test.ts
│ │ │ ├── test-utils.ts
│ │ │ └── transactions.test.ts
│ │ ├── database-integration.test.ts
│ │ ├── docker
│ │ │ ├── docker-config.test.ts
│ │ │ ├── docker-entrypoint.test.ts
│ │ │ └── test-helpers.ts
│ │ ├── flexible-instance-config.test.ts
│ │ ├── mcp
│ │ │ └── template-examples-e2e.test.ts
│ │ ├── mcp-protocol
│ │ │ ├── basic-connection.test.ts
│ │ │ ├── error-handling.test.ts
│ │ │ ├── performance.test.ts
│ │ │ ├── protocol-compliance.test.ts
│ │ │ ├── README.md
│ │ │ ├── session-management.test.ts
│ │ │ ├── test-helpers.ts
│ │ │ ├── tool-invocation.test.ts
│ │ │ └── workflow-error-validation.test.ts
│ │ ├── msw-setup.test.ts
│ │ ├── n8n-api
│ │ │ ├── executions
│ │ │ │ ├── delete-execution.test.ts
│ │ │ │ ├── get-execution.test.ts
│ │ │ │ ├── list-executions.test.ts
│ │ │ │ └── trigger-webhook.test.ts
│ │ │ ├── scripts
│ │ │ │ └── cleanup-orphans.ts
│ │ │ ├── system
│ │ │ │ ├── diagnostic.test.ts
│ │ │ │ └── health-check.test.ts
│ │ │ ├── test-connection.ts
│ │ │ ├── types
│ │ │ │ └── mcp-responses.ts
│ │ │ ├── utils
│ │ │ │ ├── cleanup-helpers.ts
│ │ │ │ ├── credentials.ts
│ │ │ │ ├── factories.ts
│ │ │ │ ├── fixtures.ts
│ │ │ │ ├── mcp-context.ts
│ │ │ │ ├── n8n-client.ts
│ │ │ │ ├── node-repository.ts
│ │ │ │ ├── response-types.ts
│ │ │ │ ├── test-context.ts
│ │ │ │ └── webhook-workflows.ts
│ │ │ └── workflows
│ │ │ ├── autofix-workflow.test.ts
│ │ │ ├── create-workflow.test.ts
│ │ │ ├── delete-workflow.test.ts
│ │ │ ├── get-workflow-details.test.ts
│ │ │ ├── get-workflow-minimal.test.ts
│ │ │ ├── get-workflow-structure.test.ts
│ │ │ ├── get-workflow.test.ts
│ │ │ ├── list-workflows.test.ts
│ │ │ ├── smart-parameters.test.ts
│ │ │ ├── update-partial-workflow.test.ts
│ │ │ ├── update-workflow.test.ts
│ │ │ └── validate-workflow.test.ts
│ │ ├── security
│ │ │ ├── command-injection-prevention.test.ts
│ │ │ └── rate-limiting.test.ts
│ │ ├── setup
│ │ │ ├── integration-setup.ts
│ │ │ └── msw-test-server.ts
│ │ ├── telemetry
│ │ │ ├── docker-user-id-stability.test.ts
│ │ │ └── mcp-telemetry.test.ts
│ │ ├── templates
│ │ │ └── metadata-operations.test.ts
│ │ ├── validation
│ │ │ └── real-world-structure-validation.test.ts
│ │ ├── workflow-creation-node-type-format.test.ts
│ │ └── workflow-diff
│ │ ├── ai-node-connection-validation.test.ts
│ │ └── node-rename-integration.test.ts
│ ├── logger.test.ts
│ ├── MOCKING_STRATEGY.md
│ ├── mocks
│ │ ├── n8n-api
│ │ │ ├── data
│ │ │ │ ├── credentials.ts
│ │ │ │ ├── executions.ts
│ │ │ │ └── workflows.ts
│ │ │ ├── handlers.ts
│ │ │ └── index.ts
│ │ └── README.md
│ ├── node-storage-export.json
│ ├── setup
│ │ ├── global-setup.ts
│ │ ├── msw-setup.ts
│ │ ├── TEST_ENV_DOCUMENTATION.md
│ │ └── test-env.ts
│ ├── test-database-extraction.js
│ ├── test-direct-extraction.js
│ ├── test-enhanced-documentation.js
│ ├── test-enhanced-integration.js
│ ├── test-mcp-extraction.js
│ ├── test-mcp-server-extraction.js
│ ├── test-mcp-tools-integration.js
│ ├── test-node-documentation-service.js
│ ├── test-node-list.js
│ ├── test-package-info.js
│ ├── test-parsing-operations.js
│ ├── test-slack-node-complete.js
│ ├── test-small-rebuild.js
│ ├── test-sqlite-search.js
│ ├── test-storage-system.js
│ ├── unit
│ │ ├── __mocks__
│ │ │ ├── n8n-nodes-base.test.ts
│ │ │ ├── n8n-nodes-base.ts
│ │ │ └── README.md
│ │ ├── constants
│ │ │ └── type-structures.test.ts
│ │ ├── database
│ │ │ ├── __mocks__
│ │ │ │ └── better-sqlite3.ts
│ │ │ ├── database-adapter-unit.test.ts
│ │ │ ├── node-repository-core.test.ts
│ │ │ ├── node-repository-operations.test.ts
│ │ │ ├── node-repository-outputs.test.ts
│ │ │ ├── README.md
│ │ │ └── template-repository-core.test.ts
│ │ ├── docker
│ │ │ ├── config-security.test.ts
│ │ │ ├── edge-cases.test.ts
│ │ │ ├── parse-config.test.ts
│ │ │ └── serve-command.test.ts
│ │ ├── errors
│ │ │ └── validation-service-error.test.ts
│ │ ├── examples
│ │ │ └── using-n8n-nodes-base-mock.test.ts
│ │ ├── flexible-instance-security-advanced.test.ts
│ │ ├── flexible-instance-security.test.ts
│ │ ├── http-server
│ │ │ ├── multi-tenant-support.test.ts
│ │ │ └── session-persistence.test.ts
│ │ ├── http-server-n8n-mode.test.ts
│ │ ├── http-server-n8n-reinit.test.ts
│ │ ├── http-server-session-management.test.ts
│ │ ├── loaders
│ │ │ └── node-loader.test.ts
│ │ ├── mappers
│ │ │ └── docs-mapper.test.ts
│ │ ├── mcp
│ │ │ ├── disabled-tools-additional.test.ts
│ │ │ ├── disabled-tools.test.ts
│ │ │ ├── get-node-essentials-examples.test.ts
│ │ │ ├── get-node-unified.test.ts
│ │ │ ├── handlers-n8n-manager-simple.test.ts
│ │ │ ├── handlers-n8n-manager.test.ts
│ │ │ ├── handlers-workflow-diff.test.ts
│ │ │ ├── lru-cache-behavior.test.ts
│ │ │ ├── multi-tenant-tool-listing.test.ts.disabled
│ │ │ ├── parameter-validation.test.ts
│ │ │ ├── search-nodes-examples.test.ts
│ │ │ ├── tools-documentation.test.ts
│ │ │ └── tools.test.ts
│ │ ├── mcp-engine
│ │ │ └── session-persistence.test.ts
│ │ ├── monitoring
│ │ │ └── cache-metrics.test.ts
│ │ ├── MULTI_TENANT_TEST_COVERAGE.md
│ │ ├── multi-tenant-integration.test.ts
│ │ ├── parsers
│ │ │ ├── node-parser-outputs.test.ts
│ │ │ ├── node-parser.test.ts
│ │ │ ├── property-extractor.test.ts
│ │ │ └── simple-parser.test.ts
│ │ ├── scripts
│ │ │ └── fetch-templates-extraction.test.ts
│ │ ├── services
│ │ │ ├── ai-node-validator.test.ts
│ │ │ ├── ai-tool-validators.test.ts
│ │ │ ├── breaking-change-detector.test.ts
│ │ │ ├── confidence-scorer.test.ts
│ │ │ ├── config-validator-basic.test.ts
│ │ │ ├── config-validator-edge-cases.test.ts
│ │ │ ├── config-validator-node-specific.test.ts
│ │ │ ├── config-validator-security.test.ts
│ │ │ ├── debug-validator.test.ts
│ │ │ ├── enhanced-config-validator-integration.test.ts
│ │ │ ├── enhanced-config-validator-operations.test.ts
│ │ │ ├── enhanced-config-validator-type-structures.test.ts
│ │ │ ├── enhanced-config-validator.test.ts
│ │ │ ├── example-generator.test.ts
│ │ │ ├── execution-processor.test.ts
│ │ │ ├── expression-format-validator.test.ts
│ │ │ ├── expression-validator-edge-cases.test.ts
│ │ │ ├── expression-validator.test.ts
│ │ │ ├── fixed-collection-validation.test.ts
│ │ │ ├── loop-output-edge-cases.test.ts
│ │ │ ├── n8n-api-client.test.ts
│ │ │ ├── n8n-validation-sticky-notes.test.ts
│ │ │ ├── n8n-validation.test.ts
│ │ │ ├── node-migration-service.test.ts
│ │ │ ├── node-sanitizer.test.ts
│ │ │ ├── node-similarity-service.test.ts
│ │ │ ├── node-specific-validators.test.ts
│ │ │ ├── node-version-service.test.ts
│ │ │ ├── operation-similarity-service-comprehensive.test.ts
│ │ │ ├── operation-similarity-service.test.ts
│ │ │ ├── post-update-validator.test.ts
│ │ │ ├── property-dependencies.test.ts
│ │ │ ├── property-filter-edge-cases.test.ts
│ │ │ ├── property-filter.test.ts
│ │ │ ├── resource-similarity-service-comprehensive.test.ts
│ │ │ ├── resource-similarity-service.test.ts
│ │ │ ├── task-templates.test.ts
│ │ │ ├── template-service.test.ts
│ │ │ ├── type-structure-service.test.ts
│ │ │ ├── universal-expression-validator.test.ts
│ │ │ ├── validation-fixes.test.ts
│ │ │ ├── workflow-auto-fixer.test.ts
│ │ │ ├── workflow-diff-engine.test.ts
│ │ │ ├── workflow-diff-node-rename.test.ts
│ │ │ ├── workflow-fixed-collection-validation.test.ts
│ │ │ ├── workflow-validator-comprehensive.test.ts
│ │ │ ├── workflow-validator-edge-cases.test.ts
│ │ │ ├── workflow-validator-error-outputs.test.ts
│ │ │ ├── workflow-validator-expression-format.test.ts
│ │ │ ├── workflow-validator-loops-simple.test.ts
│ │ │ ├── workflow-validator-loops.test.ts
│ │ │ ├── workflow-validator-mocks.test.ts
│ │ │ ├── workflow-validator-performance.test.ts
│ │ │ ├── workflow-validator-with-mocks.test.ts
│ │ │ ├── workflow-validator.test.ts
│ │ │ └── workflow-versioning-service.test.ts
│ │ ├── telemetry
│ │ │ ├── batch-processor.test.ts
│ │ │ ├── config-manager.test.ts
│ │ │ ├── event-tracker.test.ts
│ │ │ ├── event-validator.test.ts
│ │ │ ├── mutation-tracker.test.ts
│ │ │ ├── mutation-validator.test.ts
│ │ │ ├── rate-limiter.test.ts
│ │ │ ├── telemetry-error.test.ts
│ │ │ ├── telemetry-manager.test.ts
│ │ │ ├── v2.18.3-fixes-verification.test.ts
│ │ │ └── workflow-sanitizer.test.ts
│ │ ├── templates
│ │ │ ├── batch-processor.test.ts
│ │ │ ├── metadata-generator.test.ts
│ │ │ ├── template-repository-metadata.test.ts
│ │ │ └── template-repository-security.test.ts
│ │ ├── test-env-example.test.ts
│ │ ├── test-infrastructure.test.ts
│ │ ├── types
│ │ │ ├── instance-context-coverage.test.ts
│ │ │ ├── instance-context-multi-tenant.test.ts
│ │ │ └── type-structures.test.ts
│ │ ├── utils
│ │ │ ├── auth-timing-safe.test.ts
│ │ │ ├── cache-utils.test.ts
│ │ │ ├── console-manager.test.ts
│ │ │ ├── database-utils.test.ts
│ │ │ ├── expression-utils.test.ts
│ │ │ ├── fixed-collection-validator.test.ts
│ │ │ ├── n8n-errors.test.ts
│ │ │ ├── node-classification.test.ts
│ │ │ ├── node-type-normalizer.test.ts
│ │ │ ├── node-type-utils.test.ts
│ │ │ ├── node-utils.test.ts
│ │ │ ├── simple-cache-memory-leak-fix.test.ts
│ │ │ ├── ssrf-protection.test.ts
│ │ │ └── template-node-resolver.test.ts
│ │ └── validation-fixes.test.ts
│ └── utils
│ ├── assertions.ts
│ ├── builders
│ │ └── workflow.builder.ts
│ ├── data-generators.ts
│ ├── database-utils.ts
│ ├── README.md
│ └── test-helpers.ts
├── thumbnail.png
├── tsconfig.build.json
├── tsconfig.json
├── types
│ ├── mcp.d.ts
│ └── test-env.d.ts
├── versioned-nodes.md
├── vitest.config.benchmark.ts
├── vitest.config.integration.ts
└── vitest.config.ts
```
# Files
--------------------------------------------------------------------------------
/src/mcp/tool-docs/system/tools-documentation.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { ToolDocumentation } from '../types';
2 |
/**
 * MCP tool documentation entry for `tools_documentation` — the meta-tool
 * that returns documentation for every other MCP tool, including itself.
 *
 * Structure mirrors the two supported depths: `essentials` is the concise
 * default; `full` is returned when depth:"full" is requested.
 */
export const toolsDocumentationDoc: ToolDocumentation = {
  name: 'tools_documentation',
  category: 'system',
  // Concise entry served for depth "essentials" (the default).
  essentials: {
    description: 'The meta-documentation tool. Returns documentation for any MCP tool, including itself. Call without parameters for a comprehensive overview of all available tools. This is your starting point for discovering n8n MCP capabilities.',
    keyParameters: ['topic', 'depth'],
    example: 'tools_documentation({topic: "search_nodes"})',
    performance: 'Instant (static content)',
    tips: [
      'Call without parameters first to see all tools',
      'Can document itself: tools_documentation({topic: "tools_documentation"})',
      'Use depth:"full" for comprehensive details'
    ]
  },
  // Comprehensive entry served when depth:"full" is requested.
  full: {
    description: 'The self-referential documentation system for all MCP tools. This tool can document any other tool, including itself. It\'s the primary discovery mechanism for understanding what tools are available and how to use them. Returns utilitarian documentation optimized for AI agent consumption.',
    parameters: {
      topic: { type: 'string', description: 'Tool name (e.g., "search_nodes"), special topic ("javascript_code_node_guide", "python_code_node_guide"), or "overview". Leave empty for quick reference.', required: false },
      depth: { type: 'string', description: 'Level of detail: "essentials" (default, concise) or "full" (comprehensive with examples)', required: false }
    },
    returns: 'Markdown-formatted documentation tailored for the requested tool and depth. For essentials: key info, parameters, example, tips. For full: complete details, all examples, use cases, best practices.',
    examples: [
      '// Get started - see all available tools',
      'tools_documentation()',
      '',
      '// Learn about a specific tool',
      'tools_documentation({topic: "search_nodes"})',
      '',
      '// Get comprehensive details',
      'tools_documentation({topic: "validate_workflow", depth: "full"})',
      '',
      '// Self-referential example - document this tool',
      'tools_documentation({topic: "tools_documentation", depth: "full"})',
      '',
      '// Code node guides',
      'tools_documentation({topic: "javascript_code_node_guide"})',
      'tools_documentation({topic: "python_code_node_guide"})'
    ],
    useCases: [
      'Initial discovery of available MCP tools',
      'Learning how to use specific tools',
      'Finding required and optional parameters',
      'Getting working examples to copy',
      'Understanding tool performance characteristics',
      'Discovering related tools for workflows'
    ],
    performance: 'Instant - all documentation is pre-loaded in memory',
    bestPractices: [
      'Always start with tools_documentation() to see available tools',
      'Use essentials for quick parameter reference during coding',
      'Switch to full depth when debugging or learning new tools',
      'Check Code node guides when working with Code nodes'
    ],
    pitfalls: [
      'Tool names must match exactly - use the overview to find correct names',
      'Not all internal functions are documented',
      'Special topics (code guides) require exact names'
    ],
    relatedTools: ['n8n_health_check for verifying API connection', 'search_templates for workflow examples', 'search_nodes for finding nodes']
  }
};
```
--------------------------------------------------------------------------------
/scripts/test-telemetry-debug.ts:
--------------------------------------------------------------------------------
```typescript
1 | #!/usr/bin/env npx tsx
2 | /**
3 | * Debug script for telemetry integration
4 | * Tests direct Supabase connection
5 | */
6 |
7 | import { createClient } from '@supabase/supabase-js';
8 | import dotenv from 'dotenv';
9 |
10 | // Load environment variables
11 | dotenv.config();
12 |
13 | async function debugTelemetry() {
14 | console.log('🔍 Debugging Telemetry Integration\n');
15 |
16 | const supabaseUrl = process.env.SUPABASE_URL;
17 | const supabaseAnonKey = process.env.SUPABASE_ANON_KEY;
18 |
19 | if (!supabaseUrl || !supabaseAnonKey) {
20 | console.error('❌ Missing SUPABASE_URL or SUPABASE_ANON_KEY');
21 | process.exit(1);
22 | }
23 |
24 | console.log('Environment:');
25 | console.log(' URL:', supabaseUrl);
26 | console.log(' Key:', supabaseAnonKey.substring(0, 30) + '...');
27 |
28 | // Create Supabase client
29 | const supabase = createClient(supabaseUrl, supabaseAnonKey, {
30 | auth: {
31 | persistSession: false,
32 | autoRefreshToken: false,
33 | }
34 | });
35 |
36 | // Test 1: Direct insert to telemetry_events
37 | console.log('\n📝 Test 1: Direct insert to telemetry_events...');
38 | const testEvent = {
39 | user_id: 'test-user-123',
40 | event: 'test_event',
41 | properties: {
42 | test: true,
43 | timestamp: new Date().toISOString()
44 | }
45 | };
46 |
47 | const { data: eventData, error: eventError } = await supabase
48 | .from('telemetry_events')
49 | .insert([testEvent])
50 | .select();
51 |
52 | if (eventError) {
53 | console.error('❌ Event insert failed:', eventError);
54 | } else {
55 | console.log('✅ Event inserted successfully:', eventData);
56 | }
57 |
58 | // Test 2: Direct insert to telemetry_workflows
59 | console.log('\n📝 Test 2: Direct insert to telemetry_workflows...');
60 | const testWorkflow = {
61 | user_id: 'test-user-123',
62 | workflow_hash: 'test-hash-' + Date.now(),
63 | node_count: 3,
64 | node_types: ['webhook', 'http', 'slack'],
65 | has_trigger: true,
66 | has_webhook: true,
67 | complexity: 'simple',
68 | sanitized_workflow: {
69 | nodes: [],
70 | connections: {}
71 | }
72 | };
73 |
74 | const { data: workflowData, error: workflowError } = await supabase
75 | .from('telemetry_workflows')
76 | .insert([testWorkflow])
77 | .select();
78 |
79 | if (workflowError) {
80 | console.error('❌ Workflow insert failed:', workflowError);
81 | } else {
82 | console.log('✅ Workflow inserted successfully:', workflowData);
83 | }
84 |
85 | // Test 3: Try to read data (should fail with anon key due to RLS)
86 | console.log('\n📖 Test 3: Attempting to read data (should fail due to RLS)...');
87 | const { data: readData, error: readError } = await supabase
88 | .from('telemetry_events')
89 | .select('*')
90 | .limit(1);
91 |
92 | if (readError) {
93 | console.log('✅ Read correctly blocked by RLS:', readError.message);
94 | } else {
95 | console.log('⚠️ Unexpected: Read succeeded (RLS may not be working):', readData);
96 | }
97 |
98 | // Test 4: Check table existence
99 | console.log('\n🔍 Test 4: Verifying tables exist...');
100 | const { data: tables, error: tablesError } = await supabase
101 | .rpc('get_tables', { schema_name: 'public' })
102 | .select('*');
103 |
104 | if (tablesError) {
105 | // This is expected - the RPC function might not exist
106 | console.log('ℹ️ Cannot list tables (RPC function not available)');
107 | } else {
108 | console.log('Tables found:', tables);
109 | }
110 |
111 | console.log('\n✨ Debug completed! Check your Supabase dashboard for the test data.');
112 | console.log('Dashboard: https://supabase.com/dashboard/project/ydyufsohxdfpopqbubwk/editor');
113 | }
114 |
115 | debugTelemetry().catch(error => {
116 | console.error('❌ Debug failed:', error);
117 | process.exit(1);
118 | });
```
--------------------------------------------------------------------------------
/src/loaders/node-loader.ts:
--------------------------------------------------------------------------------
```typescript
1 | import path from 'path';
2 |
/** A single node implementation loaded from an n8n package, with its origin metadata. */
export interface LoadedNode {
  packageName: string; // npm package the node came from (e.g. "n8n-nodes-base")
  nodeName: string;    // node's short name, e.g. "Slack" (derived from path or package.json key)
  NodeClass: any;      // the node's exported class; shape intentionally untyped here
}
8 |
9 | export class N8nNodeLoader {
10 | private readonly CORE_PACKAGES = [
11 | { name: 'n8n-nodes-base', path: 'n8n-nodes-base' },
12 | { name: '@n8n/n8n-nodes-langchain', path: '@n8n/n8n-nodes-langchain' }
13 | ];
14 |
15 | async loadAllNodes(): Promise<LoadedNode[]> {
16 | const results: LoadedNode[] = [];
17 |
18 | for (const pkg of this.CORE_PACKAGES) {
19 | try {
20 | console.log(`\n📦 Loading package: ${pkg.name} from ${pkg.path}`);
21 | // Use the path property to locate the package
22 | const packageJson = require(`${pkg.path}/package.json`);
23 | console.log(` Found ${Object.keys(packageJson.n8n?.nodes || {}).length} nodes in package.json`);
24 | const nodes = await this.loadPackageNodes(pkg.name, pkg.path, packageJson);
25 | results.push(...nodes);
26 | } catch (error) {
27 | console.error(`Failed to load ${pkg.name}:`, error);
28 | }
29 | }
30 |
31 | return results;
32 | }
33 |
34 | private async loadPackageNodes(packageName: string, packagePath: string, packageJson: any): Promise<LoadedNode[]> {
35 | const n8nConfig = packageJson.n8n || {};
36 | const nodes: LoadedNode[] = [];
37 |
38 | // Check if nodes is an array or object
39 | const nodesList = n8nConfig.nodes || [];
40 |
41 | if (Array.isArray(nodesList)) {
42 | // Handle array format (n8n-nodes-base uses this)
43 | for (const nodePath of nodesList) {
44 | try {
45 | const fullPath = require.resolve(`${packagePath}/${nodePath}`);
46 | const nodeModule = require(fullPath);
47 |
48 | // Extract node name from path (e.g., "dist/nodes/Slack/Slack.node.js" -> "Slack")
49 | const nodeNameMatch = nodePath.match(/\/([^\/]+)\.node\.(js|ts)$/);
50 | const nodeName = nodeNameMatch ? nodeNameMatch[1] : path.basename(nodePath, '.node.js');
51 |
52 | // Handle default export and various export patterns
53 | const NodeClass = nodeModule.default || nodeModule[nodeName] || Object.values(nodeModule)[0];
54 | if (NodeClass) {
55 | nodes.push({ packageName, nodeName, NodeClass });
56 | console.log(` ✓ Loaded ${nodeName} from ${packageName}`);
57 | } else {
58 | console.warn(` ⚠ No valid export found for ${nodeName} in ${packageName}`);
59 | }
60 | } catch (error) {
61 | console.error(` ✗ Failed to load node from ${packageName}/${nodePath}:`, (error as Error).message);
62 | }
63 | }
64 | } else {
65 | // Handle object format (for other packages)
66 | for (const [nodeName, nodePath] of Object.entries(nodesList)) {
67 | try {
68 | const fullPath = require.resolve(`${packagePath}/${nodePath as string}`);
69 | const nodeModule = require(fullPath);
70 |
71 | // Handle default export and various export patterns
72 | const NodeClass = nodeModule.default || nodeModule[nodeName] || Object.values(nodeModule)[0];
73 | if (NodeClass) {
74 | nodes.push({ packageName, nodeName, NodeClass });
75 | console.log(` ✓ Loaded ${nodeName} from ${packageName}`);
76 | } else {
77 | console.warn(` ⚠ No valid export found for ${nodeName} in ${packageName}`);
78 | }
79 | } catch (error) {
80 | console.error(` ✗ Failed to load node ${nodeName} from ${packageName}:`, (error as Error).message);
81 | }
82 | }
83 | }
84 |
85 | return nodes;
86 | }
87 | }
```
--------------------------------------------------------------------------------
/src/utils/url-detector.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Request } from 'express';
2 | import { logger } from './logger';
3 |
4 | /**
5 | * Validates a hostname to prevent header injection attacks
6 | */
7 | function isValidHostname(host: string): boolean {
8 | // Allow alphanumeric, dots, hyphens, and optional port
9 | return /^[a-zA-Z0-9.-]+(:[0-9]+)?$/.test(host) && host.length < 256;
10 | }
11 |
12 | /**
13 | * Validates a URL string
14 | */
15 | function isValidUrl(url: string): boolean {
16 | try {
17 | const parsed = new URL(url);
18 | // Only allow http and https protocols
19 | return parsed.protocol === 'http:' || parsed.protocol === 'https:';
20 | } catch {
21 | return false;
22 | }
23 | }
24 |
25 | /**
26 | * Detects the base URL for the server, considering:
27 | * 1. Explicitly configured BASE_URL or PUBLIC_URL
28 | * 2. Proxy headers (X-Forwarded-Proto, X-Forwarded-Host)
29 | * 3. Host and port configuration
30 | */
31 | export function detectBaseUrl(req: Request | null, host: string, port: number): string {
32 | try {
33 | // 1. Check for explicitly configured URL
34 | const configuredUrl = process.env.BASE_URL || process.env.PUBLIC_URL;
35 | if (configuredUrl) {
36 | if (isValidUrl(configuredUrl)) {
37 | logger.debug('Using configured BASE_URL/PUBLIC_URL', { url: configuredUrl });
38 | return configuredUrl.replace(/\/$/, ''); // Remove trailing slash
39 | } else {
40 | logger.warn('Invalid BASE_URL/PUBLIC_URL configured, falling back to auto-detection', { url: configuredUrl });
41 | }
42 | }
43 |
44 | // 2. If we have a request, try to detect from proxy headers
45 | if (req && process.env.TRUST_PROXY && Number(process.env.TRUST_PROXY) > 0) {
46 | const proto = req.get('X-Forwarded-Proto') || req.protocol || 'http';
47 | const forwardedHost = req.get('X-Forwarded-Host');
48 | const hostHeader = req.get('Host');
49 |
50 | const detectedHost = forwardedHost || hostHeader;
51 | if (detectedHost && isValidHostname(detectedHost)) {
52 | const baseUrl = `${proto}://${detectedHost}`;
53 | logger.debug('Detected URL from proxy headers', {
54 | proto,
55 | forwardedHost,
56 | hostHeader,
57 | baseUrl
58 | });
59 | return baseUrl;
60 | } else if (detectedHost) {
61 | logger.warn('Invalid hostname detected in proxy headers, using fallback', { detectedHost });
62 | }
63 | }
64 |
65 | // 3. Fall back to configured host and port
66 | const displayHost = host === '0.0.0.0' ? 'localhost' : host;
67 | const protocol = 'http'; // Default to http for local bindings
68 |
69 | // Don't show standard ports (for http only in this fallback case)
70 | const needsPort = port !== 80;
71 | const baseUrl = needsPort ?
72 | `${protocol}://${displayHost}:${port}` :
73 | `${protocol}://${displayHost}`;
74 |
75 | logger.debug('Using fallback URL from host/port', {
76 | host,
77 | displayHost,
78 | port,
79 | baseUrl
80 | });
81 |
82 | return baseUrl;
83 | } catch (error) {
84 | logger.error('Error detecting base URL, using fallback', error);
85 | // Safe fallback
86 | return `http://localhost:${port}`;
87 | }
88 | }
89 |
/**
 * Gets the base URL for console display during startup
 * This is used when we don't have a request object yet
 *
 * @param host - configured bind host
 * @param port - configured port
 * @returns base URL from env config or the host/port fallback (no request headers available)
 */
export function getStartupBaseUrl(host: string, port: number): string {
  return detectBaseUrl(null, host, port);
}
97 |
98 | /**
99 | * Formats endpoint URLs for display
100 | */
101 | export function formatEndpointUrls(baseUrl: string): {
102 | health: string;
103 | mcp: string;
104 | root: string;
105 | } {
106 | return {
107 | health: `${baseUrl}/health`,
108 | mcp: `${baseUrl}/mcp`,
109 | root: baseUrl
110 | };
111 | }
```
--------------------------------------------------------------------------------
/tests/auth.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, vi, beforeEach } from 'vitest';
2 | import { AuthManager } from '../src/utils/auth';
3 |
describe('AuthManager', () => {
  let authManager: AuthManager;

  // Fresh instance per test so token state does not leak between cases
  beforeEach(() => {
    authManager = new AuthManager();
  });

  describe('validateToken', () => {
    it('should return true when no authentication is required', () => {
      // No expected token supplied -> authentication is disabled
      expect(authManager.validateToken('any-token')).toBe(true);
      expect(authManager.validateToken(undefined)).toBe(true);
    });

    it('should validate static token correctly', () => {
      const expectedToken = 'secret-token';

      expect(authManager.validateToken('secret-token', expectedToken)).toBe(true);
      expect(authManager.validateToken('wrong-token', expectedToken)).toBe(false);
      expect(authManager.validateToken(undefined, expectedToken)).toBe(false);
    });

    it('should validate generated tokens', () => {
      const token = authManager.generateToken(1);

      // NOTE(review): a generated token is accepted even though it differs
      // from the expected static token — presumably AuthManager consults its
      // own token store first; confirm against src/utils/auth.
      expect(authManager.validateToken(token, 'expected-token')).toBe(true);
    });

    it('should reject expired tokens', () => {
      vi.useFakeTimers();

      const token = authManager.generateToken(1); // 1 hour expiry

      // Token should be valid initially
      expect(authManager.validateToken(token, 'expected-token')).toBe(true);

      // Fast forward 2 hours
      vi.advanceTimersByTime(2 * 60 * 60 * 1000);

      // Token should be expired
      expect(authManager.validateToken(token, 'expected-token')).toBe(false);

      vi.useRealTimers();
    });
  });

  describe('generateToken', () => {
    it('should generate unique tokens', () => {
      const token1 = authManager.generateToken();
      const token2 = authManager.generateToken();

      expect(token1).not.toBe(token2);
      expect(token1).toHaveLength(64); // 32 bytes hex = 64 chars
    });

    it('should set custom expiry time', () => {
      vi.useFakeTimers();

      const token = authManager.generateToken(24); // 24 hours

      // Token should be valid after 23 hours
      vi.advanceTimersByTime(23 * 60 * 60 * 1000);
      expect(authManager.validateToken(token, 'expected')).toBe(true);

      // Token should expire after 25 hours
      vi.advanceTimersByTime(2 * 60 * 60 * 1000);
      expect(authManager.validateToken(token, 'expected')).toBe(false);

      vi.useRealTimers();
    });
  });

  describe('revokeToken', () => {
    it('should revoke a generated token', () => {
      const token = authManager.generateToken();

      expect(authManager.validateToken(token, 'expected')).toBe(true);

      // Revocation must invalidate an otherwise-unexpired token
      authManager.revokeToken(token);

      expect(authManager.validateToken(token, 'expected')).toBe(false);
    });
  });

  describe('static methods', () => {
    it('should hash tokens consistently', () => {
      const token = 'my-secret-token';
      const hash1 = AuthManager.hashToken(token);
      const hash2 = AuthManager.hashToken(token);

      expect(hash1).toBe(hash2);
      expect(hash1).toHaveLength(64); // SHA256 hex = 64 chars
    });

    it('should compare tokens securely', () => {
      const token = 'my-secret-token';
      const hashedToken = AuthManager.hashToken(token);

      expect(AuthManager.compareTokens(token, hashedToken)).toBe(true);
      expect(AuthManager.compareTokens('wrong-token', hashedToken)).toBe(false);
    });
  });
});
```
--------------------------------------------------------------------------------
/tests/unit/utils/simple-cache-memory-leak-fix.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
2 | import { SimpleCache } from '../../../src/utils/simple-cache';
3 |
describe('SimpleCache Memory Leak Fix', () => {
  let cache: SimpleCache;

  beforeEach(() => {
    vi.useFakeTimers();
  });

  afterEach(() => {
    // Guard: some tests destroy the cache themselves; double-destroy safety
    // is covered explicitly below
    if (cache && typeof cache.destroy === 'function') {
      cache.destroy();
    }
    vi.restoreAllMocks();
  });

  it('should track cleanup timer', () => {
    cache = new SimpleCache();
    // Access private property for testing
    expect((cache as any).cleanupTimer).toBeDefined();
    expect((cache as any).cleanupTimer).not.toBeNull();
  });

  it('should clear timer on destroy', () => {
    cache = new SimpleCache();
    const timer = (cache as any).cleanupTimer;

    cache.destroy();

    expect((cache as any).cleanupTimer).toBeNull();
    // Verify timer was cleared
    expect(() => clearInterval(timer)).not.toThrow();
  });

  it('should clear cache on destroy', () => {
    cache = new SimpleCache();
    cache.set('test-key', 'test-value', 300);

    expect(cache.get('test-key')).toBe('test-value');

    cache.destroy();

    expect(cache.get('test-key')).toBeNull();
  });

  it('should handle multiple destroy calls safely', () => {
    cache = new SimpleCache();

    expect(() => {
      cache.destroy();
      cache.destroy();
      cache.destroy();
    }).not.toThrow();

    expect((cache as any).cleanupTimer).toBeNull();
  });

  it('should not create new timers after destroy', () => {
    cache = new SimpleCache();
    const originalTimer = (cache as any).cleanupTimer;

    cache.destroy();

    // Try to use the cache after destroy
    cache.set('key', 'value');
    cache.get('key');
    cache.clear();

    // Timer should still be null
    expect((cache as any).cleanupTimer).toBeNull();
    expect((cache as any).cleanupTimer).not.toBe(originalTimer);
  });

  it('should clean up expired entries periodically', () => {
    cache = new SimpleCache();

    // Set items with different TTLs
    cache.set('short', 'value1', 1); // 1 second
    cache.set('long', 'value2', 300); // 300 seconds

    // Advance time by 2 seconds
    vi.advanceTimersByTime(2000);

    // Advance time to trigger cleanup (60 seconds)
    // NOTE(review): assumes SimpleCache sweeps expired entries on a 60s
    // interval — confirm against src/utils/simple-cache
    vi.advanceTimersByTime(58000);

    // Short-lived item should be gone
    expect(cache.get('short')).toBeNull();
    // Long-lived item should still exist
    expect(cache.get('long')).toBe('value2');
  });

  it('should prevent memory leak by clearing timer', () => {
    const timers: NodeJS.Timeout[] = [];
    const originalSetInterval = global.setInterval;

    // Mock setInterval to track created timers
    global.setInterval = vi.fn((callback, delay) => {
      const timer = originalSetInterval(callback, delay);
      timers.push(timer);
      return timer;
    });

    // Create and destroy multiple caches
    for (let i = 0; i < 5; i++) {
      const tempCache = new SimpleCache();
      tempCache.set(`key${i}`, `value${i}`);
      tempCache.destroy();
    }

    // All timers should have been cleared
    // NOTE(review): this only verifies one timer per cache was created, not
    // that each was cleared — clearing is asserted by the tests above
    expect(timers.length).toBe(5);

    // Restore original setInterval
    global.setInterval = originalSetInterval;
  });

  it('should have destroy method defined', () => {
    cache = new SimpleCache();
    expect(typeof cache.destroy).toBe('function');
  });
});
```
--------------------------------------------------------------------------------
/examples/enhanced-documentation-demo.js:
--------------------------------------------------------------------------------
```javascript
1 | #!/usr/bin/env node
2 |
3 | const { DocumentationFetcher } = require('../dist/utils/documentation-fetcher');
4 |
/**
 * Demo: fetches enhanced documentation for the Slack node and prints its
 * operations, API method mappings, templates, related resources and scopes.
 * Always cleans up the fetcher, even on error.
 */
async function demonstrateEnhancedDocumentation() {
  console.log('🎯 Enhanced Documentation Demo\n');

  const fetcher = new DocumentationFetcher();
  const nodeType = 'n8n-nodes-base.slack';

  console.log(`Fetching enhanced documentation for: ${nodeType}\n`);

  try {
    const doc = await fetcher.getEnhancedNodeDocumentation(nodeType);

    if (!doc) {
      console.log('No documentation found for this node.');
      return;
    }

    // Display title and description
    console.log('📄 Basic Information:');
    console.log(`Title: ${doc.title || 'N/A'}`);
    console.log(`URL: ${doc.url}`);
    console.log(`Description: ${doc.description || 'See documentation for details'}\n`);

    // Display operations, grouped by resource
    if (doc.operations && doc.operations.length > 0) {
      console.log('⚙️ Available Operations:');
      const resourceMap = new Map();
      doc.operations.forEach(op => {
        if (!resourceMap.has(op.resource)) {
          resourceMap.set(op.resource, []);
        }
        resourceMap.get(op.resource).push(op);
      });

      resourceMap.forEach((ops, resource) => {
        console.log(`\n  ${resource}:`);
        ops.forEach(op => {
          console.log(`    - ${op.operation}: ${op.description}`);
        });
      });
      console.log('');
    }

    // Display API methods (truncated to the first 5)
    if (doc.apiMethods && doc.apiMethods.length > 0) {
      console.log('🔌 API Method Mappings (first 5):');
      doc.apiMethods.slice(0, 5).forEach(method => {
        console.log(`  ${method.resource}.${method.operation} → ${method.apiMethod}`);
        if (method.apiUrl) {
          console.log(`    Documentation: ${method.apiUrl}`);
        }
      });
      // Only mention a remainder when there actually is one
      // (fixes the misleading "... and 0 more" output for short lists)
      if (doc.apiMethods.length > 5) {
        console.log(`  ... and ${doc.apiMethods.length - 5} more\n`);
      } else {
        console.log('');
      }
    }

    // Display templates
    if (doc.templates && doc.templates.length > 0) {
      console.log('📋 Available Templates:');
      doc.templates.forEach(template => {
        console.log(`  - ${template.name}`);
        if (template.description) {
          console.log(`    ${template.description}`);
        }
      });
      console.log('');
    }

    // Display related resources
    if (doc.relatedResources && doc.relatedResources.length > 0) {
      console.log('🔗 Related Resources:');
      doc.relatedResources.forEach(resource => {
        console.log(`  - ${resource.title} (${resource.type})`);
        console.log(`    ${resource.url}`);
      });
      console.log('');
    }

    // Display required scopes
    if (doc.requiredScopes && doc.requiredScopes.length > 0) {
      console.log('🔐 Required Scopes:');
      doc.requiredScopes.forEach(scope => {
        console.log(`  - ${scope}`);
      });
      console.log('');
    }

    // Display summary
    console.log('📊 Summary:');
    console.log(`  - Total operations: ${doc.operations?.length || 0}`);
    console.log(`  - Total API methods: ${doc.apiMethods?.length || 0}`);
    console.log(`  - Code examples: ${doc.examples?.length || 0}`);
    console.log(`  - Templates: ${doc.templates?.length || 0}`);
    console.log(`  - Related resources: ${doc.relatedResources?.length || 0}`);

  } catch (error) {
    console.error('Error:', error.message);
  } finally {
    await fetcher.cleanup();
  }
}

// Run demo
demonstrateEnhancedDocumentation().catch(console.error);
```
--------------------------------------------------------------------------------
/scripts/publish-npm.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# Script to publish n8n-mcp with runtime-only dependencies

set -e

# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo "🚀 Preparing n8n-mcp for npm publish..."

# Skip tests - they already run in CI before merge/publish
echo "⏭️ Skipping tests (already verified in CI)"

# Sync version to runtime package first
echo "🔄 Syncing version to package.runtime.json..."
npm run sync:runtime-version

# Get version from main package.json
VERSION=$(node -e "console.log(require('./package.json').version)")
echo -e "${GREEN}📌 Version: $VERSION${NC}"

# Check if dist directory exists
if [ ! -d "dist" ]; then
  echo -e "${RED}❌ Error: dist directory not found. Run 'npm run build' first.${NC}"
  exit 1
fi

# Check if database exists
if [ ! -f "data/nodes.db" ]; then
  echo -e "${RED}❌ Error: data/nodes.db not found. Run 'npm run rebuild' first.${NC}"
  exit 1
fi

# Create a temporary publish directory
# (variable expansions are quoted throughout to avoid word splitting — SC2086)
PUBLISH_DIR="npm-publish-temp"
rm -rf "$PUBLISH_DIR"
mkdir -p "$PUBLISH_DIR"

# Copy necessary files
echo "📦 Copying files..."
cp -r dist "$PUBLISH_DIR"/
cp -r data "$PUBLISH_DIR"/
cp README.md "$PUBLISH_DIR"/
cp LICENSE "$PUBLISH_DIR"/
cp .env.example "$PUBLISH_DIR"/
cp .npmignore "$PUBLISH_DIR"/ 2>/dev/null || true

# Use runtime package.json (already has correct version from sync)
echo "📋 Using runtime-only dependencies..."
cp package.runtime.json "$PUBLISH_DIR"/package.json

cd "$PUBLISH_DIR"

# Add required fields from main package.json
node -e "
const pkg = require('./package.json');
pkg.name = 'n8n-mcp';
pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)';
pkg.main = 'dist/index.js';
pkg.types = 'dist/index.d.ts';
pkg.exports = {
  '.': {
    types: './dist/index.d.ts',
    require: './dist/index.js',
    import: './dist/index.js'
  }
};
pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' };
pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' };
pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation'];
pkg.author = 'Romuald Czlonkowski @ www.aiadvisors.pl/en';
pkg.license = 'MIT';
pkg.bugs = { url: 'https://github.com/czlonkowski/n8n-mcp/issues' };
pkg.homepage = 'https://github.com/czlonkowski/n8n-mcp#readme';
pkg.files = ['dist/**/*', 'data/nodes.db', '.env.example', 'README.md', 'LICENSE'];
// Note: node_modules are automatically included for dependencies
delete pkg.private; // Remove private field so we can publish
require('fs').writeFileSync('./package.json', JSON.stringify(pkg, null, 2));
"

echo ""
echo "📋 Package details:"
echo -e "${GREEN}Name:${NC} $(node -e "console.log(require('./package.json').name)")"
echo -e "${GREEN}Version:${NC} $(node -e "console.log(require('./package.json').version)")"
echo -e "${GREEN}Size:${NC} ~50MB (vs 1GB+ with dev dependencies)"
echo -e "${GREEN}Runtime deps:${NC} 8 packages"

echo ""
echo "✅ Ready to publish!"
echo ""
echo -e "${YELLOW}⚠️  Important: npm publishing requires OTP authentication${NC}"
echo ""
echo "To publish, run:"
echo -e "  ${GREEN}cd $PUBLISH_DIR${NC}"
echo -e "  ${GREEN}npm publish --otp=YOUR_OTP_CODE${NC}"
echo ""
echo "After publishing, clean up with:"
echo -e "  ${GREEN}cd ..${NC}"
echo -e "  ${GREEN}rm -rf $PUBLISH_DIR${NC}"
echo ""
echo "📝 Notes:"
echo "  - Get your OTP from your authenticator app"
echo "  - The package will be available at https://www.npmjs.com/package/n8n-mcp"
echo "  - Users can run 'npx n8n-mcp' immediately after publish"
```
--------------------------------------------------------------------------------
/scripts/extract-nodes-docker.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
set -e

echo "🐳 n8n Node Extraction via Docker"
echo "================================="

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Function to print colored output
print_status() {
    echo -e "${GREEN}[$(date +'%H:%M:%S')]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[$(date +'%H:%M:%S')]${NC} ⚠️  $1"
}

print_error() {
    echo -e "${RED}[$(date +'%H:%M:%S')]${NC} ❌ $1"
}

# Check if Docker is running
if ! docker info > /dev/null 2>&1; then
    print_error "Docker is not running. Please start Docker and try again."
    exit 1
fi

print_status "Docker is running ✅"

# Clean up any existing containers
print_status "Cleaning up existing containers..."
docker-compose -f docker-compose.extract.yml down -v 2>/dev/null || true

# Build the project first
print_status "Building the project..."
npm run build

# Start the extraction process
print_status "Starting n8n container to extract latest nodes..."
docker-compose -f docker-compose.extract.yml up -d n8n-latest

# Wait for n8n container to be healthy
print_status "Waiting for n8n container to initialize..."
ATTEMPTS=0
MAX_ATTEMPTS=60

while [ $ATTEMPTS -lt $MAX_ATTEMPTS ]; do
    # Match "(healthy)" exactly — a bare "healthy" pattern would also match
    # containers reporting "unhealthy" and end the wait loop prematurely
    if docker-compose -f docker-compose.extract.yml ps | grep -q "(healthy)"; then
        print_status "n8n container is ready ✅"
        break
    fi

    ATTEMPTS=$((ATTEMPTS + 1))
    echo -n "."
    sleep 2
done

if [ $ATTEMPTS -eq $MAX_ATTEMPTS ]; then
    print_error "n8n container failed to become healthy"
    docker-compose -f docker-compose.extract.yml logs n8n-latest
    docker-compose -f docker-compose.extract.yml down -v
    exit 1
fi

# Run the extraction
print_status "Running node extraction..."
docker-compose -f docker-compose.extract.yml run --rm node-extractor

# Check the results
print_status "Checking extraction results..."
if [ -f "./data/nodes-fresh.db" ]; then
    NODE_COUNT=$(sqlite3 ./data/nodes-fresh.db "SELECT COUNT(*) FROM nodes;" 2>/dev/null || echo "0")
    IF_VERSION=$(sqlite3 ./data/nodes-fresh.db "SELECT version FROM nodes WHERE name='n8n-nodes-base.If' LIMIT 1;" 2>/dev/null || echo "not found")

    print_status "Extracted $NODE_COUNT nodes"
    print_status "If node version: $IF_VERSION"

    # Check if we got the If node source code and look for version
    IF_SOURCE=$(sqlite3 ./data/nodes-fresh.db "SELECT source_code FROM nodes WHERE name='n8n-nodes-base.If' LIMIT 1;" 2>/dev/null || echo "")
    if [[ $IF_SOURCE =~ version:[[:space:]]*([0-9]+) ]]; then
        IF_CODE_VERSION="${BASH_REMATCH[1]}"
        print_status "If node version from source code: v$IF_CODE_VERSION"

        if [ "$IF_CODE_VERSION" -ge "2" ]; then
            print_status "✅ Successfully extracted latest If node (v$IF_CODE_VERSION)!"
        else
            print_warning "If node is still v$IF_CODE_VERSION, expected v2 or higher"
        fi
    fi
else
    print_error "Database file not found after extraction"
fi

# Clean up
print_status "Cleaning up Docker containers..."
docker-compose -f docker-compose.extract.yml down -v

print_status "✨ Extraction complete!"

# Offer to restart the MCP server
echo ""
read -p "Would you like to restart the MCP server with the new nodes? (y/n) " -n 1 -r
echo ""
if [[ $REPLY =~ ^[Yy]$ ]]; then
    print_status "Restarting MCP server..."
    # Kill any existing server process
    pkill -f "node.*dist/index.js" || true

    # Start the server
    npm start &
    print_status "MCP server restarted with fresh node database"
fi
```
--------------------------------------------------------------------------------
/scripts/test-jmespath-validation.ts:
--------------------------------------------------------------------------------
```typescript
1 | #!/usr/bin/env npx tsx
2 |
3 | import { EnhancedConfigValidator } from '../src/services/enhanced-config-validator.js';
4 |
5 | console.log('🧪 Testing JMESPath Validation\n');
6 |
7 | const testCases = [
8 | {
9 | name: 'JMESPath with unquoted numeric literal',
10 | config: {
11 | language: 'javaScript',
12 | jsCode: `const data = { users: [{ name: 'John', age: 30 }, { name: 'Jane', age: 25 }] };
13 | const adults = $jmespath(data, 'users[?age >= 18]');
14 | return [{json: {adults}}];`
15 | },
16 | expectError: true
17 | },
18 | {
19 | name: 'JMESPath with properly quoted numeric literal',
20 | config: {
21 | language: 'javaScript',
22 | jsCode: `const data = { users: [{ name: 'John', age: 30 }, { name: 'Jane', age: 25 }] };
23 | const adults = $jmespath(data, 'users[?age >= \`18\`]');
24 | return [{json: {adults}}];`
25 | },
26 | expectError: false
27 | },
28 | {
29 | name: 'Multiple JMESPath filters with unquoted numbers',
30 | config: {
31 | language: 'javaScript',
32 | jsCode: `const products = items.map(item => item.json);
33 | const expensive = $jmespath(products, '[?price > 100]');
34 | const lowStock = $jmespath(products, '[?quantity < 10]');
35 | const highPriority = $jmespath(products, '[?priority == 1]');
36 | return [{json: {expensive, lowStock, highPriority}}];`
37 | },
38 | expectError: true
39 | },
40 | {
41 | name: 'JMESPath with string comparison (no backticks needed)',
42 | config: {
43 | language: 'javaScript',
44 | jsCode: `const data = { users: [{ name: 'John', status: 'active' }, { name: 'Jane', status: 'inactive' }] };
45 | const activeUsers = $jmespath(data, 'users[?status == "active"]');
46 | return [{json: {activeUsers}}];`
47 | },
48 | expectError: false
49 | },
50 | {
51 | name: 'Python JMESPath with unquoted numeric literal',
52 | config: {
53 | language: 'python',
54 | pythonCode: `data = { 'users': [{ 'name': 'John', 'age': 30 }, { 'name': 'Jane', 'age': 25 }] }
55 | adults = _jmespath(data, 'users[?age >= 18]')
56 | return [{'json': {'adults': adults}}]`
57 | },
58 | expectError: true
59 | },
60 | {
61 | name: 'Complex filter with decimal numbers',
62 | config: {
63 | language: 'javaScript',
64 | jsCode: `const items = [{ price: 99.99 }, { price: 150.50 }, { price: 200 }];
65 | const expensive = $jmespath(items, '[?price >= 99.95]');
66 | return [{json: {expensive}}];`
67 | },
68 | expectError: true
69 | }
70 | ];
71 |
72 | let passCount = 0;
73 | let failCount = 0;
74 |
75 | for (const test of testCases) {
76 | console.log(`Test: ${test.name}`);
77 | const result = EnhancedConfigValidator.validateWithMode(
78 | 'nodes-base.code',
79 | test.config,
80 | [
81 | { name: 'language', type: 'options', options: ['javaScript', 'python'] },
82 | { name: 'jsCode', type: 'string' },
83 | { name: 'pythonCode', type: 'string' }
84 | ],
85 | 'operation',
86 | 'strict'
87 | );
88 |
89 | const hasJMESPathError = result.errors.some(e =>
90 | e.message.includes('JMESPath numeric literal') ||
91 | e.message.includes('must be wrapped in backticks')
92 | );
93 |
94 | const passed = hasJMESPathError === test.expectError;
95 |
96 | console.log(` Expected error: ${test.expectError}`);
97 | console.log(` Has JMESPath error: ${hasJMESPathError}`);
98 | console.log(` Result: ${passed ? '✅ PASS' : '❌ FAIL'}`);
99 |
100 | if (result.errors.length > 0) {
101 | console.log(` Errors: ${result.errors.map(e => e.message).join(', ')}`);
102 | }
103 | if (result.warnings.length > 0) {
104 | console.log(` Warnings: ${result.warnings.slice(0, 2).map(w => w.message).join(', ')}`);
105 | }
106 |
107 | if (passed) passCount++;
108 | else failCount++;
109 |
110 | console.log();
111 | }
112 |
113 | console.log(`\n📊 Results: ${passCount} passed, ${failCount} failed`);
114 | console.log(failCount === 0 ? '✅ All JMESPath validation tests passed!' : '❌ Some tests failed');
```
--------------------------------------------------------------------------------
/src/telemetry/telemetry-types.ts:
--------------------------------------------------------------------------------
```typescript
/**
 * Telemetry Types and Interfaces
 * Centralized type definitions for the telemetry system
 */

import { StartupCheckpoint } from './startup-checkpoints';

/** Generic telemetry event row sent to the backend. */
export interface TelemetryEvent {
  user_id: string; // anonymous user identifier
  event: string; // event name (e.g. 'startup_error')
  properties: Record<string, any>; // event-specific payload
  created_at?: string; // set server-side when omitted
}

/**
 * Startup error event - captures pre-handshake failures
 */
export interface StartupErrorEvent extends TelemetryEvent {
  event: 'startup_error';
  properties: {
    checkpoint: StartupCheckpoint; // checkpoint at which startup failed
    errorMessage: string;
    errorType: string;
    checkpointsPassed: StartupCheckpoint[]; // checkpoints reached before failure
    checkpointsPassedCount: number;
    startupDuration: number; // ms elapsed before the failure
    platform: string;
    arch: string;
    nodeVersion: string;
    isDocker: boolean;
  };
}

/**
 * Startup completed event - confirms server is functional
 */
export interface StartupCompletedEvent extends TelemetryEvent {
  event: 'startup_completed';
  properties: {
    version: string;
  };
}

/**
 * Enhanced session start properties with startup tracking
 */
export interface SessionStartProperties {
  version: string;
  platform: string;
  arch: string;
  nodeVersion: string;
  isDocker: boolean;
  cloudPlatform: string | null;
  // NEW: Startup tracking fields (v2.18.2)
  startupDurationMs?: number;
  checkpointsPassed?: StartupCheckpoint[];
  startupErrorCount?: number;
}

/** Per-workflow telemetry row (workflow content is sanitized before upload). */
export interface WorkflowTelemetry {
  user_id: string;
  workflow_hash: string; // hash identifying the workflow without its content
  node_count: number;
  node_types: string[];
  has_trigger: boolean;
  has_webhook: boolean;
  complexity: 'simple' | 'medium' | 'complex';
  sanitized_workflow: any; // workflow with sensitive values stripped
  created_at?: string;
}

/** In-memory representation of a workflow after sanitization. */
export interface SanitizedWorkflow {
  nodes: any[];
  connections: any;
  nodeCount: number;
  nodeTypes: string[];
  hasTrigger: boolean;
  hasWebhook: boolean;
  complexity: 'simple' | 'medium' | 'complex';
  workflowHash: string;
}

export const TELEMETRY_CONFIG = {
  // Batch processing
  BATCH_FLUSH_INTERVAL: 5000, // 5 seconds
  EVENT_QUEUE_THRESHOLD: 10, // Batch events for efficiency
  WORKFLOW_QUEUE_THRESHOLD: 5, // Batch workflows

  // Retry logic
  MAX_RETRIES: 3,
  RETRY_DELAY: 1000, // 1 second base delay
  OPERATION_TIMEOUT: 5000, // 5 seconds

  // Rate limiting
  RATE_LIMIT_WINDOW: 60000, // 1 minute
  RATE_LIMIT_MAX_EVENTS: 100, // Max events per window

  // Queue limits
  MAX_QUEUE_SIZE: 1000, // Maximum events to queue
  MAX_BATCH_SIZE: 50, // Maximum events per batch
} as const;

// NOTE(review): the ANON_KEY below appears to be a Supabase anon-role client
// key, which is typically public by design — confirm row-level security is
// enabled on the backend before treating this as safe to ship.
export const TELEMETRY_BACKEND = {
  URL: 'https://ydyufsohxdfpopqbubwk.supabase.co',
  ANON_KEY: 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InlkeXVmc29oeGRmcG9wcWJ1YndrIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NTg3OTYyMDAsImV4cCI6MjA3NDM3MjIwMH0.xESphg6h5ozaDsm4Vla3QnDJGc6Nc_cpfoqTHRynkCk'
} as const;

/** Counters describing the health of the telemetry pipeline. */
export interface TelemetryMetrics {
  eventsTracked: number;
  eventsDropped: number;
  eventsFailed: number;
  batchesSent: number;
  batchesFailed: number;
  averageFlushTime: number;
  lastFlushTime?: number;
  rateLimitHits: number;
}

/** Classification of telemetry failures, used to decide retry behavior. */
export enum TelemetryErrorType {
  VALIDATION_ERROR = 'VALIDATION_ERROR',
  NETWORK_ERROR = 'NETWORK_ERROR',
  RATE_LIMIT_ERROR = 'RATE_LIMIT_ERROR',
  QUEUE_OVERFLOW_ERROR = 'QUEUE_OVERFLOW_ERROR',
  INITIALIZATION_ERROR = 'INITIALIZATION_ERROR',
  UNKNOWN_ERROR = 'UNKNOWN_ERROR'
}

/** Structured error record carried alongside a failed telemetry operation. */
export interface TelemetryErrorContext {
  type: TelemetryErrorType;
  message: string;
  context?: Record<string, any>;
  timestamp: number;
  retryable: boolean; // whether the caller may retry the failed operation
}

/**
 * Re-export workflow mutation types
 */
export type { WorkflowMutationRecord, WorkflowMutationData } from './mutation-types.js';
```
--------------------------------------------------------------------------------
/tests/mocks/n8n-api/data/executions.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Mock execution data for MSW handlers
3 | */
4 |
5 | export interface MockExecution {
6 | id: string;
7 | workflowId: string;
8 | status: 'success' | 'error' | 'waiting' | 'running';
9 | mode: 'manual' | 'trigger' | 'webhook' | 'internal';
10 | startedAt: string;
11 | stoppedAt?: string;
12 | data?: any;
13 | error?: any;
14 | }
15 |
16 | export const mockExecutions: MockExecution[] = [
17 | {
18 | id: 'exec_1',
19 | workflowId: 'workflow_1',
20 | status: 'success',
21 | mode: 'manual',
22 | startedAt: '2024-01-01T10:00:00.000Z',
23 | stoppedAt: '2024-01-01T10:00:05.000Z',
24 | data: {
25 | resultData: {
26 | runData: {
27 | 'node_2': [
28 | {
29 | startTime: 1704106800000,
30 | executionTime: 234,
31 | data: {
32 | main: [[{
33 | json: {
34 | status: 200,
35 | data: { message: 'Success' }
36 | }
37 | }]]
38 | }
39 | }
40 | ]
41 | }
42 | }
43 | }
44 | },
45 | {
46 | id: 'exec_2',
47 | workflowId: 'workflow_2',
48 | status: 'error',
49 | mode: 'webhook',
50 | startedAt: '2024-01-01T11:00:00.000Z',
51 | stoppedAt: '2024-01-01T11:00:02.000Z',
52 | error: {
53 | message: 'Could not send message to Slack',
54 | stack: 'Error: Could not send message to Slack\n at SlackNode.execute',
55 | node: 'slack_1'
56 | },
57 | data: {
58 | resultData: {
59 | runData: {
60 | 'webhook_1': [
61 | {
62 | startTime: 1704110400000,
63 | executionTime: 10,
64 | data: {
65 | main: [[{
66 | json: {
67 | headers: { 'content-type': 'application/json' },
68 | body: { message: 'Test webhook' }
69 | }
70 | }]]
71 | }
72 | }
73 | ]
74 | }
75 | }
76 | }
77 | },
78 | {
79 | id: 'exec_3',
80 | workflowId: 'workflow_3',
81 | status: 'waiting',
82 | mode: 'trigger',
83 | startedAt: '2024-01-01T12:00:00.000Z',
84 | data: {
85 | resultData: {
86 | runData: {}
87 | },
88 | waitingExecutions: {
89 | 'agent_1': {
90 | reason: 'Waiting for user input'
91 | }
92 | }
93 | }
94 | }
95 | ];
96 |
97 | /**
98 | * Factory functions for creating mock executions
99 | */
100 | export const executionFactory = {
101 | /**
102 | * Create a successful execution
103 | */
104 | success: (workflowId: string, data?: any): MockExecution => ({
105 | id: `exec_${Date.now()}`,
106 | workflowId,
107 | status: 'success',
108 | mode: 'manual',
109 | startedAt: new Date().toISOString(),
110 | stoppedAt: new Date(Date.now() + 5000).toISOString(),
111 | data: data || {
112 | resultData: {
113 | runData: {
114 | 'node_1': [{
115 | startTime: Date.now(),
116 | executionTime: 100,
117 | data: {
118 | main: [[{ json: { success: true } }]]
119 | }
120 | }]
121 | }
122 | }
123 | }
124 | }),
125 |
126 | /**
127 | * Create a failed execution
128 | */
129 | error: (workflowId: string, error: { message: string; node?: string }): MockExecution => ({
130 | id: `exec_${Date.now()}`,
131 | workflowId,
132 | status: 'error',
133 | mode: 'manual',
134 | startedAt: new Date().toISOString(),
135 | stoppedAt: new Date(Date.now() + 2000).toISOString(),
136 | error: {
137 | message: error.message,
138 | stack: `Error: ${error.message}\n at Node.execute`,
139 | node: error.node
140 | },
141 | data: {
142 | resultData: {
143 | runData: {}
144 | }
145 | }
146 | }),
147 |
148 | /**
149 | * Create a custom execution
150 | */
151 | custom: (config: Partial<MockExecution>): MockExecution => ({
152 | id: `exec_${Date.now()}`,
153 | workflowId: 'workflow_1',
154 | status: 'success',
155 | mode: 'manual',
156 | startedAt: new Date().toISOString(),
157 | ...config
158 | })
159 | };
```
--------------------------------------------------------------------------------
/src/mcp/tool-docs/workflow_management/n8n-validate-workflow.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { ToolDocumentation } from '../types';
2 |
3 | export const n8nValidateWorkflowDoc: ToolDocumentation = {
4 | name: 'n8n_validate_workflow',
5 | category: 'workflow_management',
6 | essentials: {
7 | description: 'Validate workflow from n8n instance by ID - checks nodes, connections, expressions, and returns errors/warnings',
8 | keyParameters: ['id'],
9 | example: 'n8n_validate_workflow({id: "wf_abc123"})',
10 | performance: 'Network-dependent (100-500ms) - fetches and validates workflow',
11 | tips: [
12 | 'Use options.profile to control validation strictness (minimal/runtime/ai-friendly/strict)',
13 | 'Validation includes node configs, connections, and n8n expression syntax',
14 | 'Returns categorized errors, warnings, and actionable fix suggestions'
15 | ]
16 | },
17 | full: {
18 | description: `Validates a workflow stored in your n8n instance by fetching it via API and running comprehensive validation checks. This tool:
19 |
20 | - Fetches the workflow from n8n using the workflow ID
21 | - Validates all node configurations based on their schemas
22 | - Checks workflow connections and data flow
23 | - Validates n8n expression syntax in all fields
24 | - Returns categorized issues with fix suggestions
25 |
26 | The validation uses the same engine as validate_workflow but works with workflows already in n8n, making it perfect for validating existing workflows before execution.
27 |
28 | Requires N8N_API_URL and N8N_API_KEY environment variables to be configured.`,
29 | parameters: {
30 | id: {
31 | type: 'string',
32 | required: true,
33 | description: 'The workflow ID to validate from your n8n instance'
34 | },
35 | options: {
36 | type: 'object',
37 | required: false,
38 | description: 'Validation options: {validateNodes: bool (default true), validateConnections: bool (default true), validateExpressions: bool (default true), profile: "minimal"|"runtime"|"ai-friendly"|"strict" (default "runtime")}'
39 | }
40 | },
41 | returns: 'ValidationResult object containing isValid boolean, arrays of errors/warnings, and suggestions for fixes',
42 | examples: [
43 | 'n8n_validate_workflow({id: "wf_abc123"}) - Validate with default settings',
44 | 'n8n_validate_workflow({id: "wf_abc123", options: {profile: "strict"}}) - Strict validation',
45 | 'n8n_validate_workflow({id: "wf_abc123", options: {validateExpressions: false}}) - Skip expression validation'
46 | ],
47 | useCases: [
48 | 'Validating workflows before running them in production',
49 | 'Checking imported workflows for compatibility',
50 | 'Debugging workflow execution failures',
51 | 'Ensuring workflows follow best practices',
52 | 'Pre-deployment validation in CI/CD pipelines'
53 | ],
54 | performance: 'Depends on workflow size and API latency. Typically 100-500ms for medium workflows.',
55 | bestPractices: [
56 | 'Run validation before activating workflows in production',
57 | 'Use "runtime" profile for pre-execution checks',
58 | 'Use "strict" profile for code review and best practices',
59 | 'Fix errors before warnings - errors will likely cause execution failures',
60 | 'Pay attention to expression validation - syntax errors are common'
61 | ],
62 | pitfalls: [
63 | 'Requires valid API credentials - check n8n_health_check first',
64 | 'Large workflows may take longer to validate',
65 | 'Some warnings may be intentional (e.g., optional parameters)',
66 | 'Profile affects validation time - strict is slower but more thorough',
67 | 'Expression validation may flag working but non-standard syntax'
68 | ],
69 | relatedTools: ['validate_workflow', 'n8n_get_workflow', 'n8n_health_check', 'n8n_autofix_workflow']
70 | }
71 | };
```
--------------------------------------------------------------------------------
/src/utils/mcp-client.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Client } from '@modelcontextprotocol/sdk/client/index.js';
2 | import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
3 | import { WebSocketClientTransport } from '@modelcontextprotocol/sdk/client/websocket.js';
4 | import {
5 | CallToolRequest,
6 | ListToolsRequest,
7 | ListResourcesRequest,
8 | ReadResourceRequest,
9 | ListPromptsRequest,
10 | GetPromptRequest,
11 | CallToolResultSchema,
12 | ListToolsResultSchema,
13 | ListResourcesResultSchema,
14 | ReadResourceResultSchema,
15 | ListPromptsResultSchema,
16 | GetPromptResultSchema,
17 | } from '@modelcontextprotocol/sdk/types.js';
18 |
19 | export interface MCPClientConfig {
20 | serverUrl: string;
21 | authToken?: string;
22 | connectionType: 'http' | 'websocket' | 'stdio';
23 | }
24 |
25 | export class MCPClient {
26 | private client: Client;
27 | private config: MCPClientConfig;
28 | private connected: boolean = false;
29 |
30 | constructor(config: MCPClientConfig) {
31 | this.config = config;
32 | this.client = new Client(
33 | {
34 | name: 'n8n-mcp-client',
35 | version: '1.0.0',
36 | },
37 | {
38 | capabilities: {},
39 | }
40 | );
41 | }
42 |
43 | async connect(): Promise<void> {
44 | if (this.connected) {
45 | return;
46 | }
47 |
48 | let transport;
49 |
50 | switch (this.config.connectionType) {
51 | case 'websocket':
52 | const wsUrl = this.config.serverUrl.replace(/^http/, 'ws');
53 | transport = new WebSocketClientTransport(new URL(wsUrl));
54 | break;
55 |
56 | case 'stdio':
57 | // For stdio, the serverUrl should be the command to execute
58 | const [command, ...args] = this.config.serverUrl.split(' ');
59 | transport = new StdioClientTransport({
60 | command,
61 | args,
62 | });
63 | break;
64 |
65 | default:
66 | throw new Error(`HTTP transport is not yet supported for MCP clients`);
67 | }
68 |
69 | await this.client.connect(transport);
70 | this.connected = true;
71 | }
72 |
73 | async disconnect(): Promise<void> {
74 | if (this.connected) {
75 | await this.client.close();
76 | this.connected = false;
77 | }
78 | }
79 |
80 | async listTools(): Promise<any> {
81 | await this.ensureConnected();
82 | return await this.client.request(
83 | { method: 'tools/list' } as ListToolsRequest,
84 | ListToolsResultSchema
85 | );
86 | }
87 |
88 | async callTool(name: string, args: any): Promise<any> {
89 | await this.ensureConnected();
90 | return await this.client.request(
91 | {
92 | method: 'tools/call',
93 | params: {
94 | name,
95 | arguments: args,
96 | },
97 | } as CallToolRequest,
98 | CallToolResultSchema
99 | );
100 | }
101 |
102 | async listResources(): Promise<any> {
103 | await this.ensureConnected();
104 | return await this.client.request(
105 | { method: 'resources/list' } as ListResourcesRequest,
106 | ListResourcesResultSchema
107 | );
108 | }
109 |
110 | async readResource(uri: string): Promise<any> {
111 | await this.ensureConnected();
112 | return await this.client.request(
113 | {
114 | method: 'resources/read',
115 | params: {
116 | uri,
117 | },
118 | } as ReadResourceRequest,
119 | ReadResourceResultSchema
120 | );
121 | }
122 |
123 | async listPrompts(): Promise<any> {
124 | await this.ensureConnected();
125 | return await this.client.request(
126 | { method: 'prompts/list' } as ListPromptsRequest,
127 | ListPromptsResultSchema
128 | );
129 | }
130 |
131 | async getPrompt(name: string, args?: any): Promise<any> {
132 | await this.ensureConnected();
133 | return await this.client.request(
134 | {
135 | method: 'prompts/get',
136 | params: {
137 | name,
138 | arguments: args,
139 | },
140 | } as GetPromptRequest,
141 | GetPromptResultSchema
142 | );
143 | }
144 |
145 | private async ensureConnected(): Promise<void> {
146 | if (!this.connected) {
147 | await this.connect();
148 | }
149 | }
150 | }
```
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
```dockerfile
1 | # syntax=docker/dockerfile:1.7
2 | # Ultra-optimized Dockerfile - minimal runtime dependencies (no n8n packages)
3 |
4 | # Stage 1: Builder (TypeScript compilation only)
5 | FROM node:22-alpine AS builder
6 | WORKDIR /app
7 |
8 | # Copy tsconfig files for TypeScript compilation
9 | COPY tsconfig*.json ./
10 |
11 | # Create minimal package.json and install ONLY build dependencies
12 | # Note: openai and zod are needed for TypeScript compilation of template metadata modules
13 | RUN --mount=type=cache,target=/root/.npm \
14 | echo '{}' > package.json && \
15 | npm install --no-save typescript@^5.8.3 @types/node@^22.15.30 @types/express@^5.0.3 \
16 | @modelcontextprotocol/sdk@^1.12.1 dotenv@^16.5.0 express@^5.1.0 axios@^1.10.0 \
17 | n8n-workflow@^1.96.0 uuid@^11.0.5 @types/uuid@^10.0.0 \
18 | openai@^4.77.0 zod@^3.24.1 lru-cache@^11.2.1 @supabase/supabase-js@^2.57.4
19 |
20 | # Copy source and build
21 | COPY src ./src
22 | # Note: src/n8n contains TypeScript types needed for compilation
23 | # These will be compiled but not included in runtime
24 | RUN npx tsc -p tsconfig.build.json
25 |
26 | # Stage 2: Runtime (minimal dependencies)
27 | FROM node:22-alpine AS runtime
28 | WORKDIR /app
29 |
30 | # Install only essential runtime tools
31 | RUN apk add --no-cache curl su-exec && \
32 | rm -rf /var/cache/apk/*
33 |
34 | # Copy runtime-only package.json
35 | COPY package.runtime.json package.json
36 |
37 | # Install runtime dependencies with better-sqlite3 compilation
38 | # Build tools (python3, make, g++) are installed, used for compilation, then removed
39 | # This enables native SQLite (better-sqlite3) instead of sql.js, preventing memory leaks
40 | RUN --mount=type=cache,target=/root/.npm \
41 | apk add --no-cache python3 make g++ && \
42 | npm install --production --no-audit --no-fund && \
43 | apk del python3 make g++
44 |
45 | # Copy built application
46 | COPY --from=builder /app/dist ./dist
47 |
48 | # Copy pre-built database and required files
49 | # Cache bust: 2025-07-06-trigger-fix-v3 - includes is_trigger=true for webhook,cron,interval,emailReadImap
50 | COPY data/nodes.db ./data/
51 | COPY src/database/schema-optimized.sql ./src/database/
52 | COPY .env.example ./
53 |
54 | # Copy entrypoint script, config parser, and n8n-mcp command
55 | COPY docker/docker-entrypoint.sh /usr/local/bin/
56 | COPY docker/parse-config.js /app/docker/
57 | COPY docker/n8n-mcp /usr/local/bin/
58 | RUN chmod +x /usr/local/bin/docker-entrypoint.sh /usr/local/bin/n8n-mcp
59 |
60 | # Add container labels
61 | LABEL org.opencontainers.image.source="https://github.com/czlonkowski/n8n-mcp"
62 | LABEL org.opencontainers.image.description="n8n MCP Server - Runtime Only"
63 | LABEL org.opencontainers.image.licenses="MIT"
64 | LABEL org.opencontainers.image.title="n8n-mcp"
65 |
66 | # Create non-root user with unpredictable UID/GID
67 | # Using a hash of the build time to generate unpredictable IDs
68 | RUN BUILD_HASH=$(date +%s | sha256sum | head -c 8) && \
69 | UID=$((10000 + 0x${BUILD_HASH} % 50000)) && \
70 | GID=$((10000 + 0x${BUILD_HASH} % 50000)) && \
71 | addgroup -g ${GID} -S nodejs && \
72 | adduser -S nodejs -u ${UID} -G nodejs && \
73 | chown -R nodejs:nodejs /app
74 |
75 | # Switch to non-root user
76 | USER nodejs
77 |
78 | # Set Docker environment flag
79 | ENV IS_DOCKER=true
80 |
81 | # Telemetry: Anonymous usage statistics are ENABLED by default
82 | # To opt-out, uncomment the following line:
83 | # ENV N8N_MCP_TELEMETRY_DISABLED=true
84 |
85 | # Expose HTTP port (default 3000, configurable via PORT environment variable at runtime)
86 | EXPOSE 3000
87 |
88 | # Set stop signal to SIGTERM (default, but explicit is better)
89 | STOPSIGNAL SIGTERM
90 |
91 | # Health check
92 | HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
93 | CMD sh -c 'curl -f http://127.0.0.1:${PORT:-3000}/health || exit 1'
94 |
95 | # Optimized entrypoint
96 | ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
97 | CMD ["node", "dist/mcp/index.js"]
98 |
```
--------------------------------------------------------------------------------
/N8N_HTTP_STREAMABLE_SETUP.md:
--------------------------------------------------------------------------------
```markdown
1 | # n8n MCP HTTP Streamable Configuration Guide
2 |
3 | ## Overview
4 |
5 | This guide shows how to configure the n8n-nodes-mcp community node to connect to n8n-mcp using the **recommended HTTP Streamable transport**.
6 |
7 | ## Prerequisites
8 |
9 | 1. Install n8n-nodes-mcp community node:
10 | - Go to n8n Settings → Community Nodes
11 | - Install: `n8n-nodes-mcp`
12 | - Restart n8n if prompted
13 |
14 | 2. Ensure environment variable is set:
15 | ```bash
16 | N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true
17 | ```
18 |
19 | ## Quick Start
20 |
21 | ### Step 1: Start Services
22 |
23 | ```bash
24 | # Stop any existing containers
25 | docker stop n8n n8n-mcp && docker rm n8n n8n-mcp
26 |
27 | # Start with HTTP Streamable configuration
28 | docker-compose -f docker-compose.n8n.yml up -d
29 |
30 | # Services will be available at:
31 | # - n8n: http://localhost:5678
32 | # - n8n-mcp: http://localhost:3000
33 | ```
34 |
35 | ### Step 2: Create MCP Credentials in n8n
36 |
37 | 1. Open n8n at http://localhost:5678
38 | 2. Go to Credentials → Add credential
39 | 3. Search for "MCP" and select "MCP API"
40 | 4. Configure the fields as follows:
41 | - **Credential Name**: `n8n MCP Server`
42 |    - **HTTP Stream URL**: `http://n8n-mcp:3000/mcp`
43 | - **Messages Post Endpoint**: (leave empty)
44 | - **Additional Headers**:
45 | ```json
46 | {
47 | "Authorization": "Bearer test-secure-token-123456789"
48 | }
49 | ```
50 | 5. Save the credential
51 |
52 | ### Step 3: Configure MCP Client Node
53 |
54 | Add an MCP Client node to your workflow with these settings:
55 |
56 | - **Connection Type**: `HTTP Streamable`
57 | - **HTTP Streamable URL**: `http://n8n-mcp:3000/mcp`
58 | - **Authentication**: `Bearer Auth`
59 | - **Credentials**: Select the credential you created
60 | - **Operation**: Choose your operation (e.g., "List Tools", "Call Tool")
61 |
62 | ### Step 4: Test the Connection
63 |
64 | 1. Execute the workflow
65 | 2. The MCP Client should successfully connect and return results
66 |
67 | ## Available Operations
68 |
69 | ### List Tools
70 | Shows all available MCP tools:
71 | - `tools_documentation`
72 | - `list_nodes`
73 | - `get_node_info`
74 | - `search_nodes`
75 | - `get_node_essentials`
76 | - `validate_node_config`
77 | - And many more...
78 |
79 | ### Call Tool
80 | Execute specific tools with arguments:
81 |
82 | **Example: Get Node Info**
83 | - Tool Name: `get_node_info`
84 | - Arguments: `{ "nodeType": "n8n-nodes-base.httpRequest" }`
85 |
86 | **Example: Search Nodes**
87 | - Tool Name: `search_nodes`
88 | - Arguments: `{ "query": "webhook", "limit": 5 }`
89 |
90 | ## Import Example Workflow
91 |
92 | Import the pre-configured workflow:
93 | 1. Go to Workflows → Add workflow → Import from File
94 | 2. Select: `examples/n8n-mcp-streamable-workflow.json`
95 | 3. Update the credentials with your bearer token
96 |
97 | ## Troubleshooting
98 |
99 | ### Connection Refused
100 | - Verify services are running: `docker ps`
101 | - Check logs: `docker logs n8n-mcp`
102 | - Ensure you're using `http://n8n-mcp:3000/mcp` (container name) not `localhost`
103 |
104 | ### Authentication Failed
105 | - Verify bearer token matches exactly
106 | - Check CORS settings allow n8n origin
107 |
108 | ### Test Endpoint Manually
109 | ```bash
110 | # Test health check
111 | curl http://localhost:3000/health
112 |
113 | # Test MCP endpoint (should return error without proper JSON-RPC body)
114 | curl -X POST http://localhost:3000/mcp \
115 | -H "Authorization: Bearer test-secure-token-123456789" \
116 | -H "Content-Type: application/json"
117 | ```
118 |
119 | ## Architecture Notes
120 |
121 | - **Transport**: HTTP Streamable (StreamableHTTPServerTransport)
122 | - **Protocol**: JSON-RPC 2.0 over HTTP POST
123 | - **Authentication**: Bearer token in Authorization header
124 | - **Endpoint**: Single `/mcp` endpoint handles all operations
125 | - **Stateless**: Each request creates a new MCP server instance
126 |
127 | ## Why HTTP Streamable?
128 |
129 | 1. **Recommended by MCP**: The official recommended transport method
130 | 2. **Better Performance**: More efficient than SSE
131 | 3. **Simpler Implementation**: Single POST endpoint
132 | 4. **Future Proof**: SSE is deprecated in MCP spec
```
--------------------------------------------------------------------------------
/src/scripts/sanitize-templates.ts:
--------------------------------------------------------------------------------
```typescript
1 | #!/usr/bin/env node
2 | import { createDatabaseAdapter } from '../database/database-adapter';
3 | import { logger } from '../utils/logger';
4 | import { TemplateSanitizer } from '../utils/template-sanitizer';
5 | import { gunzipSync, gzipSync } from 'zlib';
6 |
7 | async function sanitizeTemplates() {
8 | console.log('🧹 Sanitizing workflow templates in database...\n');
9 |
10 | const db = await createDatabaseAdapter('./data/nodes.db');
11 | const sanitizer = new TemplateSanitizer();
12 |
13 | try {
14 | // Get all templates - check both old and new format
15 | const templates = db.prepare('SELECT id, name, workflow_json, workflow_json_compressed FROM templates').all() as any[];
16 | console.log(`Found ${templates.length} templates to check\n`);
17 |
18 | let sanitizedCount = 0;
19 | const problematicTemplates: any[] = [];
20 |
21 | for (const template of templates) {
22 | let originalWorkflow: any = null;
23 | let useCompressed = false;
24 |
25 | // Try compressed format first (newer format)
26 | if (template.workflow_json_compressed) {
27 | try {
28 | const buffer = Buffer.from(template.workflow_json_compressed, 'base64');
29 | const decompressed = gunzipSync(buffer).toString('utf-8');
30 | originalWorkflow = JSON.parse(decompressed);
31 | useCompressed = true;
32 | } catch (e) {
33 | console.log(`⚠️ Failed to decompress template ${template.id}, trying uncompressed`);
34 | }
35 | }
36 |
37 | // Fall back to uncompressed format (deprecated)
38 | if (!originalWorkflow && template.workflow_json) {
39 | try {
40 | originalWorkflow = JSON.parse(template.workflow_json);
41 | } catch (e) {
42 | console.log(`⚠️ Skipping template ${template.id}: Invalid JSON in both formats`);
43 | continue;
44 | }
45 | }
46 |
47 | if (!originalWorkflow) {
48 | continue; // Skip templates without workflow data
49 | }
50 |
51 | const { sanitized: sanitizedWorkflow, wasModified } = sanitizer.sanitizeWorkflow(originalWorkflow);
52 |
53 | if (wasModified) {
54 | // Get detected tokens for reporting
55 | const detectedTokens = sanitizer.detectTokens(originalWorkflow);
56 |
57 | // Update the template with sanitized version in the same format
58 | if (useCompressed) {
59 | const compressed = gzipSync(JSON.stringify(sanitizedWorkflow)).toString('base64');
60 | const stmt = db.prepare('UPDATE templates SET workflow_json_compressed = ? WHERE id = ?');
61 | stmt.run(compressed, template.id);
62 | } else {
63 | const stmt = db.prepare('UPDATE templates SET workflow_json = ? WHERE id = ?');
64 | stmt.run(JSON.stringify(sanitizedWorkflow), template.id);
65 | }
66 |
67 | sanitizedCount++;
68 | problematicTemplates.push({
69 | id: template.id,
70 | name: template.name,
71 | tokens: detectedTokens
72 | });
73 |
74 | console.log(`✅ Sanitized template ${template.id}: ${template.name}`);
75 | detectedTokens.forEach(token => {
76 | console.log(` - Found: ${token.substring(0, 20)}...`);
77 | });
78 | }
79 | }
80 |
81 | console.log(`\n📊 Summary:`);
82 | console.log(` Total templates: ${templates.length}`);
83 | console.log(` Sanitized: ${sanitizedCount}`);
84 |
85 | if (problematicTemplates.length > 0) {
86 | console.log(`\n⚠️ Templates that contained API tokens:`);
87 | problematicTemplates.forEach(t => {
88 | console.log(` - ${t.id}: ${t.name}`);
89 | });
90 | }
91 |
92 | console.log('\n✨ Sanitization complete!');
93 | } catch (error) {
94 | console.error('❌ Error sanitizing templates:', error);
95 | process.exit(1);
96 | } finally {
97 | db.close();
98 | }
99 | }
100 |
101 | // Run if called directly
102 | if (require.main === module) {
103 | sanitizeTemplates().catch(console.error);
104 | }
```
--------------------------------------------------------------------------------
/scripts/deploy-to-vm.sh:
--------------------------------------------------------------------------------
```bash
1 | #!/bin/bash
2 |
3 | # Deployment script for n8n Documentation MCP Server
4 | # Target: n8ndocumentation.aiservices.pl
5 |
6 | set -e
7 |
8 | echo "🚀 n8n Documentation MCP Server - VM Deployment"
9 | echo "=============================================="
10 |
11 | # Configuration
12 | SERVER_USER=${SERVER_USER:-root}
13 | SERVER_HOST=${SERVER_HOST:-n8ndocumentation.aiservices.pl}
14 | APP_DIR="/opt/n8n-mcp"
15 | SERVICE_NAME="n8n-docs-mcp"
16 |
17 | # Colors
18 | GREEN='\033[0;32m'
19 | YELLOW='\033[1;33m'
20 | RED='\033[0;31m'
21 | NC='\033[0m' # No Color
22 |
23 | # Check if .env exists
24 | if [ ! -f .env ]; then
25 | echo -e "${RED}❌ .env file not found. Please create it from .env.example${NC}"
26 | exit 1
27 | fi
28 |
29 | # Check required environment variables
30 | source .env
31 | if [ "$MCP_DOMAIN" != "n8ndocumentation.aiservices.pl" ]; then
32 | echo -e "${YELLOW}⚠️ Warning: MCP_DOMAIN is not set to n8ndocumentation.aiservices.pl${NC}"
33 | read -p "Continue anyway? (y/N) " -n 1 -r
34 | echo
35 | if [[ ! $REPLY =~ ^[Yy]$ ]]; then
36 | exit 1
37 | fi
38 | fi
39 |
40 | if [ -z "$MCP_AUTH_TOKEN" ] || [ "$MCP_AUTH_TOKEN" == "your-secure-auth-token-here" ]; then
41 | echo -e "${RED}❌ MCP_AUTH_TOKEN not set or using default value${NC}"
42 | echo "Generate a secure token with: openssl rand -hex 32"
43 | exit 1
44 | fi
45 |
46 | echo -e "${GREEN}✅ Configuration validated${NC}"
47 |
48 | # Build the project locally
49 | echo -e "\n${YELLOW}Building project...${NC}"
50 | npm run build
51 |
52 | # Create deployment package
53 | echo -e "\n${YELLOW}Creating deployment package...${NC}"
54 | rm -rf deploy-package
55 | mkdir -p deploy-package
56 |
57 | # Copy necessary files
58 | cp -r dist deploy-package/
59 | cp -r data deploy-package/
60 | cp package*.json deploy-package/
61 | cp .env deploy-package/
62 | cp ecosystem.config.js deploy-package/ 2>/dev/null || true
63 |
64 | # Create tarball
65 | tar -czf deploy-package.tar.gz deploy-package
66 |
67 | echo -e "${GREEN}✅ Deployment package created${NC}"
68 |
69 | # Upload to server
70 | echo -e "\n${YELLOW}Uploading to server...${NC}"
71 | scp deploy-package.tar.gz $SERVER_USER@$SERVER_HOST:/tmp/
72 |
73 | # Deploy on server
74 | echo -e "\n${YELLOW}Deploying on server...${NC}"
75 | ssh $SERVER_USER@$SERVER_HOST << 'ENDSSH'
76 | set -e
77 |
78 | # Create app directory
79 | mkdir -p /opt/n8n-mcp
80 | cd /opt/n8n-mcp
81 |
82 | # Stop existing service if running
83 | pm2 stop n8n-docs-mcp 2>/dev/null || true
84 |
85 | # Extract deployment package
86 | tar -xzf /tmp/deploy-package.tar.gz --strip-components=1
87 | rm /tmp/deploy-package.tar.gz
88 |
89 | # Install production dependencies
90 | npm ci --only=production
91 |
92 | # Create PM2 ecosystem file if not exists
93 | if [ ! -f ecosystem.config.js ]; then
94 | cat > ecosystem.config.js << 'EOF'
95 | module.exports = {
96 | apps: [{
97 | name: 'n8n-docs-mcp',
98 | script: './dist/index-http.js',
99 | instances: 1,
100 | autorestart: true,
101 | watch: false,
102 | max_memory_restart: '1G',
103 | env: {
104 | NODE_ENV: 'production'
105 | },
106 | error_file: './logs/error.log',
107 | out_file: './logs/out.log',
108 | log_file: './logs/combined.log',
109 | time: true
110 | }]
111 | };
112 | EOF
113 | fi
114 |
115 | # Create logs directory
116 | mkdir -p logs
117 |
118 | # Start with PM2
119 | pm2 start ecosystem.config.js
120 | pm2 save
121 |
122 | echo "✅ Deployment complete!"
123 | echo ""
124 | echo "Service status:"
125 | pm2 status n8n-docs-mcp
126 | ENDSSH
127 |
128 | # Clean up local files
129 | rm -rf deploy-package deploy-package.tar.gz
130 |
131 | echo -e "\n${GREEN}🎉 Deployment successful!${NC}"
132 | echo -e "\nServer endpoints:"
133 | echo -e " Health: https://$SERVER_HOST/health"
134 | echo -e " Stats: https://$SERVER_HOST/stats"
135 | echo -e " MCP: https://$SERVER_HOST/mcp"
136 | echo -e "\nClaude Desktop configuration:"
137 | echo -e " {
138 | \"mcpServers\": {
139 | \"n8n-nodes-remote\": {
140 | \"command\": \"npx\",
141 | \"args\": [
142 | \"-y\",
143 | \"@modelcontextprotocol/client-http\",
144 | \"https://$SERVER_HOST/mcp\"
145 | ],
146 | \"env\": {
147 | \"MCP_AUTH_TOKEN\": \"$MCP_AUTH_TOKEN\"
148 | }
149 | }
150 | }
151 | }"
```
--------------------------------------------------------------------------------
/.claude/agents/code-reviewer.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | name: code-reviewer
3 | description: Use this agent when you need to review recently written or modified code for quality, security, and maintainability. This agent should be invoked immediately after code changes are made to ensure high standards are maintained. Examples:\n\n<example>\nContext: The user has created a code-review agent that should be called after writing code.\nuser: "Please write a function that validates email addresses"\nassistant: "I'll create an email validation function for you:"\n<function implementation>\nassistant: "Now let me use the code-reviewer agent to review this code for quality and security"\n<commentary>\nSince new code was just written, the code-reviewer agent should be invoked to ensure it meets quality standards.\n</commentary>\n</example>\n\n<example>\nContext: The user has modified existing code and wants it reviewed.\nuser: "I've updated the authentication logic, can you check if it's secure?"\nassistant: "I'll use the code-reviewer agent to analyze your authentication changes for security and best practices"\n<commentary>\nThe user has made changes to security-critical code, so the code-reviewer agent is the appropriate tool to ensure the modifications are secure and well-implemented.\n</commentary>\n</example>
4 | model: inherit
5 | ---
6 |
7 | You are a senior code reviewer with extensive experience in software engineering, security, and best practices. Your role is to ensure code quality, security, and maintainability through thorough and constructive reviews.
8 |
9 | When invoked, you will:
10 |
11 | 1. **Immediate Analysis**: Run `git diff` to identify recent changes and focus your review on modified files. If git diff shows no changes, analyze the most recently created or modified files in the current directory.
12 |
13 | 2. **Comprehensive Review**: Evaluate code against these critical criteria:
14 | - **Readability**: Code is simple, clear, and self-documenting
15 | - **Naming**: Functions, variables, and classes have descriptive, meaningful names
16 | - **DRY Principle**: No duplicated code; common logic is properly abstracted
17 | - **Error Handling**: All edge cases handled; errors are caught and logged appropriately
18 | - **Security**: No hardcoded secrets, API keys, or sensitive data; proper authentication/authorization
19 | - **Input Validation**: All user inputs are validated and sanitized
20 | - **Testing**: Adequate test coverage for critical paths and edge cases
21 | - **Performance**: No obvious bottlenecks; efficient algorithms and data structures used
22 |
23 | 3. **Structured Feedback**: Organize your review into three priority levels:
24 | - **🚨 Critical Issues (Must Fix)**: Security vulnerabilities, bugs that will cause failures, or severe performance problems
25 | - **⚠️ Warnings (Should Fix)**: Code smells, missing error handling, or practices that could lead to future issues
26 | - **💡 Suggestions (Consider Improving)**: Opportunities for better readability, performance optimizations, or architectural improvements
27 |
28 | 4. **Actionable Recommendations**: For each issue identified:
29 | - Explain why it's a problem
30 | - Provide a specific code example showing how to fix it
31 | - Reference relevant best practices or documentation when applicable
32 |
33 | 5. **Positive Reinforcement**: Acknowledge well-written code sections and good practices observed
34 |
35 | Your review style should be:
36 | - Constructive and educational, not critical or harsh
37 | - Specific with line numbers and code snippets
38 | - Focused on the most impactful improvements
39 | - Considerate of the project's context and constraints
40 |
41 | Begin each review with a brief summary of what was reviewed and your overall assessment, then dive into the detailed findings organized by priority.
42 |
```
--------------------------------------------------------------------------------
/scripts/migrate-nodes-fts.ts:
--------------------------------------------------------------------------------
```typescript
#!/usr/bin/env node

import * as path from 'path';
import { createDatabaseAdapter } from '../src/database/database-adapter';
import { logger } from '../src/utils/logger';

/**
 * Migrate an existing database to add FTS5 full-text search support for nodes.
 *
 * Creates an external-content FTS5 table (content=nodes), backfills it from the
 * existing `nodes` rows, and installs triggers that keep the index in sync with
 * future INSERT/UPDATE/DELETE operations. Idempotent: exits early if the
 * `nodes_fts` table already exists.
 */
async function migrateNodesFTS() {
  logger.info('Starting nodes FTS5 migration...');

  const dbPath = path.join(process.cwd(), 'data', 'nodes.db');
  const db = await createDatabaseAdapter(dbPath);

  try {
    // Check if nodes_fts already exists so the migration can be re-run safely.
    const tableExists = db.prepare(`
      SELECT name FROM sqlite_master
      WHERE type='table' AND name='nodes_fts'
    `).get();

    if (tableExists) {
      logger.info('nodes_fts table already exists, skipping migration');
      return;
    }

    logger.info('Creating nodes_fts virtual table...');

    // External-content FTS5 table: the indexed text lives in `nodes`; the FTS
    // table stores only the inverted index. Porter stemming for English terms.
    db.prepare(`
      CREATE VIRTUAL TABLE IF NOT EXISTS nodes_fts USING fts5(
        node_type,
        display_name,
        description,
        documentation,
        operations,
        content=nodes,
        content_rowid=rowid,
        tokenize='porter'
      )
    `).run();

    // Backfill the index in a single INSERT...SELECT. This keeps the indexed
    // values identical to the table values, which is required for the 'delete'
    // commands issued by the triggers below to match existing index entries.
    logger.info('Populating nodes_fts with existing data...');

    const { count } = db.prepare('SELECT COUNT(*) AS count FROM nodes').get() as any;
    logger.info(`Migrating ${count} nodes to FTS index...`);

    db.prepare(`
      INSERT INTO nodes_fts(rowid, node_type, display_name, description, documentation, operations)
      SELECT rowid, node_type, display_name, description, documentation, operations FROM nodes
    `).run();

    // Create triggers to keep FTS in sync.
    //
    // NOTE: for external-content FTS5 tables, plain UPDATE/DELETE statements
    // against the FTS table are not supported. Rows must instead be removed
    // with the special 'delete' INSERT command carrying the OLD column values
    // (see "External Content Tables" in the SQLite FTS5 documentation).
    logger.info('Creating synchronization triggers...');

    db.prepare(`
      CREATE TRIGGER IF NOT EXISTS nodes_fts_insert AFTER INSERT ON nodes
      BEGIN
        INSERT INTO nodes_fts(rowid, node_type, display_name, description, documentation, operations)
        VALUES (new.rowid, new.node_type, new.display_name, new.description, new.documentation, new.operations);
      END
    `).run();

    db.prepare(`
      CREATE TRIGGER IF NOT EXISTS nodes_fts_update AFTER UPDATE ON nodes
      BEGIN
        INSERT INTO nodes_fts(nodes_fts, rowid, node_type, display_name, description, documentation, operations)
        VALUES ('delete', old.rowid, old.node_type, old.display_name, old.description, old.documentation, old.operations);
        INSERT INTO nodes_fts(rowid, node_type, display_name, description, documentation, operations)
        VALUES (new.rowid, new.node_type, new.display_name, new.description, new.documentation, new.operations);
      END
    `).run();

    db.prepare(`
      CREATE TRIGGER IF NOT EXISTS nodes_fts_delete AFTER DELETE ON nodes
      BEGIN
        INSERT INTO nodes_fts(nodes_fts, rowid, node_type, display_name, description, documentation, operations)
        VALUES ('delete', old.rowid, old.node_type, old.display_name, old.description, old.documentation, old.operations);
      END
    `).run();

    // Smoke-test the index with a common query term.
    logger.info('Testing FTS search...');

    const testResults = db.prepare(`
      SELECT n.* FROM nodes n
      JOIN nodes_fts ON n.rowid = nodes_fts.rowid
      WHERE nodes_fts MATCH 'webhook'
      ORDER BY rank
      LIMIT 5
    `).all();

    logger.info(`FTS test search found ${testResults.length} results for 'webhook'`);

    // sql.js keeps the database in memory; persist() writes it back to disk.
    if ('persist' in db) {
      logger.info('Persisting database changes...');
      (db as any).persist();
    }

    logger.info('✅ FTS5 migration completed successfully!');

  } catch (error) {
    logger.error('Migration failed:', error);
    throw error;
  } finally {
    db.close();
  }
}

// Run migration
migrateNodesFTS().catch(error => {
  logger.error('Migration error:', error);
  process.exit(1);
});
```
--------------------------------------------------------------------------------
/src/utils/example-generator.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Generates example workflows and parameters for n8n nodes
3 | */
4 | export class ExampleGenerator {
5 | /**
6 | * Generate an example workflow from node definition
7 | */
8 | static generateFromNodeDefinition(nodeDefinition: any): any {
9 | const nodeName = nodeDefinition.displayName || 'Example Node';
10 | const nodeType = nodeDefinition.name || 'n8n-nodes-base.exampleNode';
11 |
12 | return {
13 | name: `${nodeName} Example Workflow`,
14 | nodes: [
15 | {
16 | parameters: this.generateExampleParameters(nodeDefinition),
17 | id: this.generateNodeId(),
18 | name: nodeName,
19 | type: nodeType,
20 | typeVersion: nodeDefinition.version || 1,
21 | position: [250, 300],
22 | },
23 | ],
24 | connections: {},
25 | active: false,
26 | settings: {},
27 | tags: ['example', 'generated'],
28 | };
29 | }
30 |
31 | /**
32 | * Generate example parameters based on node properties
33 | */
34 | static generateExampleParameters(nodeDefinition: any): any {
35 | const params: any = {};
36 |
37 | // If properties are available, generate examples based on them
38 | if (Array.isArray(nodeDefinition.properties)) {
39 | for (const prop of nodeDefinition.properties) {
40 | if (prop.name && prop.type) {
41 | params[prop.name] = this.generateExampleValue(prop);
42 | }
43 | }
44 | }
45 |
46 | // Add common parameters based on node type
47 | if (nodeDefinition.displayName?.toLowerCase().includes('trigger')) {
48 | params.pollTimes = {
49 | item: [
50 | {
51 | mode: 'everyMinute',
52 | },
53 | ],
54 | };
55 | }
56 |
57 | return params;
58 | }
59 |
60 | /**
61 | * Generate example value based on property definition
62 | */
63 | private static generateExampleValue(property: any): any {
64 | switch (property.type) {
65 | case 'string':
66 | if (property.name.toLowerCase().includes('url')) {
67 | return 'https://example.com';
68 | }
69 | if (property.name.toLowerCase().includes('email')) {
70 | return '[email protected]';
71 | }
72 | if (property.name.toLowerCase().includes('name')) {
73 | return 'Example Name';
74 | }
75 | return property.default || 'example-value';
76 |
77 | case 'number':
78 | return property.default || 10;
79 |
80 | case 'boolean':
81 | return property.default !== undefined ? property.default : true;
82 |
83 | case 'options':
84 | if (property.options && property.options.length > 0) {
85 | return property.options[0].value;
86 | }
87 | return property.default || '';
88 |
89 | case 'collection':
90 | case 'fixedCollection':
91 | return {};
92 |
93 | default:
94 | return property.default || null;
95 | }
96 | }
97 |
98 | /**
99 | * Generate a unique node ID
100 | */
101 | private static generateNodeId(): string {
102 | return Math.random().toString(36).substring(2, 15) +
103 | Math.random().toString(36).substring(2, 15);
104 | }
105 |
106 | /**
107 | * Generate example based on node operations
108 | */
109 | static generateFromOperations(operations: any[]): any {
110 | const examples: any[] = [];
111 |
112 | if (!operations || operations.length === 0) {
113 | return examples;
114 | }
115 |
116 | // Group operations by resource
117 | const resourceMap = new Map<string, any[]>();
118 | for (const op of operations) {
119 | if (!resourceMap.has(op.resource)) {
120 | resourceMap.set(op.resource, []);
121 | }
122 | resourceMap.get(op.resource)!.push(op);
123 | }
124 |
125 | // Generate example for each resource
126 | for (const [resource, ops] of resourceMap) {
127 | examples.push({
128 | resource,
129 | operation: ops[0].operation,
130 | description: `Example: ${ops[0].description}`,
131 | parameters: {
132 | resource,
133 | operation: ops[0].operation,
134 | },
135 | });
136 | }
137 |
138 | return examples;
139 | }
140 | }
```
--------------------------------------------------------------------------------
/src/mcp-tools-engine.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * MCPEngine - A simplified interface for benchmarking MCP tool execution
3 | * This directly implements the MCP tool functionality without server dependencies
4 | */
5 | import { NodeRepository } from './database/node-repository';
6 | import { PropertyFilter } from './services/property-filter';
7 | import { TaskTemplates } from './services/task-templates';
8 | import { ConfigValidator } from './services/config-validator';
9 | import { EnhancedConfigValidator } from './services/enhanced-config-validator';
10 | import { WorkflowValidator, WorkflowValidationResult } from './services/workflow-validator';
11 |
12 | export class MCPEngine {
13 | private workflowValidator: WorkflowValidator;
14 |
15 | constructor(private repository: NodeRepository) {
16 | this.workflowValidator = new WorkflowValidator(repository, EnhancedConfigValidator);
17 | }
18 |
19 | async listNodes(args: any = {}) {
20 | return this.repository.getAllNodes(args.limit);
21 | }
22 |
23 | async searchNodes(args: any) {
24 | return this.repository.searchNodes(args.query, args.mode || 'OR', args.limit || 20);
25 | }
26 |
27 | async getNodeInfo(args: any) {
28 | return this.repository.getNodeByType(args.nodeType);
29 | }
30 |
31 | async getNodeEssentials(args: any) {
32 | const node = await this.repository.getNodeByType(args.nodeType);
33 | if (!node) return null;
34 |
35 | // Filter to essentials using static method
36 | const essentials = PropertyFilter.getEssentials(node.properties || [], args.nodeType);
37 | return {
38 | nodeType: node.nodeType,
39 | displayName: node.displayName,
40 | description: node.description,
41 | category: node.category,
42 | required: essentials.required,
43 | common: essentials.common
44 | };
45 | }
46 |
47 | async getNodeDocumentation(args: any) {
48 | const node = await this.repository.getNodeByType(args.nodeType);
49 | return node?.documentation || null;
50 | }
51 |
52 | async validateNodeOperation(args: any) {
53 | // Get node properties and validate
54 | const node = await this.repository.getNodeByType(args.nodeType);
55 | if (!node) {
56 | return {
57 | valid: false,
58 | errors: [{ type: 'invalid_configuration', property: '', message: 'Node type not found' }],
59 | warnings: [],
60 | suggestions: [],
61 | visibleProperties: [],
62 | hiddenProperties: []
63 | };
64 | }
65 |
66 | // CRITICAL FIX: Extract user-provided keys before validation
67 | // This prevents false warnings about default values
68 | const userProvidedKeys = new Set(Object.keys(args.config || {}));
69 |
70 | return ConfigValidator.validate(args.nodeType, args.config, node.properties || [], userProvidedKeys);
71 | }
72 |
73 | async validateNodeMinimal(args: any) {
74 | // Get node and check minimal requirements
75 | const node = await this.repository.getNodeByType(args.nodeType);
76 | if (!node) {
77 | return { missingFields: [], error: 'Node type not found' };
78 | }
79 |
80 | const missingFields: string[] = [];
81 | const requiredFields = PropertyFilter.getEssentials(node.properties || [], args.nodeType).required;
82 |
83 | for (const field of requiredFields) {
84 | if (!args.config[field.name]) {
85 | missingFields.push(field.name);
86 | }
87 | }
88 |
89 | return { missingFields };
90 | }
91 |
92 | async searchNodeProperties(args: any) {
93 | return this.repository.searchNodeProperties(args.nodeType, args.query, args.maxResults || 20);
94 | }
95 |
96 | async listAITools(args: any) {
97 | return this.repository.getAIToolNodes();
98 | }
99 |
100 | async getDatabaseStatistics(args: any) {
101 | const count = await this.repository.getNodeCount();
102 | const aiTools = await this.repository.getAIToolNodes();
103 | return {
104 | totalNodes: count,
105 | aiToolsCount: aiTools.length,
106 | categories: ['trigger', 'transform', 'output', 'input']
107 | };
108 | }
109 |
110 | async validateWorkflow(args: any): Promise<WorkflowValidationResult> {
111 | return this.workflowValidator.validateWorkflow(args.workflow, args.options);
112 | }
113 | }
```
--------------------------------------------------------------------------------
/src/mcp/tool-docs/workflow_management/n8n-create-workflow.ts:
--------------------------------------------------------------------------------
```typescript
import { ToolDocumentation } from '../types';

/**
 * Documentation entry for the `n8n_create_workflow` MCP tool.
 *
 * Pure data: consumed by the tools_documentation renderer, which serves the
 * `essentials` section by default and the `full` section on request.
 */
export const n8nCreateWorkflowDoc: ToolDocumentation = {
  name: 'n8n_create_workflow',
  category: 'workflow_management',
  // Compact summary shown by default.
  essentials: {
    description: 'Create workflow. Requires: name, nodes[], connections{}. Created inactive. Returns workflow with ID.',
    keyParameters: ['name', 'nodes', 'connections'],
    example: 'n8n_create_workflow({name: "My Flow", nodes: [...], connections: {...}})',
    performance: 'Network-dependent',
    tips: [
      'Workflow created inactive',
      'Returns ID for future updates',
      'Validate first with validate_workflow',
      'Auto-sanitization fixes operator structures and missing metadata during creation'
    ]
  },
  // Detailed reference: parameters, worked examples, pitfalls.
  full: {
    description: 'Creates a new workflow in n8n with specified nodes and connections. Workflow is created in inactive state. Each node requires: id, name, type, typeVersion, position, and parameters.',
    parameters: {
      name: { type: 'string', required: true, description: 'Workflow name' },
      nodes: { type: 'array', required: true, description: 'Array of nodes with id, name, type, typeVersion, position, parameters' },
      connections: { type: 'object', required: true, description: 'Node connections. Keys are source node IDs' },
      settings: { type: 'object', description: 'Optional workflow settings (timezone, error handling, etc.)' }
    },
    returns: 'Created workflow object with id, name, nodes, connections, active status',
    examples: [
      `// Basic webhook to Slack workflow
n8n_create_workflow({
  name: "Webhook to Slack",
  nodes: [
    {
      id: "webhook_1",
      name: "Webhook",
      type: "n8n-nodes-base.webhook",
      typeVersion: 1,
      position: [250, 300],
      parameters: {
        httpMethod: "POST",
        path: "slack-notify"
      }
    },
    {
      id: "slack_1",
      name: "Slack",
      type: "n8n-nodes-base.slack",
      typeVersion: 1,
      position: [450, 300],
      parameters: {
        resource: "message",
        operation: "post",
        channel: "#general",
        text: "={{$json.message}}"
      }
    }
  ],
  connections: {
    "webhook_1": {
      "main": [[{node: "slack_1", type: "main", index: 0}]]
    }
  }
})`,
      `// Workflow with settings and error handling
n8n_create_workflow({
  name: "Data Processing",
  nodes: [...],
  connections: {...},
  settings: {
    timezone: "America/New_York",
    errorWorkflow: "error_handler_workflow_id",
    saveDataSuccessExecution: "all",
    saveDataErrorExecution: "all"
  }
})`
    ],
    useCases: [
      'Deploy validated workflows',
      'Automate workflow creation',
      'Clone workflow structures',
      'Template deployment'
    ],
    performance: 'Network-dependent - Typically 100-500ms depending on workflow size',
    bestPractices: [
      'Validate with validate_workflow first',
      'Use unique node IDs',
      'Position nodes for readability',
      'Test with n8n_trigger_webhook_workflow'
    ],
    pitfalls: [
      '**REQUIRES N8N_API_URL and N8N_API_KEY environment variables** - tool unavailable without n8n API access',
      'Workflows created in INACTIVE state - must activate separately',
      'Node IDs must be unique within workflow',
      'Credentials must be configured separately in n8n',
      'Node type names must include package prefix (e.g., "n8n-nodes-base.slack")',
      '**Auto-sanitization runs on creation**: All nodes sanitized before workflow created (operator structures fixed, missing metadata added)',
      '**Auto-sanitization cannot prevent all failures**: Broken connections or invalid node configurations may still cause creation to fail'
    ],
    relatedTools: ['validate_workflow', 'n8n_update_partial_workflow', 'n8n_trigger_webhook_workflow']
  }
};
```
--------------------------------------------------------------------------------
/docs/tools-documentation-usage.md:
--------------------------------------------------------------------------------
```markdown
1 | # MCP Tools Documentation Usage Guide
2 |
3 | The `tools_documentation` tool provides comprehensive documentation for all MCP tools, making it easy for LLMs to understand how to use the tools effectively.
4 |
5 | ## Basic Usage
6 |
7 | ### 1. Get Documentation for Specific Tools
8 |
9 | ```json
10 | {
11 | "name": "tools_documentation",
12 | "arguments": {
13 | "tools": ["search_nodes", "get_node_essentials"]
14 | }
15 | }
16 | ```
17 |
18 | Returns detailed documentation including parameters, examples, and best practices for the specified tools.
19 |
20 | ### 2. Search Tools by Keyword
21 |
22 | ```json
23 | {
24 | "name": "tools_documentation",
25 | "arguments": {
26 | "search": "validation"
27 | }
28 | }
29 | ```
30 |
31 | Finds all tools related to validation, including their descriptions and use cases.
32 |
33 | ### 3. Browse Tools by Category
34 |
35 | ```json
36 | {
37 | "name": "tools_documentation",
38 | "arguments": {
39 | "category": "workflow_management"
40 | }
41 | }
42 | ```
43 |
44 | Available categories:
45 | - **discovery**: Tools for finding and exploring nodes
46 | - **configuration**: Tools for configuring nodes
47 | - **validation**: Tools for validating configurations
48 | - **workflow_management**: Tools for creating and updating workflows
49 | - **execution**: Tools for running workflows
50 | - **templates**: Tools for working with workflow templates
51 |
52 | ### 4. Get All Categories
53 |
54 | ```json
55 | {
56 | "name": "tools_documentation",
57 | "arguments": {}
58 | }
59 | ```
60 |
61 | Returns a list of all categories and the tools in each category.
62 |
63 | ### 5. Include Quick Reference Guide
64 |
65 | ```json
66 | {
67 | "name": "tools_documentation",
68 | "arguments": {
69 | "tools": ["n8n_create_workflow"],
70 | "includeQuickReference": true
71 | }
72 | }
73 | ```
74 |
75 | Includes a quick reference guide with workflow building process, performance tips, and common patterns.
76 |
77 | ## Response Format
78 |
79 | The tool returns structured documentation with:
80 |
81 | - **Parameters**: Complete parameter descriptions with types, requirements, and defaults
82 | - **Return Format**: Example of what the tool returns
83 | - **Common Use Cases**: Real-world scenarios where the tool is useful
84 | - **Examples**: Working examples with input and expected output
85 | - **Performance Notes**: Speed and efficiency considerations
86 | - **Best Practices**: Recommended usage patterns
87 | - **Common Pitfalls**: Mistakes to avoid
88 | - **Related Tools**: Other tools that work well together
89 |
90 | ## Example: Learning About search_nodes
91 |
92 | Request:
93 | ```json
94 | {
95 | "name": "tools_documentation",
96 | "arguments": {
97 | "tools": ["search_nodes"]
98 | }
99 | }
100 | ```
101 |
102 | Response includes:
103 | - How to search effectively (single words work best)
104 | - Performance characteristics (fast, cached)
105 | - Common searches (http, webhook, email, database, slack)
106 | - Pitfalls to avoid (multi-word searches use OR logic)
107 | - Related tools for next steps
108 |
109 | ## Tips for LLMs
110 |
111 | 1. **Start with categories**: Browse available tools by category to understand what's possible
112 | 2. **Search by task**: Use search to find tools for specific tasks like "validation" or "workflow"
113 | 3. **Learn tool combinations**: Check "Related Tools" to understand workflow patterns
114 | 4. **Check examples**: Every tool has working examples to copy and modify
115 | 5. **Avoid pitfalls**: Pay attention to "Common Pitfalls" to prevent errors
116 |
117 | ## Integration with Workflow Building
118 |
119 | The documentation helps build workflows efficiently:
120 |
121 | 1. **Discovery Phase**: Use `search_nodes` and `list_nodes` documentation
122 | 2. **Configuration Phase**: Learn from `get_node_essentials` examples
123 | 3. **Validation Phase**: Understand validation tool options and profiles
124 | 4. **Creation Phase**: Follow `n8n_create_workflow` best practices
125 | 5. **Update Phase**: Master `n8n_update_partial_workflow` operations
126 |
127 | ## Performance Optimization
128 |
129 | The documentation emphasizes performance:
130 | - Which tools are fast (essentials) vs slow (full info)
131 | - Optimal parameters (e.g., limit: 200+ for list_nodes)
132 | - Caching behavior
133 | - Token savings with partial updates
134 |
135 | This documentation system ensures LLMs can use the MCP tools effectively without trial and error.
```
--------------------------------------------------------------------------------
/src/scripts/test-telemetry-mutations.ts:
--------------------------------------------------------------------------------
```typescript
/**
 * Test telemetry mutations
 * Verifies that mutations are properly tracked and persisted
 *
 * Manual integration script: builds a synthetic workflow-mutation payload,
 * feeds it through telemetry.trackWorkflowMutation, flushes, and prints
 * metrics before and after so the run can be checked against Supabase.
 */

import { telemetry } from '../telemetry/telemetry-manager.js';
import { TelemetryConfigManager } from '../telemetry/config-manager.js';

async function testMutations() {
  console.log('Starting telemetry mutation test...\n');

  const configManager = TelemetryConfigManager.getInstance();

  console.log('Telemetry Status:');
  console.log('================');
  console.log(configManager.getStatus());
  console.log('\n');

  // Get initial metrics (baseline to compare against post-flush metrics below)
  const metricsAfterInit = telemetry.getMetrics();
  console.log('Telemetry Metrics (After Init):');
  console.log('================================');
  console.log(JSON.stringify(metricsAfterInit, null, 2));
  console.log('\n');

  // Test data mimicking actual mutation with valid workflow structure:
  // one addNode + one addConnection operation, with before/after snapshots.
  const testMutation = {
    sessionId: 'test_session_' + Date.now(),
    toolName: 'n8n_update_partial_workflow',
    userIntent: 'Add a Merge node for data consolidation',
    operations: [
      {
        type: 'addNode',
        nodeId: 'Merge1',
        node: {
          id: 'Merge1',
          type: 'n8n-nodes-base.merge',
          name: 'Merge',
          position: [600, 200],
          parameters: {}
        }
      },
      {
        type: 'addConnection',
        source: 'previous_node',
        target: 'Merge1'
      }
    ],
    // Snapshot of the workflow before the operations were applied.
    workflowBefore: {
      id: 'test-workflow',
      name: 'Test Workflow',
      active: true,
      nodes: [
        {
          id: 'previous_node',
          type: 'n8n-nodes-base.manualTrigger',
          name: 'When called',
          position: [300, 200],
          parameters: {}
        }
      ],
      connections: {},
      nodeIds: []
    },
    // Snapshot after: Merge node added and wired to the trigger.
    workflowAfter: {
      id: 'test-workflow',
      name: 'Test Workflow',
      active: true,
      nodes: [
        {
          id: 'previous_node',
          type: 'n8n-nodes-base.manualTrigger',
          name: 'When called',
          position: [300, 200],
          parameters: {}
        },
        {
          id: 'Merge1',
          type: 'n8n-nodes-base.merge',
          name: 'Merge',
          position: [600, 200],
          parameters: {}
        }
      ],
      // NOTE(review): n8n's API nests connections as
      // { "<source>": { "main": [[{node, type, index}]] } }; this flat array
      // shape may be a simplification accepted by the telemetry schema —
      // confirm against the workflow_mutations validator before relying on it.
      connections: {
        'previous_node': [
          {
            node: 'Merge1',
            type: 'main',
            index: 0,
            source: 0,
            destination: 0
          }
        ]
      },
      nodeIds: []
    },
    mutationSuccess: true,
    durationMs: 125
  };

  console.log('Test Mutation Data:');
  console.log('==================');
  console.log(JSON.stringify({
    intent: testMutation.userIntent,
    tool: testMutation.toolName,
    operationCount: testMutation.operations.length,
    sessionId: testMutation.sessionId
  }, null, 2));
  console.log('\n');

  // Call trackWorkflowMutation; failures are logged but do not abort the
  // script so the flush step below still runs.
  console.log('Calling telemetry.trackWorkflowMutation...');
  try {
    await telemetry.trackWorkflowMutation(testMutation);
    console.log('✓ trackWorkflowMutation completed successfully\n');
  } catch (error) {
    console.error('✗ trackWorkflowMutation failed:', error);
    console.error('\n');
  }

  // Flush telemetry so the event is persisted immediately rather than batched.
  console.log('Flushing telemetry...');
  try {
    await telemetry.flush();
    console.log('✓ Telemetry flushed successfully\n');
  } catch (error) {
    console.error('✗ Flush failed:', error);
    console.error('\n');
  }

  // Get final metrics for comparison with the baseline captured above.
  const metricsAfterFlush = telemetry.getMetrics();
  console.log('Telemetry Metrics (After Flush):');
  console.log('==================================');
  console.log(JSON.stringify(metricsAfterFlush, null, 2));
  console.log('\n');

  console.log('Test completed. Check workflow_mutations table in Supabase.');
}

testMutations().catch(error => {
  console.error('Test failed:', error);
  process.exit(1);
});
```
--------------------------------------------------------------------------------
/src/mcp/tool-docs/templates/get-template.ts:
--------------------------------------------------------------------------------
```typescript
import { ToolDocumentation } from '../types';

/**
 * Documentation entry for the `get_template` MCP tool.
 *
 * Pure data: consumed by the tools_documentation renderer, which serves the
 * `essentials` section by default and the `full` section on request.
 */
export const getTemplateDoc: ToolDocumentation = {
  name: 'get_template',
  category: 'templates',
  // Compact summary shown by default.
  essentials: {
    description: 'Get workflow template by ID with configurable detail level. Ready to import. IDs from search_templates.',
    keyParameters: ['templateId', 'mode'],
    example: 'get_template({templateId: 1234, mode: "full"})',
    performance: 'Fast (<100ms) - single database lookup',
    tips: [
      'Get template IDs from search_templates first',
      'Use mode="nodes_only" for quick overview, "structure" for topology, "full" for import',
      'Returns complete workflow JSON ready for import into n8n'
    ]
  },
  // Detailed reference: parameters, return shape, pitfalls.
  full: {
    description: `Retrieves the complete workflow JSON for a specific template by its ID. The returned workflow can be directly imported into n8n through the UI or API. This tool fetches pre-built workflows from the community template library containing 2,700+ curated workflows.`,
    parameters: {
      templateId: {
        type: 'number',
        required: true,
        description: 'The numeric ID of the template to retrieve. Get IDs from search_templates'
      },
      mode: {
        type: 'string',
        required: false,
        description: 'Response detail level: "nodes_only" (minimal - just node list), "structure" (nodes + connections), "full" (complete workflow JSON, default)',
        default: 'full',
        enum: ['nodes_only', 'structure', 'full']
      }
    },
    returns: `Returns an object containing:
- template: Complete template information including workflow JSON
  - id: Template ID
  - name: Template name
  - description: What the workflow does
  - author: Creator information (name, username, verified status)
  - nodes: Array of node types used
  - views: Number of times viewed
  - created: Creation date
  - url: Link to template on n8n.io
  - workflow: Complete workflow JSON with structure:
    - nodes: Array of node objects (id, name, type, typeVersion, position, parameters)
    - connections: Object mapping source nodes to targets
    - settings: Workflow configuration (timezone, error handling, etc.)
- usage: Instructions for using the workflow`,
    examples: [
      'get_template({templateId: 1234}) - Get complete workflow (default mode="full")',
      'get_template({templateId: 1234, mode: "nodes_only"}) - Get just the node list',
      'get_template({templateId: 1234, mode: "structure"}) - Get nodes and connections',
      'get_template({templateId: 5678, mode: "full"}) - Get complete workflow JSON for import'
    ],
    useCases: [
      'Download workflows for direct import into n8n',
      'Study workflow patterns and best practices',
      'Get complete workflow JSON for customization',
      'Clone popular workflows for your use case',
      'Learn how complex automations are built'
    ],
    performance: `Fast performance with single database lookup:
- Query time: <10ms for template retrieval
- Workflow JSON parsing: <50ms
- Total response time: <100ms
- No network calls (uses local cache)`,
    bestPractices: [
      'Always check if template exists before attempting modifications',
      'Review workflow nodes before importing to ensure compatibility',
      'Save template JSON locally if planning multiple customizations',
      'Check template creation date for most recent patterns',
      'Verify all required credentials are configured before import'
    ],
    pitfalls: [
      'Template IDs change when database is refreshed',
      'Some templates may use deprecated node versions',
      'Credentials in templates are placeholders - configure your own',
      'Not all templates work with all n8n versions',
      'Template may reference external services you don\'t have access to'
    ],
    relatedTools: ['search_templates', 'n8n_create_workflow']
  }
};
```
--------------------------------------------------------------------------------
/tests/test-storage-system.js:
--------------------------------------------------------------------------------
```javascript
#!/usr/bin/env node

/**
 * Test the node storage and search system.
 *
 * Walks the storage pipeline end to end: extracts a handful of known
 * nodes, stores them, exercises search, prints statistics, bulk-stores
 * a sample of all available nodes, and finally exports everything to a
 * JSON file for database import.
 */

// All requires live at the top of the file. The original declared
// `const path = require('path')` at the very bottom (after its use site
// inside the async function - fragile TDZ ordering) and required `fs`
// in the middle of the function body.
const path = require('path');
const fs = require('fs').promises;
const { NodeSourceExtractor } = require('../dist/utils/node-source-extractor');
const { NodeStorageService } = require('../dist/services/node-storage-service');

async function testStorageSystem() {
  console.log('=== Node Storage System Test ===\n');

  const extractor = new NodeSourceExtractor();
  const storage = new NodeStorageService();

  // 1. Extract and store some nodes
  console.log('1. Extracting and storing nodes...\n');

  const testNodes = [
    'n8n-nodes-base.Function',
    'n8n-nodes-base.Webhook',
    'n8n-nodes-base.HttpRequest',
    '@n8n/n8n-nodes-langchain.Agent'
  ];

  let stored = 0;
  for (const nodeType of testNodes) {
    try {
      console.log(`  Extracting ${nodeType}...`);
      const nodeInfo = await extractor.extractNodeSource(nodeType);
      await storage.storeNode(nodeInfo);
      stored++;
      console.log(`  ✅ Stored successfully`);
    } catch (error) {
      console.log(`  ❌ Failed: ${error.message}`);
    }
  }

  console.log(`\n  Total stored: ${stored}/${testNodes.length}\n`);

  // 2. Test search functionality
  console.log('2. Testing search functionality...\n');

  const searchTests = [
    { query: 'function', desc: 'Search for "function"' },
    { query: 'webhook', desc: 'Search for "webhook"' },
    { packageName: 'n8n-nodes-base', desc: 'Filter by package' },
    { hasCredentials: false, desc: 'Nodes without credentials' }
  ];

  for (const test of searchTests) {
    console.log(`  ${test.desc}:`);
    const results = await storage.searchNodes(test);
    console.log(`    Found ${results.length} nodes`);
    if (results.length > 0) {
      console.log(`    First result: ${results[0].nodeType}`);
    }
  }

  // 3. Get statistics
  console.log('\n3. Storage statistics:\n');

  const stats = await storage.getStatistics();
  console.log(`  Total nodes: ${stats.totalNodes}`);
  console.log(`  Total packages: ${stats.totalPackages}`);
  console.log(`  Total code size: ${(stats.totalCodeSize / 1024).toFixed(2)} KB`);
  console.log(`  Average node size: ${(stats.averageNodeSize / 1024).toFixed(2)} KB`);
  console.log(`  Nodes with credentials: ${stats.nodesWithCredentials}`);

  console.log('\n  Package distribution:');
  stats.packageDistribution.forEach(pkg => {
    console.log(`    ${pkg.package}: ${pkg.count} nodes`);
  });

  // 4. Test bulk extraction
  console.log('\n4. Testing bulk extraction (first 10 nodes)...\n');

  const allNodes = await extractor.listAvailableNodes();
  const nodesToExtract = allNodes.slice(0, 10);

  const nodeInfos = [];
  for (const node of nodesToExtract) {
    try {
      const nodeType = node.packageName ? `${node.packageName}.${node.name}` : node.name;
      const nodeInfo = await extractor.extractNodeSource(nodeType);
      nodeInfos.push(nodeInfo);
    } catch (error) {
      // Best-effort sampling: deliberately skip nodes that fail to extract.
    }
  }

  if (nodeInfos.length > 0) {
    const bulkResult = await storage.bulkStoreNodes(nodeInfos);
    console.log(`  Bulk stored: ${bulkResult.stored}`);
    console.log(`  Failed: ${bulkResult.failed}`);
  }

  // 5. Export for database
  console.log('\n5. Exporting for database...\n');

  const dbExport = await storage.exportForDatabase();
  console.log(`  Exported ${dbExport.nodes.length} nodes`);
  console.log(`  Total packages: ${dbExport.metadata.totalPackages}`);
  console.log(`  Export timestamp: ${dbExport.metadata.exportedAt}`);

  // Save the export next to this script for later database import.
  const exportFile = path.join(__dirname, 'node-storage-export.json');
  await fs.writeFile(exportFile, JSON.stringify(dbExport, null, 2));
  console.log(`  Saved to: ${exportFile}`);

  console.log('\n✅ Storage system test completed!');
}

testStorageSystem().catch(console.error);
```
--------------------------------------------------------------------------------
/src/utils/auth.ts:
--------------------------------------------------------------------------------
```typescript
1 | import crypto from 'crypto';
2 |
3 | export class AuthManager {
4 | private validTokens: Set<string>;
5 | private tokenExpiry: Map<string, number>;
6 |
7 | constructor() {
8 | this.validTokens = new Set();
9 | this.tokenExpiry = new Map();
10 | }
11 |
12 | /**
13 | * Validate an authentication token
14 | */
15 | validateToken(token: string | undefined, expectedToken?: string): boolean {
16 | if (!expectedToken) {
17 | // No authentication required
18 | return true;
19 | }
20 |
21 | if (!token) {
22 | return false;
23 | }
24 |
25 | // SECURITY: Use timing-safe comparison for static token
26 | // See: https://github.com/czlonkowski/n8n-mcp/issues/265 (CRITICAL-02)
27 | if (AuthManager.timingSafeCompare(token, expectedToken)) {
28 | return true;
29 | }
30 |
31 | // Check dynamic tokens
32 | if (this.validTokens.has(token)) {
33 | const expiry = this.tokenExpiry.get(token);
34 | if (expiry && expiry > Date.now()) {
35 | return true;
36 | } else {
37 | // Token expired
38 | this.validTokens.delete(token);
39 | this.tokenExpiry.delete(token);
40 | return false;
41 | }
42 | }
43 |
44 | return false;
45 | }
46 |
47 | /**
48 | * Generate a new authentication token
49 | */
50 | generateToken(expiryHours: number = 24): string {
51 | const token = crypto.randomBytes(32).toString('hex');
52 | const expiryTime = Date.now() + (expiryHours * 60 * 60 * 1000);
53 |
54 | this.validTokens.add(token);
55 | this.tokenExpiry.set(token, expiryTime);
56 |
57 | // Clean up expired tokens
58 | this.cleanupExpiredTokens();
59 |
60 | return token;
61 | }
62 |
63 | /**
64 | * Revoke a token
65 | */
66 | revokeToken(token: string): void {
67 | this.validTokens.delete(token);
68 | this.tokenExpiry.delete(token);
69 | }
70 |
71 | /**
72 | * Clean up expired tokens
73 | */
74 | private cleanupExpiredTokens(): void {
75 | const now = Date.now();
76 | for (const [token, expiry] of this.tokenExpiry.entries()) {
77 | if (expiry <= now) {
78 | this.validTokens.delete(token);
79 | this.tokenExpiry.delete(token);
80 | }
81 | }
82 | }
83 |
84 | /**
85 | * Hash a password or token for secure storage
86 | */
87 | static hashToken(token: string): string {
88 | return crypto.createHash('sha256').update(token).digest('hex');
89 | }
90 |
91 | /**
92 | * Compare a plain token with a hashed token
93 | */
94 | static compareTokens(plainToken: string, hashedToken: string): boolean {
95 | const hashedPlainToken = AuthManager.hashToken(plainToken);
96 | return crypto.timingSafeEqual(
97 | Buffer.from(hashedPlainToken),
98 | Buffer.from(hashedToken)
99 | );
100 | }
101 |
102 | /**
103 | * Compare two tokens using constant-time algorithm to prevent timing attacks
104 | *
105 | * @param plainToken - Token from request
106 | * @param expectedToken - Expected token value
107 | * @returns true if tokens match, false otherwise
108 | *
109 | * @security This uses crypto.timingSafeEqual to prevent timing attack vulnerabilities.
110 | * Never use === or !== for token comparison as it allows attackers to discover
111 | * tokens character-by-character through timing analysis.
112 | *
113 | * @example
114 | * const isValid = AuthManager.timingSafeCompare(requestToken, serverToken);
115 | * if (!isValid) {
116 | * return res.status(401).json({ error: 'Unauthorized' });
117 | * }
118 | *
119 | * @see https://github.com/czlonkowski/n8n-mcp/issues/265 (CRITICAL-02)
120 | */
121 | static timingSafeCompare(plainToken: string, expectedToken: string): boolean {
122 | try {
123 | // Tokens must be non-empty
124 | if (!plainToken || !expectedToken) {
125 | return false;
126 | }
127 |
128 | // Convert to buffers
129 | const plainBuffer = Buffer.from(plainToken, 'utf8');
130 | const expectedBuffer = Buffer.from(expectedToken, 'utf8');
131 |
132 | // Check length first (constant time not needed for length comparison)
133 | if (plainBuffer.length !== expectedBuffer.length) {
134 | return false;
135 | }
136 |
137 | // Constant-time comparison
138 | return crypto.timingSafeEqual(plainBuffer, expectedBuffer);
139 | } catch (error) {
140 | // Buffer conversion or comparison failed
141 | return false;
142 | }
143 | }
144 | }
```
--------------------------------------------------------------------------------
/src/utils/logger.ts:
--------------------------------------------------------------------------------
```typescript
/**
 * Numeric log severity levels. Lower value = higher severity; a message is
 * emitted when its level is <= the logger's configured level.
 */
export enum LogLevel {
  ERROR = 0,
  WARN = 1,
  INFO = 2,
  DEBUG = 3,
}

/** Configuration accepted by a Logger instance. */
export interface LoggerConfig {
  level: LogLevel;      // maximum level that will be emitted
  prefix?: string;      // tag prepended to every message, e.g. "n8n-mcp"
  timestamp?: boolean;  // prepend an ISO-8601 timestamp when true
}
13 |
14 | export class Logger {
15 | private config: LoggerConfig;
16 | private static instance: Logger;
17 | private useFileLogging = false;
18 | private fileStream: any = null;
19 | // Cache environment variables for performance
20 | private readonly isStdio = process.env.MCP_MODE === 'stdio';
21 | private readonly isDisabled = process.env.DISABLE_CONSOLE_OUTPUT === 'true';
22 | private readonly isHttp = process.env.MCP_MODE === 'http';
23 | private readonly isTest = process.env.NODE_ENV === 'test' || process.env.TEST_ENVIRONMENT === 'true';
24 |
25 | constructor(config?: Partial<LoggerConfig>) {
26 | this.config = {
27 | level: LogLevel.INFO,
28 | prefix: 'n8n-mcp',
29 | timestamp: true,
30 | ...config,
31 | };
32 | }
33 |
34 | static getInstance(config?: Partial<LoggerConfig>): Logger {
35 | if (!Logger.instance) {
36 | Logger.instance = new Logger(config);
37 | }
38 | return Logger.instance;
39 | }
40 |
41 | private formatMessage(level: string, message: string): string {
42 | const parts: string[] = [];
43 |
44 | if (this.config.timestamp) {
45 | parts.push(`[${new Date().toISOString()}]`);
46 | }
47 |
48 | if (this.config.prefix) {
49 | parts.push(`[${this.config.prefix}]`);
50 | }
51 |
52 | parts.push(`[${level}]`);
53 | parts.push(message);
54 |
55 | return parts.join(' ');
56 | }
57 |
58 | private log(level: LogLevel, levelName: string, message: string, ...args: any[]): void {
59 | // Allow ERROR level logs through in more cases for debugging
60 | const allowErrorLogs = level === LogLevel.ERROR && (this.isHttp || process.env.DEBUG === 'true');
61 |
62 | // Check environment variables FIRST, before level check
63 | // In stdio mode, suppress ALL console output to avoid corrupting JSON-RPC (except errors when debugging)
64 | // Also suppress in test mode unless debug is explicitly enabled
65 | if (this.isStdio || this.isDisabled || (this.isTest && process.env.DEBUG !== 'true')) {
66 | // Allow error logs through if debugging is enabled
67 | if (!allowErrorLogs) {
68 | return;
69 | }
70 | }
71 |
72 | if (level <= this.config.level || allowErrorLogs) {
73 | const formattedMessage = this.formatMessage(levelName, message);
74 |
75 | // In HTTP mode during request handling, suppress console output (except errors)
76 | // The ConsoleManager will handle this, but we add a safety check
77 | if (this.isHttp && process.env.MCP_REQUEST_ACTIVE === 'true' && !allowErrorLogs) {
78 | // Silently drop the log during active MCP requests (except errors)
79 | return;
80 | }
81 |
82 | switch (level) {
83 | case LogLevel.ERROR:
84 | console.error(formattedMessage, ...args);
85 | break;
86 | case LogLevel.WARN:
87 | console.warn(formattedMessage, ...args);
88 | break;
89 | default:
90 | console.log(formattedMessage, ...args);
91 | }
92 | }
93 | }
94 |
95 | error(message: string, ...args: any[]): void {
96 | this.log(LogLevel.ERROR, 'ERROR', message, ...args);
97 | }
98 |
99 | warn(message: string, ...args: any[]): void {
100 | this.log(LogLevel.WARN, 'WARN', message, ...args);
101 | }
102 |
103 | info(message: string, ...args: any[]): void {
104 | this.log(LogLevel.INFO, 'INFO', message, ...args);
105 | }
106 |
107 | debug(message: string, ...args: any[]): void {
108 | this.log(LogLevel.DEBUG, 'DEBUG', message, ...args);
109 | }
110 |
111 | setLevel(level: LogLevel): void {
112 | this.config.level = level;
113 | }
114 |
115 | static parseLogLevel(level: string): LogLevel {
116 | switch (level.toLowerCase()) {
117 | case 'error':
118 | return LogLevel.ERROR;
119 | case 'warn':
120 | return LogLevel.WARN;
121 | case 'debug':
122 | return LogLevel.DEBUG;
123 | case 'info':
124 | default:
125 | return LogLevel.INFO;
126 | }
127 | }
128 | }
129 |
130 | // Create a default logger instance
131 | export const logger = Logger.getInstance({
132 | level: Logger.parseLogLevel(process.env.LOG_LEVEL || 'info'),
133 | });
```
--------------------------------------------------------------------------------
/scripts/test-http.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# Test script for n8n-MCP HTTP Server
#
# Usage:
#   ./test-http.sh [server-url]          # default: http://localhost:3000
# Environment:
#   AUTH_TOKEN  bearer token for authenticated requests (default: test-token)
#   VERBOSE     set to 1 to echo each request before sending it

# Abort immediately on the first failing command.
set -e

# Configuration
URL="${1:-http://localhost:3000}"
TOKEN="${AUTH_TOKEN:-test-token}"
VERBOSE="${VERBOSE:-0}"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo "🧪 Testing n8n-MCP HTTP Server"
echo "================================"
echo "Server URL: $URL"
echo ""

# Check if jq is installed; fall back to plain `cat` so responses still print.
if ! command -v jq &> /dev/null; then
    echo -e "${YELLOW}Warning: jq not installed. Output will not be formatted.${NC}"
    echo "Install with: brew install jq (macOS) or apt-get install jq (Linux)"
    echo ""
    JQ="cat"
else
    JQ="jq ."
fi

# make_request METHOD ENDPOINT DATA HEADERS EXPECTED_STATUS
#   Sends one HTTP request with curl and prints a colored pass/fail line by
#   comparing the actual HTTP status against EXPECTED_STATUS. DATA and
#   HEADERS may be empty strings.
make_request() {
    local method="$1"
    local endpoint="$2"
    local data="$3"
    local headers="$4"
    local expected_status="$5"

    if [ "$VERBOSE" = "1" ]; then
        echo -e "${YELLOW}Request:${NC} $method $URL$endpoint"
        [ -n "$data" ] && echo -e "${YELLOW}Data:${NC} $data"
    fi

    # Build the curl command as a string so optional headers/data can be
    # spliced in. `-w '\n%{http_code}'` appends the status code on its own
    # line after the body.
    # NOTE(review): `eval` is needed so the quoted header strings split into
    # separate arguments — callers must only pass trusted HEADERS/DATA.
    local cmd="curl -s -w '\n%{http_code}' -X $method '$URL$endpoint'"
    [ -n "$headers" ] && cmd="$cmd $headers"
    [ -n "$data" ] && cmd="$cmd -d '$data'"

    # Execute and capture response: the body is everything except the last
    # line (sed '$d'); the status code is the last line (tail -n 1).
    local response=$(eval "$cmd")
    local body=$(echo "$response" | sed '$d')
    local status=$(echo "$response" | tail -n 1)

    # Check status
    if [ "$status" = "$expected_status" ]; then
        echo -e "${GREEN}✓${NC} $method $endpoint - Status: $status"
    else
        echo -e "${RED}✗${NC} $method $endpoint - Expected: $expected_status, Got: $status"
    fi

    # Show response body (pretty-printed when jq is available)
    if [ -n "$body" ]; then
        echo "$body" | $JQ
    fi
    echo ""
}

# Test 1: Health check
echo "1. Testing health endpoint..."
make_request "GET" "/health" "" "" "200"

# Test 2: OPTIONS request (CORS preflight)
echo "2. Testing CORS preflight..."
make_request "OPTIONS" "/mcp" "" "-H 'Origin: http://localhost' -H 'Access-Control-Request-Method: POST'" "204"

# Test 3: Authentication failure
echo "3. Testing authentication (should fail)..."
make_request "POST" "/mcp" \
    '{"jsonrpc":"2.0","method":"tools/list","id":1}' \
    "-H 'Content-Type: application/json' -H 'Authorization: Bearer wrong-token'" \
    "401"

# Test 4: Missing authentication
echo "4. Testing missing authentication..."
make_request "POST" "/mcp" \
    '{"jsonrpc":"2.0","method":"tools/list","id":1}' \
    "-H 'Content-Type: application/json'" \
    "401"

# Test 5: Valid MCP request to list tools
echo "5. Testing valid MCP request (list tools)..."
make_request "POST" "/mcp" \
    '{"jsonrpc":"2.0","method":"tools/list","id":1}' \
    "-H 'Content-Type: application/json' -H 'Authorization: Bearer $TOKEN' -H 'Accept: application/json, text/event-stream'" \
    "200"

# Test 6: 404 for unknown endpoint
echo "6. Testing 404 response..."
make_request "GET" "/unknown" "" "" "404"

# Test 7: Invalid JSON
echo "7. Testing invalid JSON..."
make_request "POST" "/mcp" \
    '{invalid json}' \
    "-H 'Content-Type: application/json' -H 'Authorization: Bearer $TOKEN'" \
    "400"

# Test 8: Request size limit
echo "8. Testing request size limit..."
# Use a different approach for large data
echo "Skipping large payload test (would exceed bash limits)"

# Test 9: MCP initialization (verbose runs only)
if [ "$VERBOSE" = "1" ]; then
    echo "9. Testing MCP initialization..."
    make_request "POST" "/mcp" \
        '{"jsonrpc":"2.0","method":"initialize","params":{"protocolVersion":"2024-11-05","capabilities":{"roots":{}}},"id":1}' \
        "-H 'Content-Type: application/json' -H 'Authorization: Bearer $TOKEN' -H 'Accept: text/event-stream'" \
        "200"
fi

echo "================================"
echo "🎉 Tests completed!"
echo ""
echo "To run with verbose output: VERBOSE=1 $0"
echo "To test a different server: $0 https://your-server.com"
echo "To use a different token: AUTH_TOKEN=your-token $0"
--------------------------------------------------------------------------------
/docs/CI_TEST_INFRASTRUCTURE.md:
--------------------------------------------------------------------------------
```markdown
1 | # CI Test Infrastructure - Known Issues
2 |
3 | ## Integration Test Failures for External Contributor PRs
4 |
5 | ### Issue Summary
6 |
7 | Integration tests fail for external contributor PRs with "No response from n8n server" errors, despite the code changes being correct. This is a **test infrastructure issue**, not a code quality issue.
8 |
9 | ### Root Cause
10 |
11 | 1. **GitHub Actions Security**: External contributor PRs don't get access to repository secrets (`N8N_API_URL`, `N8N_API_KEY`, etc.)
12 | 2. **MSW Mock Server**: Mock Service Worker (MSW) is not properly intercepting HTTP requests in the CI environment
13 | 3. **Test Configuration**: Integration tests expect `http://localhost:3001/mock-api` but the mock server isn't responding
14 |
15 | ### Evidence
16 |
17 | From CI logs (PR #343):
18 | ```
19 | [CI-DEBUG] Global setup complete, N8N_API_URL: http://localhost:3001/mock-api
20 | ❌ No response from n8n server (repeated 60+ times across 20 tests)
21 | ```
22 |
23 | The tests ARE using the correct mock URL, but MSW isn't intercepting the requests.
24 |
25 | ### Why This Happens
26 |
27 | **For External PRs:**
28 | - GitHub Actions doesn't expose repository secrets for security reasons
29 | - Prevents malicious PRs from exfiltrating secrets
30 | - MSW setup runs but requests don't get intercepted in CI
31 |
32 | **Test Configuration:**
33 | - `.env.test` line 19: `N8N_API_URL=http://localhost:3001/mock-api`
34 | - `.env.test` line 67: `MSW_ENABLED=true`
35 | - CI workflow lines 75-80: secrets are set but arrive empty for external PRs
36 |
37 | ### Impact
38 |
39 | - ✅ **Code Quality**: NOT affected - the actual code changes are correct
40 | - ✅ **Local Testing**: Works fine - MSW intercepts requests locally
41 | - ❌ **CI for External PRs**: Integration tests fail (infrastructure issue)
42 | - ✅ **CI for Internal PRs**: Works fine (has access to secrets)
43 |
44 | ### Current Workarounds
45 |
46 | 1. **For Maintainers**: Use `--admin` flag to merge despite failing tests when code is verified correct
47 | 2. **For Contributors**: Run tests locally where MSW works properly
48 | 3. **For CI**: Unit tests pass (don't require n8n API), integration tests fail
49 |
50 | ### Files Affected
51 |
52 | - `tests/integration/setup/integration-setup.ts` - MSW server setup
53 | - `tests/setup/msw-setup.ts` - MSW configuration
54 | - `tests/mocks/n8n-api/handlers.ts` - Mock request handlers
55 | - `.github/workflows/test.yml` - CI configuration
56 | - `.env.test` - Test environment configuration
57 |
58 | ### Potential Solutions (Not Implemented)
59 |
60 | 1. **Separate Unit/Integration Runs**
61 | - Run integration tests only for internal PRs
62 | - Skip integration tests for external PRs
63 | - Rely on unit tests for external PR validation
64 |
65 | 2. **MSW CI Debugging**
66 | - Add extensive logging to MSW setup
67 | - Check if MSW server actually starts in CI
68 | - Verify request interception is working
69 |
70 | 3. **Mock Server Process**
71 | - Start actual HTTP server in CI instead of MSW
72 | - More reliable but adds complexity
73 | - Would require test infrastructure refactoring
74 |
75 | 4. **Public Test Instance**
76 | - Use publicly accessible test n8n instance
77 | - Exposes test data, security concerns
78 | - Would work for external PRs
79 |
80 | ### Decision
81 |
82 | **Status**: Documented but not fixed
83 |
84 | **Rationale**:
85 | - Integration test infrastructure refactoring is separate concern from code quality
86 | - External PRs are relatively rare compared to internal development
87 | - Unit tests provide sufficient coverage for most changes
88 | - Maintainers can verify integration tests locally before merging
89 |
90 | ### Testing Strategy
91 |
92 | **For External Contributor PRs:**
93 | 1. ✅ Unit tests must pass
94 | 2. ✅ TypeScript compilation must pass
95 | 3. ✅ Build must succeed
96 | 4. ⚠️ Integration test failures are expected (infrastructure issue)
97 | 5. ✅ Maintainer verifies locally before merge
98 |
99 | **For Internal PRs:**
100 | 1. ✅ All tests must pass (unit + integration)
101 | 2. ✅ Full CI validation
102 |
103 | ### References
104 |
105 | - PR #343: First occurrence of this issue
106 | - PR #345: Documented the infrastructure issue
107 | - Issue: External PRs don't get secrets (GitHub Actions security)
108 |
109 | ### Last Updated
110 |
111 | 2025-10-21 - Documented as part of PR #345 investigation
112 |
```
--------------------------------------------------------------------------------
/tests/unit/services/debug-validator.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { WorkflowValidator } from '@/services/workflow-validator';

// Mock dependencies - don't use vi.mock for complex mocks
// NOTE: vi.mock calls are hoisted above the imports by vitest, so the
// factories below must be self-contained (no file-level variable access).
vi.mock('@/services/expression-validator', () => ({
  ExpressionValidator: {
    // Report every expression as valid; these tests target structural
    // workflow validation, not expression syntax.
    validateNodeExpressions: () => ({
      valid: true,
      errors: [],
      warnings: [],
      variables: [],
      expressions: []
    })
  }
}));
vi.mock('@/utils/logger', () => ({
  // No-op logger so validation runs silently during the tests.
  Logger: vi.fn().mockImplementation(() => ({
    error: vi.fn(),
    warn: vi.fn(),
    info: vi.fn(),
    debug: vi.fn()
  }))
}));

describe('Debug Validator Tests', () => {
  let validator: WorkflowValidator;
  let mockNodeRepository: any;
  let mockEnhancedConfigValidator: any;

  beforeEach(() => {
    // Create mock repository that only knows about the "Set" node.
    mockNodeRepository = {
      getNode: (nodeType: string) => {
        // Handle both n8n-nodes-base.set and nodes-base.set (normalized)
        if (nodeType === 'n8n-nodes-base.set' || nodeType === 'nodes-base.set') {
          return {
            name: 'Set',
            type: 'nodes-base.set',
            typeVersion: 1,
            properties: [],
            package: 'n8n-nodes-base',
            version: 1,
            displayName: 'Set'
          };
        }
        // Any other type resolves to null (treated as an unknown node).
        return null;
      }
    };

    // Create mock EnhancedConfigValidator that accepts any configuration.
    mockEnhancedConfigValidator = {
      validateWithMode: () => ({
        valid: true,
        errors: [],
        warnings: [],
        suggestions: [],
        mode: 'operation',
        visibleProperties: [],
        hiddenProperties: []
      })
    };

    // Create validator instance under test
    validator = new WorkflowValidator(mockNodeRepository, mockEnhancedConfigValidator as any);
  });

  it('should handle nodes at extreme positions - debug', async () => {
    // Canvas positions far outside the visible area must not break validation.
    const workflow = {
      nodes: [
        { id: '1', name: 'FarLeft', type: 'n8n-nodes-base.set', position: [-999999, -999999] as [number, number], parameters: {} },
        { id: '2', name: 'FarRight', type: 'n8n-nodes-base.set', position: [999999, 999999] as [number, number], parameters: {} },
        { id: '3', name: 'Zero', type: 'n8n-nodes-base.set', position: [0, 0] as [number, number], parameters: {} }
      ],
      connections: {
        'FarLeft': {
          main: [[{ node: 'FarRight', type: 'main', index: 0 }]]
        },
        'FarRight': {
          main: [[{ node: 'Zero', type: 'main', index: 0 }]]
        }
      }
    };

    const result = await validator.validateWorkflow(workflow);


    // Test should pass with extreme positions
    expect(result.valid).toBe(true);
    expect(result.errors).toHaveLength(0);
  });

  it('should handle special characters in node names - debug', async () => {
    // Node names used as connection keys may contain symbols, CJK and emoji.
    const workflow = {
      nodes: [
        { id: '1', name: 'Node@#$%', type: 'n8n-nodes-base.set', position: [0, 0] as [number, number], parameters: {} },
        { id: '2', name: 'Node 中文', type: 'n8n-nodes-base.set', position: [100, 0] as [number, number], parameters: {} },
        { id: '3', name: 'Node😊', type: 'n8n-nodes-base.set', position: [200, 0] as [number, number], parameters: {} }
      ],
      connections: {
        'Node@#$%': {
          main: [[{ node: 'Node 中文', type: 'main', index: 0 }]]
        },
        'Node 中文': {
          main: [[{ node: 'Node😊', type: 'main', index: 0 }]]
        }
      }
    };

    const result = await validator.validateWorkflow(workflow);


    // Test should pass with special characters in node names
    expect(result.valid).toBe(true);
    expect(result.errors).toHaveLength(0);
  });

  it('should handle non-array nodes - debug', async () => {
    // A malformed workflow (nodes is a string) must fail with a clear error.
    const workflow = {
      nodes: 'not-an-array',
      connections: {}
    };
    const result = await validator.validateWorkflow(workflow as any);


    expect(result.valid).toBe(false);
    expect(result.errors[0].message).toContain('nodes must be an array');
  });
});
```
--------------------------------------------------------------------------------
/scripts/vitest-benchmark-json-reporter.js:
--------------------------------------------------------------------------------
```javascript
1 | const { writeFileSync } = require('fs');
2 | const { resolve } = require('path');
3 |
/**
 * Vitest reporter that serializes benchmark results to
 * `benchmark-results.json` in the current working directory.
 *
 * Improvements over the previous version:
 * - the 10-field benchmark mapping, previously duplicated verbatim for
 *   suite-nested and direct benchmarks, is factored into
 *   `serializeBenchmark`;
 * - direct (top-level) benchmarks now always land in a dedicated
 *   "Default" group instead of being appended to groups[0], which could
 *   be a named suite group.
 */
class BenchmarkJsonReporter {
  constructor() {
    this.results = [];
    console.log('[BenchmarkJsonReporter] Initialized');
  }

  onInit(ctx) {
    console.log('[BenchmarkJsonReporter] onInit called');
  }

  onCollected(files) {
    console.log('[BenchmarkJsonReporter] onCollected called with', files ? files.length : 0, 'files');
  }

  onTaskUpdate(tasks) {
    console.log('[BenchmarkJsonReporter] onTaskUpdate called');
  }

  onBenchmarkResult(file, benchmark) {
    console.log('[BenchmarkJsonReporter] onBenchmarkResult called for', benchmark.name);
  }

  /**
   * Convert one vitest benchmark result into the plain JSON shape we persist.
   *
   * @param {string} name - benchmark task name
   * @param {object} benchmark - vitest `result.benchmark` object
   * @returns {{name: string, result: object}}
   */
  static serializeBenchmark(name, benchmark) {
    return {
      name,
      result: {
        mean: benchmark.mean,
        min: benchmark.min,
        max: benchmark.max,
        hz: benchmark.hz,
        p75: benchmark.p75,
        p99: benchmark.p99,
        p995: benchmark.p995,
        p999: benchmark.p999,
        rme: benchmark.rme,
        samples: benchmark.samples
      }
    };
  }

  onFinished(files, errors) {
    console.log('[BenchmarkJsonReporter] onFinished called with', files ? files.length : 0, 'files');

    const results = {
      timestamp: new Date().toISOString(),
      files: []
    };

    try {
      for (const file of files || []) {
        if (!file) continue;

        const fileResult = {
          filepath: file.filepath || file.name || 'unknown',
          groups: []
        };

        // Handle both file.tasks and file.benchmarks
        const tasks = file.tasks || file.benchmarks || [];

        // Process tasks/benchmarks
        for (const task of tasks) {
          if (task.type === 'suite' && task.tasks) {
            // This is a suite containing benchmarks
            const group = {
              name: task.name,
              benchmarks: []
            };

            for (const benchmark of task.tasks) {
              if (benchmark.result?.benchmark) {
                group.benchmarks.push(
                  BenchmarkJsonReporter.serializeBenchmark(benchmark.name, benchmark.result.benchmark)
                );
              }
            }

            if (group.benchmarks.length > 0) {
              fileResult.groups.push(group);
            }
          } else if (task.result?.benchmark) {
            // Direct benchmark (not in a suite): collect into a dedicated
            // "Default" group, creating it on first use.
            let defaultGroup = fileResult.groups.find((g) => g.name === 'Default');
            if (!defaultGroup) {
              defaultGroup = { name: 'Default', benchmarks: [] };
              fileResult.groups.push(defaultGroup);
            }
            defaultGroup.benchmarks.push(
              BenchmarkJsonReporter.serializeBenchmark(task.name, task.result.benchmark)
            );
          }
        }

        if (fileResult.groups.length > 0) {
          results.files.push(fileResult);
        }
      }

      // Write results
      const outputPath = resolve(process.cwd(), 'benchmark-results.json');
      writeFileSync(outputPath, JSON.stringify(results, null, 2));
      console.log(`[BenchmarkJsonReporter] Benchmark results written to ${outputPath}`);
      console.log(`[BenchmarkJsonReporter] Total files processed: ${results.files.length}`);
    } catch (error) {
      console.error('[BenchmarkJsonReporter] Error writing results:', error);
    }
  }
}
120 |
121 | module.exports = BenchmarkJsonReporter;
```