This is page 23 of 46. Use http://codebase.md/czlonkowski/n8n-mcp?lines=false&page={x} to view the full context.
# Directory Structure
```
├── _config.yml
├── .claude
│ └── agents
│ ├── code-reviewer.md
│ ├── context-manager.md
│ ├── debugger.md
│ ├── deployment-engineer.md
│ ├── mcp-backend-engineer.md
│ ├── n8n-mcp-tester.md
│ ├── technical-researcher.md
│ └── test-automator.md
├── .dockerignore
├── .env.docker
├── .env.example
├── .env.n8n.example
├── .env.test
├── .env.test.example
├── .github
│ ├── ABOUT.md
│ ├── BENCHMARK_THRESHOLDS.md
│ ├── FUNDING.yml
│ ├── gh-pages.yml
│ ├── secret_scanning.yml
│ └── workflows
│ ├── benchmark-pr.yml
│ ├── benchmark.yml
│ ├── docker-build-fast.yml
│ ├── docker-build-n8n.yml
│ ├── docker-build.yml
│ ├── release.yml
│ ├── test.yml
│ └── update-n8n-deps.yml
├── .gitignore
├── .npmignore
├── ATTRIBUTION.md
├── CHANGELOG.md
├── CLAUDE.md
├── codecov.yml
├── coverage.json
├── data
│ ├── .gitkeep
│ ├── nodes.db
│ ├── nodes.db-shm
│ ├── nodes.db-wal
│ └── templates.db
├── deploy
│ └── quick-deploy-n8n.sh
├── docker
│ ├── docker-entrypoint.sh
│ ├── n8n-mcp
│ ├── parse-config.js
│ └── README.md
├── docker-compose.buildkit.yml
├── docker-compose.extract.yml
├── docker-compose.n8n.yml
├── docker-compose.override.yml.example
├── docker-compose.test-n8n.yml
├── docker-compose.yml
├── Dockerfile
├── Dockerfile.railway
├── Dockerfile.test
├── docs
│ ├── AUTOMATED_RELEASES.md
│ ├── BENCHMARKS.md
│ ├── CHANGELOG.md
│ ├── CI_TEST_INFRASTRUCTURE.md
│ ├── CLAUDE_CODE_SETUP.md
│ ├── CLAUDE_INTERVIEW.md
│ ├── CODECOV_SETUP.md
│ ├── CODEX_SETUP.md
│ ├── CURSOR_SETUP.md
│ ├── DEPENDENCY_UPDATES.md
│ ├── DOCKER_README.md
│ ├── DOCKER_TROUBLESHOOTING.md
│ ├── FINAL_AI_VALIDATION_SPEC.md
│ ├── FLEXIBLE_INSTANCE_CONFIGURATION.md
│ ├── HTTP_DEPLOYMENT.md
│ ├── img
│ │ ├── cc_command.png
│ │ ├── cc_connected.png
│ │ ├── codex_connected.png
│ │ ├── cursor_tut.png
│ │ ├── Railway_api.png
│ │ ├── Railway_server_address.png
│ │ ├── skills.png
│ │ ├── vsc_ghcp_chat_agent_mode.png
│ │ ├── vsc_ghcp_chat_instruction_files.png
│ │ ├── vsc_ghcp_chat_thinking_tool.png
│ │ └── windsurf_tut.png
│ ├── INSTALLATION.md
│ ├── LIBRARY_USAGE.md
│ ├── local
│ │ ├── DEEP_DIVE_ANALYSIS_2025-10-02.md
│ │ ├── DEEP_DIVE_ANALYSIS_README.md
│ │ ├── Deep_dive_p1_p2.md
│ │ ├── integration-testing-plan.md
│ │ ├── integration-tests-phase1-summary.md
│ │ ├── N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md
│ │ ├── P0_IMPLEMENTATION_PLAN.md
│ │ └── TEMPLATE_MINING_ANALYSIS.md
│ ├── MCP_ESSENTIALS_README.md
│ ├── MCP_QUICK_START_GUIDE.md
│ ├── N8N_DEPLOYMENT.md
│ ├── RAILWAY_DEPLOYMENT.md
│ ├── README_CLAUDE_SETUP.md
│ ├── README.md
│ ├── tools-documentation-usage.md
│ ├── VS_CODE_PROJECT_SETUP.md
│ ├── WINDSURF_SETUP.md
│ └── workflow-diff-examples.md
├── examples
│ └── enhanced-documentation-demo.js
├── fetch_log.txt
├── LICENSE
├── MEMORY_N8N_UPDATE.md
├── MEMORY_TEMPLATE_UPDATE.md
├── monitor_fetch.sh
├── N8N_HTTP_STREAMABLE_SETUP.md
├── n8n-nodes.db
├── P0-R3-TEST-PLAN.md
├── package-lock.json
├── package.json
├── package.runtime.json
├── PRIVACY.md
├── railway.json
├── README.md
├── renovate.json
├── scripts
│ ├── analyze-optimization.sh
│ ├── audit-schema-coverage.ts
│ ├── build-optimized.sh
│ ├── compare-benchmarks.js
│ ├── demo-optimization.sh
│ ├── deploy-http.sh
│ ├── deploy-to-vm.sh
│ ├── export-webhook-workflows.ts
│ ├── extract-changelog.js
│ ├── extract-from-docker.js
│ ├── extract-nodes-docker.sh
│ ├── extract-nodes-simple.sh
│ ├── format-benchmark-results.js
│ ├── generate-benchmark-stub.js
│ ├── generate-detailed-reports.js
│ ├── generate-test-summary.js
│ ├── http-bridge.js
│ ├── mcp-http-client.js
│ ├── migrate-nodes-fts.ts
│ ├── migrate-tool-docs.ts
│ ├── n8n-docs-mcp.service
│ ├── nginx-n8n-mcp.conf
│ ├── prebuild-fts5.ts
│ ├── prepare-release.js
│ ├── publish-npm-quick.sh
│ ├── publish-npm.sh
│ ├── quick-test.ts
│ ├── run-benchmarks-ci.js
│ ├── sync-runtime-version.js
│ ├── test-ai-validation-debug.ts
│ ├── test-code-node-enhancements.ts
│ ├── test-code-node-fixes.ts
│ ├── test-docker-config.sh
│ ├── test-docker-fingerprint.ts
│ ├── test-docker-optimization.sh
│ ├── test-docker.sh
│ ├── test-empty-connection-validation.ts
│ ├── test-error-message-tracking.ts
│ ├── test-error-output-validation.ts
│ ├── test-error-validation.js
│ ├── test-essentials.ts
│ ├── test-expression-code-validation.ts
│ ├── test-expression-format-validation.js
│ ├── test-fts5-search.ts
│ ├── test-fuzzy-fix.ts
│ ├── test-fuzzy-simple.ts
│ ├── test-helpers-validation.ts
│ ├── test-http-search.ts
│ ├── test-http.sh
│ ├── test-jmespath-validation.ts
│ ├── test-multi-tenant-simple.ts
│ ├── test-multi-tenant.ts
│ ├── test-n8n-integration.sh
│ ├── test-node-info.js
│ ├── test-node-type-validation.ts
│ ├── test-nodes-base-prefix.ts
│ ├── test-operation-validation.ts
│ ├── test-optimized-docker.sh
│ ├── test-release-automation.js
│ ├── test-search-improvements.ts
│ ├── test-security.ts
│ ├── test-single-session.sh
│ ├── test-sqljs-triggers.ts
│ ├── test-telemetry-debug.ts
│ ├── test-telemetry-direct.ts
│ ├── test-telemetry-env.ts
│ ├── test-telemetry-integration.ts
│ ├── test-telemetry-no-select.ts
│ ├── test-telemetry-security.ts
│ ├── test-telemetry-simple.ts
│ ├── test-typeversion-validation.ts
│ ├── test-url-configuration.ts
│ ├── test-user-id-persistence.ts
│ ├── test-webhook-validation.ts
│ ├── test-workflow-insert.ts
│ ├── test-workflow-sanitizer.ts
│ ├── test-workflow-tracking-debug.ts
│ ├── update-and-publish-prep.sh
│ ├── update-n8n-deps.js
│ ├── update-readme-version.js
│ ├── vitest-benchmark-json-reporter.js
│ └── vitest-benchmark-reporter.ts
├── SECURITY.md
├── src
│ ├── config
│ │ └── n8n-api.ts
│ ├── data
│ │ └── canonical-ai-tool-examples.json
│ ├── database
│ │ ├── database-adapter.ts
│ │ ├── migrations
│ │ │ └── add-template-node-configs.sql
│ │ ├── node-repository.ts
│ │ ├── nodes.db
│ │ ├── schema-optimized.sql
│ │ └── schema.sql
│ ├── errors
│ │ └── validation-service-error.ts
│ ├── http-server-single-session.ts
│ ├── http-server.ts
│ ├── index.ts
│ ├── loaders
│ │ └── node-loader.ts
│ ├── mappers
│ │ └── docs-mapper.ts
│ ├── mcp
│ │ ├── handlers-n8n-manager.ts
│ │ ├── handlers-workflow-diff.ts
│ │ ├── index.ts
│ │ ├── server.ts
│ │ ├── stdio-wrapper.ts
│ │ ├── tool-docs
│ │ │ ├── configuration
│ │ │ │ ├── get-node-as-tool-info.ts
│ │ │ │ ├── get-node-documentation.ts
│ │ │ │ ├── get-node-essentials.ts
│ │ │ │ ├── get-node-info.ts
│ │ │ │ ├── get-property-dependencies.ts
│ │ │ │ ├── index.ts
│ │ │ │ └── search-node-properties.ts
│ │ │ ├── discovery
│ │ │ │ ├── get-database-statistics.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── list-ai-tools.ts
│ │ │ │ ├── list-nodes.ts
│ │ │ │ └── search-nodes.ts
│ │ │ ├── guides
│ │ │ │ ├── ai-agents-guide.ts
│ │ │ │ └── index.ts
│ │ │ ├── index.ts
│ │ │ ├── system
│ │ │ │ ├── index.ts
│ │ │ │ ├── n8n-diagnostic.ts
│ │ │ │ ├── n8n-health-check.ts
│ │ │ │ ├── n8n-list-available-tools.ts
│ │ │ │ └── tools-documentation.ts
│ │ │ ├── templates
│ │ │ │ ├── get-template.ts
│ │ │ │ ├── get-templates-for-task.ts
│ │ │ │ ├── index.ts
│ │ │ │ ├── list-node-templates.ts
│ │ │ │ ├── list-tasks.ts
│ │ │ │ ├── search-templates-by-metadata.ts
│ │ │ │ └── search-templates.ts
│ │ │ ├── types.ts
│ │ │ ├── validation
│ │ │ │ ├── index.ts
│ │ │ │ ├── validate-node-minimal.ts
│ │ │ │ ├── validate-node-operation.ts
│ │ │ │ ├── validate-workflow-connections.ts
│ │ │ │ ├── validate-workflow-expressions.ts
│ │ │ │ └── validate-workflow.ts
│ │ │ └── workflow_management
│ │ │ ├── index.ts
│ │ │ ├── n8n-autofix-workflow.ts
│ │ │ ├── n8n-create-workflow.ts
│ │ │ ├── n8n-delete-execution.ts
│ │ │ ├── n8n-delete-workflow.ts
│ │ │ ├── n8n-get-execution.ts
│ │ │ ├── n8n-get-workflow-details.ts
│ │ │ ├── n8n-get-workflow-minimal.ts
│ │ │ ├── n8n-get-workflow-structure.ts
│ │ │ ├── n8n-get-workflow.ts
│ │ │ ├── n8n-list-executions.ts
│ │ │ ├── n8n-list-workflows.ts
│ │ │ ├── n8n-trigger-webhook-workflow.ts
│ │ │ ├── n8n-update-full-workflow.ts
│ │ │ ├── n8n-update-partial-workflow.ts
│ │ │ └── n8n-validate-workflow.ts
│ │ ├── tools-documentation.ts
│ │ ├── tools-n8n-friendly.ts
│ │ ├── tools-n8n-manager.ts
│ │ ├── tools.ts
│ │ └── workflow-examples.ts
│ ├── mcp-engine.ts
│ ├── mcp-tools-engine.ts
│ ├── n8n
│ │ ├── MCPApi.credentials.ts
│ │ └── MCPNode.node.ts
│ ├── parsers
│ │ ├── node-parser.ts
│ │ ├── property-extractor.ts
│ │ └── simple-parser.ts
│ ├── scripts
│ │ ├── debug-http-search.ts
│ │ ├── extract-from-docker.ts
│ │ ├── fetch-templates-robust.ts
│ │ ├── fetch-templates.ts
│ │ ├── rebuild-database.ts
│ │ ├── rebuild-optimized.ts
│ │ ├── rebuild.ts
│ │ ├── sanitize-templates.ts
│ │ ├── seed-canonical-ai-examples.ts
│ │ ├── test-autofix-documentation.ts
│ │ ├── test-autofix-workflow.ts
│ │ ├── test-execution-filtering.ts
│ │ ├── test-node-suggestions.ts
│ │ ├── test-protocol-negotiation.ts
│ │ ├── test-summary.ts
│ │ ├── test-webhook-autofix.ts
│ │ ├── validate.ts
│ │ └── validation-summary.ts
│ ├── services
│ │ ├── ai-node-validator.ts
│ │ ├── ai-tool-validators.ts
│ │ ├── confidence-scorer.ts
│ │ ├── config-validator.ts
│ │ ├── enhanced-config-validator.ts
│ │ ├── example-generator.ts
│ │ ├── execution-processor.ts
│ │ ├── expression-format-validator.ts
│ │ ├── expression-validator.ts
│ │ ├── n8n-api-client.ts
│ │ ├── n8n-validation.ts
│ │ ├── node-documentation-service.ts
│ │ ├── node-sanitizer.ts
│ │ ├── node-similarity-service.ts
│ │ ├── node-specific-validators.ts
│ │ ├── operation-similarity-service.ts
│ │ ├── property-dependencies.ts
│ │ ├── property-filter.ts
│ │ ├── resource-similarity-service.ts
│ │ ├── sqlite-storage-service.ts
│ │ ├── task-templates.ts
│ │ ├── universal-expression-validator.ts
│ │ ├── workflow-auto-fixer.ts
│ │ ├── workflow-diff-engine.ts
│ │ └── workflow-validator.ts
│ ├── telemetry
│ │ ├── batch-processor.ts
│ │ ├── config-manager.ts
│ │ ├── early-error-logger.ts
│ │ ├── error-sanitization-utils.ts
│ │ ├── error-sanitizer.ts
│ │ ├── event-tracker.ts
│ │ ├── event-validator.ts
│ │ ├── index.ts
│ │ ├── performance-monitor.ts
│ │ ├── rate-limiter.ts
│ │ ├── startup-checkpoints.ts
│ │ ├── telemetry-error.ts
│ │ ├── telemetry-manager.ts
│ │ ├── telemetry-types.ts
│ │ └── workflow-sanitizer.ts
│ ├── templates
│ │ ├── batch-processor.ts
│ │ ├── metadata-generator.ts
│ │ ├── README.md
│ │ ├── template-fetcher.ts
│ │ ├── template-repository.ts
│ │ └── template-service.ts
│ ├── types
│ │ ├── index.ts
│ │ ├── instance-context.ts
│ │ ├── n8n-api.ts
│ │ ├── node-types.ts
│ │ └── workflow-diff.ts
│ └── utils
│ ├── auth.ts
│ ├── bridge.ts
│ ├── cache-utils.ts
│ ├── console-manager.ts
│ ├── documentation-fetcher.ts
│ ├── enhanced-documentation-fetcher.ts
│ ├── error-handler.ts
│ ├── example-generator.ts
│ ├── expression-utils.ts
│ ├── fixed-collection-validator.ts
│ ├── logger.ts
│ ├── mcp-client.ts
│ ├── n8n-errors.ts
│ ├── node-source-extractor.ts
│ ├── node-type-normalizer.ts
│ ├── node-type-utils.ts
│ ├── node-utils.ts
│ ├── npm-version-checker.ts
│ ├── protocol-version.ts
│ ├── simple-cache.ts
│ ├── ssrf-protection.ts
│ ├── template-node-resolver.ts
│ ├── template-sanitizer.ts
│ ├── url-detector.ts
│ ├── validation-schemas.ts
│ └── version.ts
├── test-output.txt
├── test-reinit-fix.sh
├── tests
│ ├── __snapshots__
│ │ └── .gitkeep
│ ├── auth.test.ts
│ ├── benchmarks
│ │ ├── database-queries.bench.ts
│ │ ├── index.ts
│ │ ├── mcp-tools.bench.ts
│ │ ├── mcp-tools.bench.ts.disabled
│ │ ├── mcp-tools.bench.ts.skip
│ │ ├── node-loading.bench.ts.disabled
│ │ ├── README.md
│ │ ├── search-operations.bench.ts.disabled
│ │ └── validation-performance.bench.ts.disabled
│ ├── bridge.test.ts
│ ├── comprehensive-extraction-test.js
│ ├── data
│ │ └── .gitkeep
│ ├── debug-slack-doc.js
│ ├── demo-enhanced-documentation.js
│ ├── docker-tests-README.md
│ ├── error-handler.test.ts
│ ├── examples
│ │ └── using-database-utils.test.ts
│ ├── extracted-nodes-db
│ │ ├── database-import.json
│ │ ├── extraction-report.json
│ │ ├── insert-nodes.sql
│ │ ├── n8n-nodes-base__Airtable.json
│ │ ├── n8n-nodes-base__Discord.json
│ │ ├── n8n-nodes-base__Function.json
│ │ ├── n8n-nodes-base__HttpRequest.json
│ │ ├── n8n-nodes-base__If.json
│ │ ├── n8n-nodes-base__Slack.json
│ │ ├── n8n-nodes-base__SplitInBatches.json
│ │ └── n8n-nodes-base__Webhook.json
│ ├── factories
│ │ ├── node-factory.ts
│ │ └── property-definition-factory.ts
│ ├── fixtures
│ │ ├── .gitkeep
│ │ ├── database
│ │ │ └── test-nodes.json
│ │ ├── factories
│ │ │ ├── node.factory.ts
│ │ │ └── parser-node.factory.ts
│ │ └── template-configs.ts
│ ├── helpers
│ │ └── env-helpers.ts
│ ├── http-server-auth.test.ts
│ ├── integration
│ │ ├── ai-validation
│ │ │ ├── ai-agent-validation.test.ts
│ │ │ ├── ai-tool-validation.test.ts
│ │ │ ├── chat-trigger-validation.test.ts
│ │ │ ├── e2e-validation.test.ts
│ │ │ ├── helpers.ts
│ │ │ ├── llm-chain-validation.test.ts
│ │ │ ├── README.md
│ │ │ └── TEST_REPORT.md
│ │ ├── ci
│ │ │ └── database-population.test.ts
│ │ ├── database
│ │ │ ├── connection-management.test.ts
│ │ │ ├── empty-database.test.ts
│ │ │ ├── fts5-search.test.ts
│ │ │ ├── node-fts5-search.test.ts
│ │ │ ├── node-repository.test.ts
│ │ │ ├── performance.test.ts
│ │ │ ├── sqljs-memory-leak.test.ts
│ │ │ ├── template-node-configs.test.ts
│ │ │ ├── template-repository.test.ts
│ │ │ ├── test-utils.ts
│ │ │ └── transactions.test.ts
│ │ ├── database-integration.test.ts
│ │ ├── docker
│ │ │ ├── docker-config.test.ts
│ │ │ ├── docker-entrypoint.test.ts
│ │ │ └── test-helpers.ts
│ │ ├── flexible-instance-config.test.ts
│ │ ├── mcp
│ │ │ └── template-examples-e2e.test.ts
│ │ ├── mcp-protocol
│ │ │ ├── basic-connection.test.ts
│ │ │ ├── error-handling.test.ts
│ │ │ ├── performance.test.ts
│ │ │ ├── protocol-compliance.test.ts
│ │ │ ├── README.md
│ │ │ ├── session-management.test.ts
│ │ │ ├── test-helpers.ts
│ │ │ ├── tool-invocation.test.ts
│ │ │ └── workflow-error-validation.test.ts
│ │ ├── msw-setup.test.ts
│ │ ├── n8n-api
│ │ │ ├── executions
│ │ │ │ ├── delete-execution.test.ts
│ │ │ │ ├── get-execution.test.ts
│ │ │ │ ├── list-executions.test.ts
│ │ │ │ └── trigger-webhook.test.ts
│ │ │ ├── scripts
│ │ │ │ └── cleanup-orphans.ts
│ │ │ ├── system
│ │ │ │ ├── diagnostic.test.ts
│ │ │ │ ├── health-check.test.ts
│ │ │ │ └── list-tools.test.ts
│ │ │ ├── test-connection.ts
│ │ │ ├── types
│ │ │ │ └── mcp-responses.ts
│ │ │ ├── utils
│ │ │ │ ├── cleanup-helpers.ts
│ │ │ │ ├── credentials.ts
│ │ │ │ ├── factories.ts
│ │ │ │ ├── fixtures.ts
│ │ │ │ ├── mcp-context.ts
│ │ │ │ ├── n8n-client.ts
│ │ │ │ ├── node-repository.ts
│ │ │ │ ├── response-types.ts
│ │ │ │ ├── test-context.ts
│ │ │ │ └── webhook-workflows.ts
│ │ │ └── workflows
│ │ │ ├── autofix-workflow.test.ts
│ │ │ ├── create-workflow.test.ts
│ │ │ ├── delete-workflow.test.ts
│ │ │ ├── get-workflow-details.test.ts
│ │ │ ├── get-workflow-minimal.test.ts
│ │ │ ├── get-workflow-structure.test.ts
│ │ │ ├── get-workflow.test.ts
│ │ │ ├── list-workflows.test.ts
│ │ │ ├── smart-parameters.test.ts
│ │ │ ├── update-partial-workflow.test.ts
│ │ │ ├── update-workflow.test.ts
│ │ │ └── validate-workflow.test.ts
│ │ ├── security
│ │ │ ├── command-injection-prevention.test.ts
│ │ │ └── rate-limiting.test.ts
│ │ ├── setup
│ │ │ ├── integration-setup.ts
│ │ │ └── msw-test-server.ts
│ │ ├── telemetry
│ │ │ ├── docker-user-id-stability.test.ts
│ │ │ └── mcp-telemetry.test.ts
│ │ ├── templates
│ │ │ └── metadata-operations.test.ts
│ │ └── workflow-creation-node-type-format.test.ts
│ ├── logger.test.ts
│ ├── MOCKING_STRATEGY.md
│ ├── mocks
│ │ ├── n8n-api
│ │ │ ├── data
│ │ │ │ ├── credentials.ts
│ │ │ │ ├── executions.ts
│ │ │ │ └── workflows.ts
│ │ │ ├── handlers.ts
│ │ │ └── index.ts
│ │ └── README.md
│ ├── node-storage-export.json
│ ├── setup
│ │ ├── global-setup.ts
│ │ ├── msw-setup.ts
│ │ ├── TEST_ENV_DOCUMENTATION.md
│ │ └── test-env.ts
│ ├── test-database-extraction.js
│ ├── test-direct-extraction.js
│ ├── test-enhanced-documentation.js
│ ├── test-enhanced-integration.js
│ ├── test-mcp-extraction.js
│ ├── test-mcp-server-extraction.js
│ ├── test-mcp-tools-integration.js
│ ├── test-node-documentation-service.js
│ ├── test-node-list.js
│ ├── test-package-info.js
│ ├── test-parsing-operations.js
│ ├── test-slack-node-complete.js
│ ├── test-small-rebuild.js
│ ├── test-sqlite-search.js
│ ├── test-storage-system.js
│ ├── unit
│ │ ├── __mocks__
│ │ │ ├── n8n-nodes-base.test.ts
│ │ │ ├── n8n-nodes-base.ts
│ │ │ └── README.md
│ │ ├── database
│ │ │ ├── __mocks__
│ │ │ │ └── better-sqlite3.ts
│ │ │ ├── database-adapter-unit.test.ts
│ │ │ ├── node-repository-core.test.ts
│ │ │ ├── node-repository-operations.test.ts
│ │ │ ├── node-repository-outputs.test.ts
│ │ │ ├── README.md
│ │ │ └── template-repository-core.test.ts
│ │ ├── docker
│ │ │ ├── config-security.test.ts
│ │ │ ├── edge-cases.test.ts
│ │ │ ├── parse-config.test.ts
│ │ │ └── serve-command.test.ts
│ │ ├── errors
│ │ │ └── validation-service-error.test.ts
│ │ ├── examples
│ │ │ └── using-n8n-nodes-base-mock.test.ts
│ │ ├── flexible-instance-security-advanced.test.ts
│ │ ├── flexible-instance-security.test.ts
│ │ ├── http-server
│ │ │ └── multi-tenant-support.test.ts
│ │ ├── http-server-n8n-mode.test.ts
│ │ ├── http-server-n8n-reinit.test.ts
│ │ ├── http-server-session-management.test.ts
│ │ ├── loaders
│ │ │ └── node-loader.test.ts
│ │ ├── mappers
│ │ │ └── docs-mapper.test.ts
│ │ ├── mcp
│ │ │ ├── get-node-essentials-examples.test.ts
│ │ │ ├── handlers-n8n-manager-simple.test.ts
│ │ │ ├── handlers-n8n-manager.test.ts
│ │ │ ├── handlers-workflow-diff.test.ts
│ │ │ ├── lru-cache-behavior.test.ts
│ │ │ ├── multi-tenant-tool-listing.test.ts.disabled
│ │ │ ├── parameter-validation.test.ts
│ │ │ ├── search-nodes-examples.test.ts
│ │ │ ├── tools-documentation.test.ts
│ │ │ └── tools.test.ts
│ │ ├── monitoring
│ │ │ └── cache-metrics.test.ts
│ │ ├── MULTI_TENANT_TEST_COVERAGE.md
│ │ ├── multi-tenant-integration.test.ts
│ │ ├── parsers
│ │ │ ├── node-parser-outputs.test.ts
│ │ │ ├── node-parser.test.ts
│ │ │ ├── property-extractor.test.ts
│ │ │ └── simple-parser.test.ts
│ │ ├── scripts
│ │ │ └── fetch-templates-extraction.test.ts
│ │ ├── services
│ │ │ ├── ai-node-validator.test.ts
│ │ │ ├── ai-tool-validators.test.ts
│ │ │ ├── confidence-scorer.test.ts
│ │ │ ├── config-validator-basic.test.ts
│ │ │ ├── config-validator-edge-cases.test.ts
│ │ │ ├── config-validator-node-specific.test.ts
│ │ │ ├── config-validator-security.test.ts
│ │ │ ├── debug-validator.test.ts
│ │ │ ├── enhanced-config-validator-integration.test.ts
│ │ │ ├── enhanced-config-validator-operations.test.ts
│ │ │ ├── enhanced-config-validator.test.ts
│ │ │ ├── example-generator.test.ts
│ │ │ ├── execution-processor.test.ts
│ │ │ ├── expression-format-validator.test.ts
│ │ │ ├── expression-validator-edge-cases.test.ts
│ │ │ ├── expression-validator.test.ts
│ │ │ ├── fixed-collection-validation.test.ts
│ │ │ ├── loop-output-edge-cases.test.ts
│ │ │ ├── n8n-api-client.test.ts
│ │ │ ├── n8n-validation.test.ts
│ │ │ ├── node-sanitizer.test.ts
│ │ │ ├── node-similarity-service.test.ts
│ │ │ ├── node-specific-validators.test.ts
│ │ │ ├── operation-similarity-service-comprehensive.test.ts
│ │ │ ├── operation-similarity-service.test.ts
│ │ │ ├── property-dependencies.test.ts
│ │ │ ├── property-filter-edge-cases.test.ts
│ │ │ ├── property-filter.test.ts
│ │ │ ├── resource-similarity-service-comprehensive.test.ts
│ │ │ ├── resource-similarity-service.test.ts
│ │ │ ├── task-templates.test.ts
│ │ │ ├── template-service.test.ts
│ │ │ ├── universal-expression-validator.test.ts
│ │ │ ├── validation-fixes.test.ts
│ │ │ ├── workflow-auto-fixer.test.ts
│ │ │ ├── workflow-diff-engine.test.ts
│ │ │ ├── workflow-fixed-collection-validation.test.ts
│ │ │ ├── workflow-validator-comprehensive.test.ts
│ │ │ ├── workflow-validator-edge-cases.test.ts
│ │ │ ├── workflow-validator-error-outputs.test.ts
│ │ │ ├── workflow-validator-expression-format.test.ts
│ │ │ ├── workflow-validator-loops-simple.test.ts
│ │ │ ├── workflow-validator-loops.test.ts
│ │ │ ├── workflow-validator-mocks.test.ts
│ │ │ ├── workflow-validator-performance.test.ts
│ │ │ ├── workflow-validator-with-mocks.test.ts
│ │ │ └── workflow-validator.test.ts
│ │ ├── telemetry
│ │ │ ├── batch-processor.test.ts
│ │ │ ├── config-manager.test.ts
│ │ │ ├── event-tracker.test.ts
│ │ │ ├── event-validator.test.ts
│ │ │ ├── rate-limiter.test.ts
│ │ │ ├── telemetry-error.test.ts
│ │ │ ├── telemetry-manager.test.ts
│ │ │ ├── v2.18.3-fixes-verification.test.ts
│ │ │ └── workflow-sanitizer.test.ts
│ │ ├── templates
│ │ │ ├── batch-processor.test.ts
│ │ │ ├── metadata-generator.test.ts
│ │ │ ├── template-repository-metadata.test.ts
│ │ │ └── template-repository-security.test.ts
│ │ ├── test-env-example.test.ts
│ │ ├── test-infrastructure.test.ts
│ │ ├── types
│ │ │ ├── instance-context-coverage.test.ts
│ │ │ └── instance-context-multi-tenant.test.ts
│ │ ├── utils
│ │ │ ├── auth-timing-safe.test.ts
│ │ │ ├── cache-utils.test.ts
│ │ │ ├── console-manager.test.ts
│ │ │ ├── database-utils.test.ts
│ │ │ ├── expression-utils.test.ts
│ │ │ ├── fixed-collection-validator.test.ts
│ │ │ ├── n8n-errors.test.ts
│ │ │ ├── node-type-normalizer.test.ts
│ │ │ ├── node-type-utils.test.ts
│ │ │ ├── node-utils.test.ts
│ │ │ ├── simple-cache-memory-leak-fix.test.ts
│ │ │ ├── ssrf-protection.test.ts
│ │ │ └── template-node-resolver.test.ts
│ │ └── validation-fixes.test.ts
│ └── utils
│ ├── assertions.ts
│ ├── builders
│ │ └── workflow.builder.ts
│ ├── data-generators.ts
│ ├── database-utils.ts
│ ├── README.md
│ └── test-helpers.ts
├── thumbnail.png
├── tsconfig.build.json
├── tsconfig.json
├── types
│ ├── mcp.d.ts
│ └── test-env.d.ts
├── verify-telemetry-fix.js
├── versioned-nodes.md
├── vitest.config.benchmark.ts
├── vitest.config.integration.ts
└── vitest.config.ts
```
# Files
--------------------------------------------------------------------------------
/tests/integration/mcp-protocol/performance.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { InMemoryTransport } from '@modelcontextprotocol/sdk/inMemory.js';
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { TestableN8NMCPServer } from './test-helpers';
describe('MCP Performance Tests', () => {
let mcpServer: TestableN8NMCPServer;
let client: Client;
beforeEach(async () => {
  // Spin up a fresh in-process MCP server and connect a client over a linked
  // in-memory transport pair, so timings measure the server itself and not
  // any network stack.
  mcpServer = new TestableN8NMCPServer();
  await mcpServer.initialize();

  const [serverEnd, clientEnd] = InMemoryTransport.createLinkedPair();
  await mcpServer.connectToTransport(serverEnd);

  client = new Client(
    {
      name: 'test-client',
      version: '1.0.0'
    },
    {
      capabilities: {}
    }
  );
  await client.connect(clientEnd);

  // Sanity-check that the node database behind the server is populated —
  // benchmark numbers against an empty database would be meaningless.
  const statsResponse = await client.callTool({ name: 'get_database_statistics', arguments: {} });
  const firstPart = (statsResponse as any).content?.[0];
  if (firstPart) {
    const stats = JSON.parse(firstPart.text);
    if (!stats.totalNodes || stats.totalNodes === 0) {
      console.error('Database stats:', stats);
      throw new Error('Test database not properly populated');
    }
  }
});
afterEach(async () => {
  // Close the client first, but ALWAYS close the server afterwards — in the
  // original teardown a rejected client.close() would skip mcpServer.close(),
  // leaking the server instance into subsequent tests and skewing their
  // timing measurements.
  try {
    await client.close();
  } finally {
    await mcpServer.close();
  }
});
describe('Response Time Benchmarks', () => {
  it('should respond to simple queries quickly', async () => {
    // Average the cheapest tool call over many iterations to smooth jitter.
    const iterations = 100;

    const begin = performance.now();
    for (let i = 0; i < iterations; i++) {
      await client.callTool({ name: 'get_database_statistics', arguments: {} });
    }
    const elapsed = performance.now() - begin;
    const perCall = elapsed / iterations;

    console.log(`Average response time for get_database_statistics: ${perCall.toFixed(2)}ms`);
    console.log(`Environment: ${process.env.CI ? 'CI' : 'Local'}`);

    // Environment-aware threshold (relaxed +20% for type safety overhead)
    const threshold = process.env.CI ? 20 : 12;
    expect(perCall).toBeLessThan(threshold);
  });

  it('should handle list operations efficiently', async () => {
    const iterations = 50;

    const begin = performance.now();
    for (let i = 0; i < iterations; i++) {
      await client.callTool({ name: 'list_nodes', arguments: { limit: 10 } });
    }
    const elapsed = performance.now() - begin;
    const perCall = elapsed / iterations;

    console.log(`Average response time for list_nodes: ${perCall.toFixed(2)}ms`);
    console.log(`Environment: ${process.env.CI ? 'CI' : 'Local'}`);

    // Environment-aware threshold
    const threshold = process.env.CI ? 40 : 20;
    expect(perCall).toBeLessThan(threshold);
  });

  it('should perform searches efficiently', async () => {
    // Cycle through a handful of realistic queries so the search path is not
    // measured against a single cached term.
    const searches = ['http', 'webhook', 'slack', 'database', 'api'];
    const iterations = 20;

    const begin = performance.now();
    for (let round = 0; round < iterations; round++) {
      for (const query of searches) {
        await client.callTool({ name: 'search_nodes', arguments: { query } });
      }
    }
    const elapsed = performance.now() - begin;
    const perCall = elapsed / (iterations * searches.length);

    console.log(`Average response time for search_nodes: ${perCall.toFixed(2)}ms`);
    console.log(`Environment: ${process.env.CI ? 'CI' : 'Local'}`);

    // Environment-aware threshold
    const threshold = process.env.CI ? 60 : 30;
    expect(perCall).toBeLessThan(threshold);
  });

  it('should retrieve node info quickly', async () => {
    const nodeTypes = [
      'nodes-base.httpRequest',
      'nodes-base.webhook',
      'nodes-base.set',
      'nodes-base.if',
      'nodes-base.switch'
    ];

    const begin = performance.now();
    for (const nodeType of nodeTypes) {
      await client.callTool({ name: 'get_node_info', arguments: { nodeType } });
    }
    const elapsed = performance.now() - begin;
    const perCall = elapsed / nodeTypes.length;

    console.log(`Average response time for get_node_info: ${perCall.toFixed(2)}ms`);
    console.log(`Environment: ${process.env.CI ? 'CI' : 'Local'}`);

    // Environment-aware threshold (these are large responses)
    const threshold = process.env.CI ? 100 : 50;
    expect(perCall).toBeLessThan(threshold);
  });
});
describe('Concurrent Request Performance', () => {
  it('should handle concurrent requests efficiently', async () => {
    // Fire all requests at once and wait for the batch to settle.
    const concurrentRequests = 50;

    const begin = performance.now();
    await Promise.all(
      Array.from({ length: concurrentRequests }, () =>
        client.callTool({ name: 'list_nodes', arguments: { limit: 5 } })
      )
    );
    const elapsed = performance.now() - begin;
    const perCall = elapsed / concurrentRequests;

    console.log(`Average time for ${concurrentRequests} concurrent requests: ${perCall.toFixed(2)}ms`);
    console.log(`Environment: ${process.env.CI ? 'CI' : 'Local'}`);

    // Concurrent requests should be more efficient than sequential
    const threshold = process.env.CI ? 25 : 10;
    expect(perCall).toBeLessThan(threshold);
  });

  it('should handle mixed concurrent operations', async () => {
    // A representative mix of cheap and moderately expensive tools.
    const operations = [
      { tool: 'list_nodes', params: { limit: 10 } },
      { tool: 'search_nodes', params: { query: 'http' } },
      { tool: 'get_database_statistics', params: {} },
      { tool: 'list_ai_tools', params: {} },
      { tool: 'list_tasks', params: {} }
    ];
    const rounds = 10;

    const begin = performance.now();
    for (let round = 0; round < rounds; round++) {
      await Promise.all(
        operations.map(op => client.callTool({ name: op.tool, arguments: op.params }))
      );
    }
    const elapsed = performance.now() - begin;
    const perCall = elapsed / (rounds * operations.length);

    console.log(`Average time for mixed operations: ${perCall.toFixed(2)}ms`);
    console.log(`Environment: ${process.env.CI ? 'CI' : 'Local'}`);

    const threshold = process.env.CI ? 40 : 20;
    expect(perCall).toBeLessThan(threshold);
  });
});
describe('Large Data Performance', () => {
  it('should handle large node lists efficiently', async () => {
    const begin = performance.now();
    const response = await client.callTool({ name: 'list_nodes', arguments: {
      limit: 200 // Get many nodes
    } });
    const elapsed = performance.now() - begin;

    console.log(`Time to list 200 nodes: ${elapsed.toFixed(2)}ms`);

    // Environment-aware threshold
    const threshold = process.env.CI ? 200 : 100;
    expect(elapsed).toBeLessThan(threshold);

    expect(response).toBeDefined();

    // Normalize the possible response shapes into a plain node array before
    // asserting on it. Order of checks matters: the MCP content-array format
    // is tried first, then raw-array and object fallbacks.
    let nodes;
    const content = (response as any).content;
    if (content && Array.isArray(content) && content[0]) {
      // MCP standard response format
      expect(content[0].type).toBe('text');
      expect(content[0].text).toBeDefined();
      try {
        const parsed = JSON.parse(content[0].text);
        // list_nodes returns an object with nodes property
        nodes = parsed.nodes || parsed;
      } catch (e) {
        console.error('Failed to parse JSON:', e);
        console.error('Response text was:', content[0].text);
        throw e;
      }
    } else if (Array.isArray(response)) {
      // Direct array response
      nodes = response;
    } else if ((response as any).nodes) {
      // Object with nodes property
      nodes = (response as any).nodes;
    } else {
      console.error('Unexpected response format:', response);
      throw new Error('Unexpected response format');
    }

    expect(nodes).toBeDefined();
    expect(Array.isArray(nodes)).toBe(true);
    expect(nodes.length).toBeGreaterThan(100);
  });

  it('should handle large workflow validation efficiently', async () => {
    // Build a 100-node linear workflow: every third node is an HTTP request,
    // the rest are Set nodes, each wired to its successor.
    const nodeCount = 100;
    const nodes = Array.from({ length: nodeCount }, (_, i) => ({
      id: String(i),
      name: `Node${i}`,
      type: i % 3 === 0 ? 'nodes-base.httpRequest' : 'nodes-base.set',
      typeVersion: 1,
      position: [i * 100, 0],
      parameters: i % 3 === 0
        ? { method: 'GET', url: 'https://api.example.com' }
        : { values: { string: [{ name: 'test', value: 'value' }] } }
    }));

    const connections: any = {};
    for (let i = 1; i < nodeCount; i++) {
      connections[`Node${i - 1}`] = {
        'main': [[{ node: `Node${i}`, type: 'main', index: 0 }]]
      };
    }

    const begin = performance.now();
    const response = await client.callTool({ name: 'validate_workflow', arguments: {
      workflow: { nodes, connections }
    } });
    const elapsed = performance.now() - begin;

    console.log(`Time to validate ${nodeCount} node workflow: ${elapsed.toFixed(2)}ms`);

    // Environment-aware threshold
    const threshold = process.env.CI ? 1000 : 500;
    expect(elapsed).toBeLessThan(threshold);

    // Check the response content - MCP callTool returns content array with text
    expect(response).toBeDefined();
    const resultContent = (response as any).content;
    expect(resultContent).toBeDefined();
    expect(Array.isArray(resultContent)).toBe(true);
    expect(resultContent.length).toBeGreaterThan(0);
    expect(resultContent[0]).toBeDefined();
    expect(resultContent[0].type).toBe('text');
    expect(resultContent[0].text).toBeDefined();

    // Parse the JSON response
    const validation = JSON.parse(resultContent[0].text);
    expect(validation).toBeDefined();
    expect(validation).toHaveProperty('valid');
  });
});
describe('Memory Efficiency', () => {
  it('should handle repeated operations without memory leaks', async () => {
    const iterations = 1000;
    const batchSize = 100;

    // Snapshot the heap before the workload begins.
    const before = process.memoryUsage();

    // Issue the requests in fixed-size concurrent batches.
    for (let done = 0; done < iterations; done += batchSize) {
      await Promise.all(
        Array.from({ length: batchSize }, () =>
          client.callTool({ name: 'get_database_statistics', arguments: {} })
        )
      );

      // Force garbage collection if available (requires --expose-gc)
      if (global.gc) {
        global.gc();
      }
    }

    const after = process.memoryUsage();
    const growth = after.heapUsed - before.heapUsed;

    console.log(`Memory increase after ${iterations} operations: ${(growth / 1024 / 1024).toFixed(2)}MB`);

    // Memory increase should be reasonable (less than 50MB)
    expect(growth).toBeLessThan(50 * 1024 * 1024);
  });

  it('should release memory after large operations', async () => {
    const before = process.memoryUsage();

    // Perform large operations
    for (let i = 0; i < 10; i++) {
      await client.callTool({ name: 'list_nodes', arguments: { limit: 200 } });
      await client.callTool({ name: 'get_node_info', arguments: {
        nodeType: 'nodes-base.httpRequest'
      } });
    }

    // Force garbage collection if available, then give finalizers a moment.
    if (global.gc) {
      global.gc();
      await new Promise(resolve => setTimeout(resolve, 100));
    }

    const after = process.memoryUsage();
    const growth = after.heapUsed - before.heapUsed;

    console.log(`Memory increase after large operations: ${(growth / 1024 / 1024).toFixed(2)}MB`);

    // Should not retain excessive memory
    expect(growth).toBeLessThan(20 * 1024 * 1024);
  });
});
describe('Scalability Tests', () => {
  it('should maintain performance with increasing load', async () => {
    const loadLevels = [10, 50, 100, 200];
    const results: any[] = [];
    for (const load of loadLevels) {
      const start = performance.now();
      // Issue `load` identical requests concurrently and time the whole batch.
      await Promise.all(
        Array.from({ length: load }, () =>
          client.callTool({ name: 'list_nodes', arguments: { limit: 1 } })
        )
      );
      const duration = performance.now() - start;
      const avgTime = duration / load;
      results.push({ load, totalTime: duration, avgTime });
      console.log(`Load ${load}: Total ${duration.toFixed(2)}ms, Avg ${avgTime.toFixed(2)}ms`);
    }
    // Average time should not increase dramatically with load
    const firstAvg = results[0].avgTime;
    const lastAvg = results[results.length - 1].avgTime;
    console.log(`Environment: ${process.env.CI ? 'CI' : 'Local'}`);
    console.log(`Performance scaling - First avg: ${firstAvg.toFixed(2)}ms, Last avg: ${lastAvg.toFixed(2)}ms`);
    // Environment-aware scaling factor
    const scalingFactor = process.env.CI ? 3 : 2;
    expect(lastAvg).toBeLessThan(firstAvg * scalingFactor);
  });
  it('should handle burst traffic', async () => {
    const burstSize = 100;
    // Round-robin over four different tool calls to simulate mixed burst traffic.
    const requestKinds = [
      () => client.callTool({ name: 'list_nodes', arguments: { limit: 5 } }),
      () => client.callTool({ name: 'search_nodes', arguments: { query: 'test' } }),
      () => client.callTool({ name: 'get_database_statistics', arguments: {} }),
      () => client.callTool({ name: 'list_ai_tools', arguments: {} })
    ];
    const start = performance.now();
    await Promise.all(
      Array.from({ length: burstSize }, (_, i) => requestKinds[i % 4]())
    );
    const duration = performance.now() - start;
    console.log(`Burst of ${burstSize} requests completed in ${duration.toFixed(2)}ms`);
    console.log(`Environment: ${process.env.CI ? 'CI' : 'Local'}`);
    // Should handle burst within reasonable time
    const threshold = process.env.CI ? 2000 : 1000;
    expect(duration).toBeLessThan(threshold);
  });
});
// Latency checks for the hottest read paths. Each test warms up first, then
// trims outliers from the sampled timings before asserting on the average,
// so a single slow scheduler hiccup cannot fail the suite.
describe('Critical Path Optimization', () => {
it('should optimize tool listing performance', async () => {
// Warm up with multiple calls to ensure everything is initialized
for (let i = 0; i < 5; i++) {
await client.callTool({ name: 'list_nodes', arguments: { limit: 1 } });
}
const iterations = 100;
const times: number[] = [];
// Time each call individually so per-request statistics can be derived.
for (let i = 0; i < iterations; i++) {
const start = performance.now();
await client.callTool({ name: 'list_nodes', arguments: { limit: 20 } });
times.push(performance.now() - start);
}
// Remove outliers (first few runs might be slower)
times.sort((a, b) => a - b);
// 10 of 100 samples dropped from each end = top/bottom 10%.
const trimmedTimes = times.slice(10, -10); // Remove top and bottom 10%
const avgTime = trimmedTimes.reduce((a, b) => a + b, 0) / trimmedTimes.length;
const minTime = Math.min(...trimmedTimes);
const maxTime = Math.max(...trimmedTimes);
console.log(`list_nodes performance - Avg: ${avgTime.toFixed(2)}ms, Min: ${minTime.toFixed(2)}ms, Max: ${maxTime.toFixed(2)}ms`);
console.log(`Environment: ${process.env.CI ? 'CI' : 'Local'}`);
// Environment-aware thresholds
const threshold = process.env.CI ? 25 : 10;
expect(avgTime).toBeLessThan(threshold);
// Max should not be too much higher than average (no outliers)
// More lenient in CI due to resource contention
const maxMultiplier = process.env.CI ? 5 : 3;
expect(maxTime).toBeLessThan(avgTime * maxMultiplier);
});
it('should optimize search performance', async () => {
// Warm up with multiple calls
for (let i = 0; i < 3; i++) {
await client.callTool({ name: 'search_nodes', arguments: { query: 'test' } });
}
// 5 queries x 20 repetitions = 100 timing samples total.
const queries = ['http', 'webhook', 'database', 'api', 'slack'];
const times: number[] = [];
for (const query of queries) {
for (let i = 0; i < 20; i++) {
const start = performance.now();
await client.callTool({ name: 'search_nodes', arguments: { query } });
times.push(performance.now() - start);
}
}
// Remove outliers
times.sort((a, b) => a - b);
const trimmedTimes = times.slice(10, -10); // Remove top and bottom 10%
const avgTime = trimmedTimes.reduce((a, b) => a + b, 0) / trimmedTimes.length;
console.log(`search_nodes average performance: ${avgTime.toFixed(2)}ms`);
console.log(`Environment: ${process.env.CI ? 'CI' : 'Local'}`);
// Environment-aware threshold
const threshold = process.env.CI ? 35 : 15;
expect(avgTime).toBeLessThan(threshold);
});
it('should cache effectively for repeated queries', async () => {
const nodeType = 'nodes-base.httpRequest';
// First call (cold)
const coldStart = performance.now();
await client.callTool({ name: 'get_node_info', arguments: { nodeType } });
const coldTime = performance.now() - coldStart;
// Give cache time to settle
await new Promise(resolve => setTimeout(resolve, 10));
// Subsequent calls (potentially cached)
const warmTimes: number[] = [];
for (let i = 0; i < 10; i++) {
const start = performance.now();
await client.callTool({ name: 'get_node_info', arguments: { nodeType } });
warmTimes.push(performance.now() - start);
}
// Remove outliers from warm times
warmTimes.sort((a, b) => a - b);
const trimmedWarmTimes = warmTimes.slice(1, -1); // Remove highest and lowest
const avgWarmTime = trimmedWarmTimes.reduce((a, b) => a + b, 0) / trimmedWarmTimes.length;
console.log(`Cold time: ${coldTime.toFixed(2)}ms, Avg warm time: ${avgWarmTime.toFixed(2)}ms`);
console.log(`Environment: ${process.env.CI ? 'CI' : 'Local'}`);
// In CI, caching might not be as effective due to resource constraints
const cacheMultiplier = process.env.CI ? 1.5 : 1.1;
// Warm calls should be faster or at least not significantly slower
expect(avgWarmTime).toBeLessThanOrEqual(coldTime * cacheMultiplier);
});
});
// Sustained-load and recovery checks. Requests in the first test are issued
// sequentially, so the measured RPS is approximately 1 / per-call latency.
describe('Stress Tests', () => {
it('should handle sustained high load', async () => {
const duration = 5000; // 5 seconds
const start = performance.now();
let requestCount = 0;
let errorCount = 0;
// Hammer the server serially until the time budget is used up.
while (performance.now() - start < duration) {
try {
await client.callTool({ name: 'get_database_statistics', arguments: {} });
requestCount++;
} catch (error) {
errorCount++;
}
}
const actualDuration = performance.now() - start;
const requestsPerSecond = requestCount / (actualDuration / 1000);
console.log(`Sustained load test - Requests: ${requestCount}, RPS: ${requestsPerSecond.toFixed(2)}, Errors: ${errorCount}`);
console.log(`Environment: ${process.env.CI ? 'CI' : 'Local'}`);
// Environment-aware RPS threshold (relaxed -8% for type safety overhead)
const rpsThreshold = process.env.CI ? 50 : 92;
expect(requestsPerSecond).toBeGreaterThan(rpsThreshold);
// Error rate should be very low
expect(errorCount).toBe(0);
});
it('should recover from performance degradation', async () => {
// Create heavy load
// 200 concurrent validations of a synthetic 20-node workflow.
const heavyPromises = [];
for (let i = 0; i < 200; i++) {
heavyPromises.push(
client.callTool({ name: 'validate_workflow', arguments: {
workflow: {
nodes: Array(20).fill(null).map((_, idx) => ({
id: String(idx),
name: `Node${idx}`,
type: 'nodes-base.set',
typeVersion: 1,
position: [idx * 100, 0],
parameters: {}
})),
connections: {}
}
} })
);
}
await Promise.all(heavyPromises);
// Measure performance after heavy load
const recoveryTimes: number[] = [];
for (let i = 0; i < 10; i++) {
const start = performance.now();
await client.callTool({ name: 'get_database_statistics', arguments: {} });
recoveryTimes.push(performance.now() - start);
}
const avgRecoveryTime = recoveryTimes.reduce((a, b) => a + b, 0) / recoveryTimes.length;
console.log(`Average response time after heavy load: ${avgRecoveryTime.toFixed(2)}ms`);
console.log(`Environment: ${process.env.CI ? 'CI' : 'Local'}`);
// Should recover to normal performance (relaxed +20% for type safety overhead)
const threshold = process.env.CI ? 25 : 12;
expect(avgRecoveryTime).toBeLessThan(threshold);
});
});
});
```
--------------------------------------------------------------------------------
/tests/unit/services/ai-node-validator.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import {
validateAIAgent,
validateChatTrigger,
validateBasicLLMChain,
buildReverseConnectionMap,
getAIConnections,
validateAISpecificNodes,
type WorkflowNode,
type WorkflowJson
} from '@/services/ai-node-validator';
describe('AI Node Validator', () => {
describe('buildReverseConnectionMap', () => {
  it('should build reverse connections for AI language model', () => {
    const workflow: WorkflowJson = {
      nodes: [],
      connections: {
        'OpenAI': {
          'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
        }
      }
    };
    const incoming = buildReverseConnectionMap(workflow);
    // The agent should see exactly one incoming connection, sourced from OpenAI.
    expect(incoming.get('AI Agent')).toEqual([
      {
        sourceName: 'OpenAI',
        sourceType: 'ai_languageModel',
        type: 'ai_languageModel',
        index: 0
      }
    ]);
  });
  it('should handle multiple AI connections to same node', () => {
    // Three distinct AI ports (model, tool, memory) all feed the same agent.
    const workflow: WorkflowJson = {
      nodes: [],
      connections: {
        'OpenAI': {
          'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
        },
        'HTTP Request Tool': {
          'ai_tool': [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
        },
        'Window Buffer Memory': {
          'ai_memory': [[{ node: 'AI Agent', type: 'ai_memory', index: 0 }]]
        }
      }
    };
    const incoming = buildReverseConnectionMap(workflow).get('AI Agent');
    expect(incoming).toHaveLength(3);
    // Each connection kind must be represented once.
    for (const kind of ['ai_languageModel', 'ai_tool', 'ai_memory']) {
      expect(incoming).toContainEqual(expect.objectContaining({ type: kind }));
    }
  });
  it('should skip empty source names', () => {
    const workflow: WorkflowJson = {
      nodes: [],
      connections: {
        '': {
          'main': [[{ node: 'Target', type: 'main', index: 0 }]]
        }
      }
    };
    // Connections originating from a nameless node are ignored entirely.
    expect(buildReverseConnectionMap(workflow).has('Target')).toBe(false);
  });
  it('should skip empty target node names', () => {
    const workflow: WorkflowJson = {
      nodes: [],
      connections: {
        'Source': {
          'main': [[{ node: '', type: 'main', index: 0 }]]
        }
      }
    };
    // A connection pointing at a nameless target yields no map entries at all.
    expect(buildReverseConnectionMap(workflow).size).toBe(0);
  });
});
describe('getAIConnections', () => {
  it('should filter AI connections from all incoming connections', () => {
    // Mixed incoming list: one plain 'main' link plus two ai_* links.
    const reverseMap = new Map([
      ['AI Agent', [
        { sourceName: 'Chat Trigger', type: 'main', index: 0 },
        { sourceName: 'OpenAI', type: 'ai_languageModel', index: 0 },
        { sourceName: 'HTTP Tool', type: 'ai_tool', index: 0 }
      ]]
    ]);
    const aiConnections = getAIConnections('AI Agent', reverseMap);
    // Only the ai_* connections survive; the 'main' link is filtered out.
    expect(aiConnections).toHaveLength(2);
    expect(aiConnections).not.toContainEqual(
      expect.objectContaining({ type: 'main' })
    );
  });
  it('should filter by specific AI connection type', () => {
    const reverseMap = new Map([
      ['AI Agent', [
        { sourceName: 'OpenAI', type: 'ai_languageModel', index: 0 },
        { sourceName: 'Tool1', type: 'ai_tool', index: 0 },
        { sourceName: 'Tool2', type: 'ai_tool', index: 1 }
      ]]
    ]);
    // Passing an explicit type narrows the result to that kind only.
    const toolConnections = getAIConnections('AI Agent', reverseMap, 'ai_tool');
    expect(toolConnections).toHaveLength(2);
    expect(toolConnections.every(c => c.type === 'ai_tool')).toBe(true);
  });
  it('should return empty array for node with no connections', () => {
    // An unknown node name yields an empty array, not undefined.
    expect(getAIConnections('Unknown Node', new Map())).toEqual([]);
  });
});
// Validation rules for the AI Agent node: exactly 1-2 language models, at most
// one memory, prompt/streaming/output-parser consistency checks. Each test
// builds a minimal workflow fixture and asserts on the issue list returned.
describe('validateAIAgent', () => {
it('should error on missing language model connection', () => {
const node: WorkflowNode = {
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [0, 0],
parameters: {}
};
// No connections at all, so the required ai_languageModel input is absent.
const workflow: WorkflowJson = {
nodes: [node],
connections: {}
};
const reverseMap = buildReverseConnectionMap(workflow);
const issues = validateAIAgent(node, reverseMap, workflow);
expect(issues).toContainEqual(
expect.objectContaining({
severity: 'error',
message: expect.stringContaining('language model')
})
);
});
it('should accept single language model connection', () => {
const agent: WorkflowNode = {
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [0, 0],
parameters: { promptType: 'auto' }
};
const model: WorkflowNode = {
id: 'llm1',
name: 'OpenAI',
type: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
position: [0, -100],
parameters: {}
};
const workflow: WorkflowJson = {
nodes: [agent, model],
connections: {
'OpenAI': {
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
}
}
};
const reverseMap = buildReverseConnectionMap(workflow);
const issues = validateAIAgent(agent, reverseMap, workflow);
// Other issues may be reported; only language-model errors must be absent.
const languageModelErrors = issues.filter(i =>
i.severity === 'error' && i.message.includes('language model')
);
expect(languageModelErrors).toHaveLength(0);
});
it('should accept dual language model connection for fallback', () => {
const agent: WorkflowNode = {
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [0, 0],
parameters: { promptType: 'auto' },
typeVersion: 1.7
};
// Two models on indices 0 and 1: primary plus fallback, which is allowed.
const workflow: WorkflowJson = {
nodes: [agent],
connections: {
'OpenAI GPT-4': {
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
},
'OpenAI GPT-3.5': {
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 1 }]]
}
}
};
const reverseMap = buildReverseConnectionMap(workflow);
const issues = validateAIAgent(agent, reverseMap, workflow);
const excessModelErrors = issues.filter(i =>
i.severity === 'error' && i.message.includes('more than 2')
);
expect(excessModelErrors).toHaveLength(0);
});
it('should error on more than 2 language model connections', () => {
const agent: WorkflowNode = {
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [0, 0],
parameters: {}
};
// Three models exceed the primary+fallback maximum of two.
const workflow: WorkflowJson = {
nodes: [agent],
connections: {
'Model1': {
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
},
'Model2': {
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 1 }]]
},
'Model3': {
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 2 }]]
}
}
};
const reverseMap = buildReverseConnectionMap(workflow);
const issues = validateAIAgent(agent, reverseMap, workflow);
expect(issues).toContainEqual(
expect.objectContaining({
severity: 'error',
code: 'TOO_MANY_LANGUAGE_MODELS'
})
);
});
it('should error on streaming mode with main output connections', () => {
// streamResponse and a downstream main connection are mutually exclusive.
const agent: WorkflowNode = {
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [0, 0],
parameters: {
promptType: 'auto',
options: { streamResponse: true }
}
};
const responseNode: WorkflowNode = {
id: 'response1',
name: 'Response Node',
type: 'n8n-nodes-base.respondToWebhook',
position: [200, 0],
parameters: {}
};
const workflow: WorkflowJson = {
nodes: [agent, responseNode],
connections: {
'OpenAI': {
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
},
'AI Agent': {
'main': [[{ node: 'Response Node', type: 'main', index: 0 }]]
}
}
};
const reverseMap = buildReverseConnectionMap(workflow);
const issues = validateAIAgent(agent, reverseMap, workflow);
expect(issues).toContainEqual(
expect.objectContaining({
severity: 'error',
code: 'STREAMING_WITH_MAIN_OUTPUT'
})
);
});
it('should error on missing prompt text for define promptType', () => {
// promptType 'define' requires an explicit prompt text parameter.
const agent: WorkflowNode = {
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [0, 0],
parameters: {
promptType: 'define'
}
};
const workflow: WorkflowJson = {
nodes: [agent],
connections: {
'OpenAI': {
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
}
}
};
const reverseMap = buildReverseConnectionMap(workflow);
const issues = validateAIAgent(agent, reverseMap, workflow);
expect(issues).toContainEqual(
expect.objectContaining({
severity: 'error',
code: 'MISSING_PROMPT_TEXT'
})
);
});
it('should info on short systemMessage', () => {
const agent: WorkflowNode = {
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [0, 0],
parameters: {
promptType: 'auto',
systemMessage: 'Help user' // Too short (< 20 chars)
}
};
const workflow: WorkflowJson = {
nodes: [agent],
connections: {
'OpenAI': {
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
}
}
};
const reverseMap = buildReverseConnectionMap(workflow);
const issues = validateAIAgent(agent, reverseMap, workflow);
// Only an informational note, not an error: short prompts are legal.
expect(issues).toContainEqual(
expect.objectContaining({
severity: 'info',
message: expect.stringContaining('systemMessage is very short')
})
);
});
it('should error on multiple memory connections', () => {
const agent: WorkflowNode = {
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [0, 0],
parameters: { promptType: 'auto' }
};
// Unlike language models, only a single ai_memory connection is allowed.
const workflow: WorkflowJson = {
nodes: [agent],
connections: {
'OpenAI': {
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
},
'Memory1': {
'ai_memory': [[{ node: 'AI Agent', type: 'ai_memory', index: 0 }]]
},
'Memory2': {
'ai_memory': [[{ node: 'AI Agent', type: 'ai_memory', index: 1 }]]
}
}
};
const reverseMap = buildReverseConnectionMap(workflow);
const issues = validateAIAgent(agent, reverseMap, workflow);
expect(issues).toContainEqual(
expect.objectContaining({
severity: 'error',
code: 'MULTIPLE_MEMORY_CONNECTIONS'
})
);
});
it('should warn on high maxIterations', () => {
const agent: WorkflowNode = {
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [0, 0],
parameters: {
promptType: 'auto',
maxIterations: 60 // Exceeds threshold of 50
}
};
const workflow: WorkflowJson = {
nodes: [agent],
connections: {
'OpenAI': {
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
}
}
};
const reverseMap = buildReverseConnectionMap(workflow);
const issues = validateAIAgent(agent, reverseMap, workflow);
expect(issues).toContainEqual(
expect.objectContaining({
severity: 'warning',
message: expect.stringContaining('maxIterations')
})
);
});
it('should validate output parser with hasOutputParser flag', () => {
// hasOutputParser: true with no ai_outputParser connection is an error.
const agent: WorkflowNode = {
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [0, 0],
parameters: {
promptType: 'auto',
hasOutputParser: true
}
};
const workflow: WorkflowJson = {
nodes: [agent],
connections: {
'OpenAI': {
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
}
}
};
const reverseMap = buildReverseConnectionMap(workflow);
const issues = validateAIAgent(agent, reverseMap, workflow);
expect(issues).toContainEqual(
expect.objectContaining({
severity: 'error',
message: expect.stringContaining('output parser')
})
);
});
});
// Chat Trigger rules: a streaming trigger may only feed an AI Agent, and a
// trigger with no outgoing connections at all is flagged as an error.
describe('validateChatTrigger', () => {
it('should error on streaming mode to non-AI-Agent target', () => {
const trigger: WorkflowNode = {
id: 'chat1',
name: 'Chat Trigger',
type: '@n8n/n8n-nodes-langchain.chatTrigger',
position: [0, 0],
parameters: {
options: { responseMode: 'streaming' }
}
};
// A plain Code node cannot consume a streaming response.
const codeNode: WorkflowNode = {
id: 'code1',
name: 'Code',
type: 'n8n-nodes-base.code',
position: [200, 0],
parameters: {}
};
const workflow: WorkflowJson = {
nodes: [trigger, codeNode],
connections: {
'Chat Trigger': {
'main': [[{ node: 'Code', type: 'main', index: 0 }]]
}
}
};
const reverseMap = buildReverseConnectionMap(workflow);
const issues = validateChatTrigger(trigger, workflow, reverseMap);
expect(issues).toContainEqual(
expect.objectContaining({
severity: 'error',
code: 'STREAMING_WRONG_TARGET'
})
);
});
it('should pass valid Chat Trigger with streaming to AI Agent', () => {
const trigger: WorkflowNode = {
id: 'chat1',
name: 'Chat Trigger',
type: '@n8n/n8n-nodes-langchain.chatTrigger',
position: [0, 0],
parameters: {
options: { responseMode: 'streaming' }
}
};
const agent: WorkflowNode = {
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [200, 0],
parameters: {}
};
const workflow: WorkflowJson = {
nodes: [trigger, agent],
connections: {
'Chat Trigger': {
'main': [[{ node: 'AI Agent', type: 'main', index: 0 }]]
}
}
};
const reverseMap = buildReverseConnectionMap(workflow);
const issues = validateChatTrigger(trigger, workflow, reverseMap);
// Streaming into an AI Agent is the supported configuration: no errors.
const errors = issues.filter(i => i.severity === 'error');
expect(errors).toHaveLength(0);
});
it('should error on missing outgoing connections', () => {
const trigger: WorkflowNode = {
id: 'chat1',
name: 'Chat Trigger',
type: '@n8n/n8n-nodes-langchain.chatTrigger',
position: [0, 0],
parameters: {}
};
// A trigger that leads nowhere is useless and must be reported.
const workflow: WorkflowJson = {
nodes: [trigger],
connections: {}
};
const reverseMap = buildReverseConnectionMap(workflow);
const issues = validateChatTrigger(trigger, workflow, reverseMap);
expect(issues).toContainEqual(
expect.objectContaining({
severity: 'error',
code: 'MISSING_CONNECTIONS'
})
);
});
});
describe('validateBasicLLMChain', () => {
  it('should error on missing language model connection', () => {
    const chain: WorkflowNode = {
      id: 'chain1',
      name: 'LLM Chain',
      type: '@n8n/n8n-nodes-langchain.chainLlm',
      position: [0, 0],
      parameters: {}
    };
    // No connections at all, so the mandatory ai_languageModel input is absent.
    const workflow: WorkflowJson = { nodes: [chain], connections: {} };
    const issues = validateBasicLLMChain(chain, buildReverseConnectionMap(workflow));
    expect(issues).toContainEqual(
      expect.objectContaining({
        severity: 'error',
        message: expect.stringContaining('language model')
      })
    );
  });
  it('should pass valid LLM Chain', () => {
    const chain: WorkflowNode = {
      id: 'chain1',
      name: 'LLM Chain',
      type: '@n8n/n8n-nodes-langchain.chainLlm',
      position: [0, 0],
      parameters: {
        prompt: 'Summarize the following text: {{$json.text}}'
      }
    };
    const workflow: WorkflowJson = {
      nodes: [chain],
      connections: {
        'OpenAI': {
          'ai_languageModel': [[{ node: 'LLM Chain', type: 'ai_languageModel', index: 0 }]]
        }
      }
    };
    const issues = validateBasicLLMChain(chain, buildReverseConnectionMap(workflow));
    // A wired-up model plus a prompt must produce no error-severity issues.
    expect(issues.filter(i => i.severity === 'error')).toHaveLength(0);
  });
});
// End-to-end entry point: validateAISpecificNodes walks the whole workflow and
// aggregates issues from all AI node validators (agent, trigger, tool sub-nodes).
describe('validateAISpecificNodes', () => {
it('should validate complete AI Agent workflow', () => {
const chatTrigger: WorkflowNode = {
id: 'chat1',
name: 'Chat Trigger',
type: '@n8n/n8n-nodes-langchain.chatTrigger',
position: [0, 0],
parameters: {}
};
const agent: WorkflowNode = {
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [200, 0],
parameters: {
promptType: 'auto'
}
};
const model: WorkflowNode = {
id: 'llm1',
name: 'OpenAI',
type: '@n8n/n8n-nodes-langchain.lmChatOpenAi',
position: [200, -100],
parameters: {}
};
// Fully configured HTTP tool: description, url and placeholder docs present.
const httpTool: WorkflowNode = {
id: 'tool1',
name: 'Weather API',
type: '@n8n/n8n-nodes-langchain.toolHttpRequest',
position: [200, 100],
parameters: {
toolDescription: 'Get current weather for a city',
method: 'GET',
url: 'https://api.weather.com/v1/current?city={city}',
placeholderDefinitions: {
values: [
{ name: 'city', description: 'City name' }
]
}
}
};
const workflow: WorkflowJson = {
nodes: [chatTrigger, agent, model, httpTool],
connections: {
'Chat Trigger': {
'main': [[{ node: 'AI Agent', type: 'main', index: 0 }]]
},
'OpenAI': {
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
},
'Weather API': {
'ai_tool': [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
}
}
};
const issues = validateAISpecificNodes(workflow);
// A complete, well-wired workflow must be free of error-severity issues.
const errors = issues.filter(i => i.severity === 'error');
expect(errors).toHaveLength(0);
});
it('should detect missing language model in workflow', () => {
const agent: WorkflowNode = {
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [0, 0],
parameters: {}
};
const workflow: WorkflowJson = {
nodes: [agent],
connections: {}
};
const issues = validateAISpecificNodes(workflow);
expect(issues).toContainEqual(
expect.objectContaining({
severity: 'error',
message: expect.stringContaining('language model')
})
);
});
it('should validate all AI tool sub-nodes in workflow', () => {
const agent: WorkflowNode = {
id: 'agent1',
name: 'AI Agent',
type: '@n8n/n8n-nodes-langchain.agent',
position: [0, 0],
parameters: { promptType: 'auto' }
};
const invalidTool: WorkflowNode = {
id: 'tool1',
name: 'Bad Tool',
type: '@n8n/n8n-nodes-langchain.toolHttpRequest',
position: [0, 100],
parameters: {} // Missing toolDescription and url
};
const workflow: WorkflowJson = {
nodes: [agent, invalidTool],
connections: {
'Model': {
'ai_languageModel': [[{ node: 'AI Agent', type: 'ai_languageModel', index: 0 }]]
},
'Bad Tool': {
'ai_tool': [[{ node: 'AI Agent', type: 'ai_tool', index: 0 }]]
}
}
};
const issues = validateAISpecificNodes(workflow);
// Should have errors from missing toolDescription and url
expect(issues.filter(i => i.severity === 'error').length).toBeGreaterThan(0);
});
});
});
```
--------------------------------------------------------------------------------
/scripts/generate-detailed-reports.js:
--------------------------------------------------------------------------------
```javascript
#!/usr/bin/env node
import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';
import { resolve, dirname } from 'path';
/**
* Generate detailed test reports in multiple formats
*/
class TestReportGenerator {
// Initialize the aggregate results container. The tests/coverage/benchmarks
// slots stay null until the corresponding load* method finds its input file.
constructor() {
this.results = {
tests: null,
coverage: null,
benchmarks: null,
metadata: {
timestamp: new Date().toISOString(),
// GitHub Actions environment variables; fall back to local defaults
// so the generator also works outside CI.
repository: process.env.GITHUB_REPOSITORY || 'n8n-mcp',
sha: process.env.GITHUB_SHA || 'unknown',
branch: process.env.GITHUB_REF || 'unknown',
runId: process.env.GITHUB_RUN_ID || 'local',
runNumber: process.env.GITHUB_RUN_NUMBER || '0',
}
};
}
loadTestResults() {
const testResultPath = resolve(process.cwd(), 'test-results/results.json');
if (existsSync(testResultPath)) {
try {
const data = JSON.parse(readFileSync(testResultPath, 'utf-8'));
this.results.tests = this.processTestResults(data);
} catch (error) {
console.error('Error loading test results:', error);
}
}
}
processTestResults(data) {
const processedResults = {
summary: {
total: data.numTotalTests || 0,
passed: data.numPassedTests || 0,
failed: data.numFailedTests || 0,
skipped: data.numSkippedTests || 0,
duration: data.duration || 0,
success: (data.numFailedTests || 0) === 0
},
testSuites: [],
failedTests: []
};
// Process test suites
if (data.testResults) {
for (const suite of data.testResults) {
const suiteInfo = {
name: suite.name,
duration: suite.duration || 0,
tests: {
total: suite.numPassingTests + suite.numFailingTests + suite.numPendingTests,
passed: suite.numPassingTests || 0,
failed: suite.numFailingTests || 0,
skipped: suite.numPendingTests || 0
},
status: suite.numFailingTests === 0 ? 'passed' : 'failed'
};
processedResults.testSuites.push(suiteInfo);
// Collect failed tests
if (suite.testResults) {
for (const test of suite.testResults) {
if (test.status === 'failed') {
processedResults.failedTests.push({
suite: suite.name,
test: test.title,
duration: test.duration || 0,
error: test.failureMessages ? test.failureMessages.join('\n') : 'Unknown error'
});
}
}
}
}
}
return processedResults;
}
// Read istanbul's coverage-summary.json into this.results.coverage.
// If the file is absent the coverage section is simply omitted from reports;
// a parse/processing error is logged and swallowed.
loadCoverageResults() {
const coveragePath = resolve(process.cwd(), 'coverage/coverage-summary.json');
if (existsSync(coveragePath)) {
try {
const data = JSON.parse(readFileSync(coveragePath, 'utf-8'));
this.results.coverage = this.processCoverageResults(data);
} catch (error) {
console.error('Error loading coverage results:', error);
}
}
}
processCoverageResults(data) {
const coverage = {
summary: {
lines: data.total.lines.pct,
statements: data.total.statements.pct,
functions: data.total.functions.pct,
branches: data.total.branches.pct,
average: 0
},
files: []
};
// Calculate average
coverage.summary.average = (
coverage.summary.lines +
coverage.summary.statements +
coverage.summary.functions +
coverage.summary.branches
) / 4;
// Process file coverage
for (const [filePath, fileData] of Object.entries(data)) {
if (filePath !== 'total') {
coverage.files.push({
path: filePath,
lines: fileData.lines.pct,
statements: fileData.statements.pct,
functions: fileData.functions.pct,
branches: fileData.branches.pct,
uncoveredLines: fileData.lines.total - fileData.lines.covered
});
}
}
// Sort files by coverage (lowest first)
coverage.files.sort((a, b) => a.lines - b.lines);
return coverage;
}
// Read benchmark-results.json into this.results.benchmarks.
// Same contract as the other load* methods: missing file → section omitted,
// parse error → logged and ignored.
loadBenchmarkResults() {
const benchmarkPath = resolve(process.cwd(), 'benchmark-results.json');
if (existsSync(benchmarkPath)) {
try {
const data = JSON.parse(readFileSync(benchmarkPath, 'utf-8'));
this.results.benchmarks = this.processBenchmarkResults(data);
} catch (error) {
console.error('Error loading benchmark results:', error);
}
}
}
processBenchmarkResults(data) {
const benchmarks = {
timestamp: data.timestamp,
results: []
};
for (const file of data.files || []) {
for (const group of file.groups || []) {
for (const benchmark of group.benchmarks || []) {
benchmarks.results.push({
file: file.filepath,
group: group.name,
name: benchmark.name,
ops: benchmark.result.hz,
mean: benchmark.result.mean,
min: benchmark.result.min,
max: benchmark.result.max,
p75: benchmark.result.p75,
p99: benchmark.result.p99,
samples: benchmark.result.samples
});
}
}
}
// Sort by ops/sec (highest first)
benchmarks.results.sort((a, b) => b.ops - a.ops);
return benchmarks;
}
generateMarkdownReport() {
let report = '# n8n-mcp Test Report\n\n';
report += `Generated: ${this.results.metadata.timestamp}\n\n`;
// Metadata
report += '## Build Information\n\n';
report += `- **Repository**: ${this.results.metadata.repository}\n`;
report += `- **Commit**: ${this.results.metadata.sha.substring(0, 7)}\n`;
report += `- **Branch**: ${this.results.metadata.branch}\n`;
report += `- **Run**: #${this.results.metadata.runNumber}\n\n`;
// Test Results
if (this.results.tests) {
const { summary, testSuites, failedTests } = this.results.tests;
const emoji = summary.success ? '✅' : '❌';
report += `## ${emoji} Test Results\n\n`;
report += `### Summary\n\n`;
report += `- **Total Tests**: ${summary.total}\n`;
report += `- **Passed**: ${summary.passed} (${((summary.passed / summary.total) * 100).toFixed(1)}%)\n`;
report += `- **Failed**: ${summary.failed}\n`;
report += `- **Skipped**: ${summary.skipped}\n`;
report += `- **Duration**: ${(summary.duration / 1000).toFixed(2)}s\n\n`;
// Test Suites
if (testSuites.length > 0) {
report += '### Test Suites\n\n';
report += '| Suite | Status | Tests | Duration |\n';
report += '|-------|--------|-------|----------|\n';
for (const suite of testSuites) {
const status = suite.status === 'passed' ? '✅' : '❌';
const tests = `${suite.tests.passed}/${suite.tests.total}`;
const duration = `${(suite.duration / 1000).toFixed(2)}s`;
report += `| ${suite.name} | ${status} | ${tests} | ${duration} |\n`;
}
report += '\n';
}
// Failed Tests
if (failedTests.length > 0) {
report += '### Failed Tests\n\n';
for (const failed of failedTests) {
report += `#### ${failed.suite} > ${failed.test}\n\n`;
report += '```\n';
report += failed.error;
report += '\n```\n\n';
}
}
}
// Coverage Results
if (this.results.coverage) {
const { summary, files } = this.results.coverage;
const emoji = summary.average >= 80 ? '✅' : summary.average >= 60 ? '⚠️' : '❌';
report += `## ${emoji} Coverage Report\n\n`;
report += '### Summary\n\n';
report += `- **Lines**: ${summary.lines.toFixed(2)}%\n`;
report += `- **Statements**: ${summary.statements.toFixed(2)}%\n`;
report += `- **Functions**: ${summary.functions.toFixed(2)}%\n`;
report += `- **Branches**: ${summary.branches.toFixed(2)}%\n`;
report += `- **Average**: ${summary.average.toFixed(2)}%\n\n`;
// Files with low coverage
const lowCoverageFiles = files.filter(f => f.lines < 80).slice(0, 10);
if (lowCoverageFiles.length > 0) {
report += '### Files with Low Coverage\n\n';
report += '| File | Lines | Uncovered Lines |\n';
report += '|------|-------|----------------|\n';
for (const file of lowCoverageFiles) {
const fileName = file.path.split('/').pop();
report += `| ${fileName} | ${file.lines.toFixed(1)}% | ${file.uncoveredLines} |\n`;
}
report += '\n';
}
}
// Benchmark Results
if (this.results.benchmarks && this.results.benchmarks.results.length > 0) {
report += '## ⚡ Benchmark Results\n\n';
report += '### Top Performers\n\n';
report += '| Benchmark | Ops/sec | Mean (ms) | Samples |\n';
report += '|-----------|---------|-----------|----------|\n';
for (const bench of this.results.benchmarks.results.slice(0, 10)) {
const opsFormatted = bench.ops.toLocaleString('en-US', { maximumFractionDigits: 0 });
const meanFormatted = (bench.mean * 1000).toFixed(3);
report += `| ${bench.name} | ${opsFormatted} | ${meanFormatted} | ${bench.samples} |\n`;
}
report += '\n';
}
return report;
}
generateJsonReport() {
return JSON.stringify(this.results, null, 2);
}
// Assemble the complete, self-contained HTML report (inline CSS, no external
// assets) and delegate the data sections to the generate*Html() helpers.
generateHtmlReport() {
// NOTE: everything inside the backticks is emitted verbatim into the report
// file; the ${...} placeholders are the only dynamic parts.
const htmlTemplate = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>n8n-mcp Test Report</title>
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
line-height: 1.6;
color: #333;
max-width: 1200px;
margin: 0 auto;
padding: 20px;
background-color: #f5f5f5;
}
.header {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
padding: 30px;
border-radius: 10px;
margin-bottom: 30px;
}
.header h1 {
margin: 0 0 10px 0;
font-size: 2.5em;
}
.metadata {
opacity: 0.9;
font-size: 0.9em;
}
.section {
background: white;
padding: 25px;
margin-bottom: 20px;
border-radius: 10px;
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
}
.section h2 {
margin-top: 0;
color: #333;
border-bottom: 2px solid #eee;
padding-bottom: 10px;
}
.stats {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
gap: 20px;
margin: 20px 0;
}
.stat-card {
background: #f8f9fa;
padding: 20px;
border-radius: 8px;
text-align: center;
border: 1px solid #e9ecef;
}
.stat-card .value {
font-size: 2em;
font-weight: bold;
color: #667eea;
}
.stat-card .label {
color: #666;
font-size: 0.9em;
margin-top: 5px;
}
table {
width: 100%;
border-collapse: collapse;
margin: 20px 0;
}
th, td {
padding: 12px;
text-align: left;
border-bottom: 1px solid #ddd;
}
th {
background-color: #f8f9fa;
font-weight: 600;
color: #495057;
}
tr:hover {
background-color: #f8f9fa;
}
.success { color: #28a745; }
.warning { color: #ffc107; }
.danger { color: #dc3545; }
.failed-test {
background-color: #fff5f5;
border: 1px solid #feb2b2;
border-radius: 5px;
padding: 15px;
margin: 10px 0;
}
.failed-test h4 {
margin: 0 0 10px 0;
color: #c53030;
}
.error-message {
background-color: #1a202c;
color: #e2e8f0;
padding: 15px;
border-radius: 5px;
font-family: 'Courier New', monospace;
font-size: 0.9em;
overflow-x: auto;
}
.progress-bar {
width: 100%;
height: 20px;
background-color: #e9ecef;
border-radius: 10px;
overflow: hidden;
margin: 10px 0;
}
.progress-fill {
height: 100%;
background: linear-gradient(90deg, #28a745 0%, #20c997 100%);
transition: width 0.3s ease;
}
.coverage-low { background: linear-gradient(90deg, #dc3545 0%, #f86734 100%); }
.coverage-medium { background: linear-gradient(90deg, #ffc107 0%, #ffb347 100%); }
</style>
</head>
<body>
<div class="header">
<h1>n8n-mcp Test Report</h1>
<div class="metadata">
<div>Repository: ${this.results.metadata.repository}</div>
<div>Commit: ${this.results.metadata.sha.substring(0, 7)}</div>
<div>Run: #${this.results.metadata.runNumber}</div>
<div>Generated: ${new Date(this.results.metadata.timestamp).toLocaleString()}</div>
</div>
</div>
${this.generateTestResultsHtml()}
${this.generateCoverageHtml()}
${this.generateBenchmarkHtml()}
</body>
</html>`;
// Each section helper returns '' when its data source was not loaded, so
// absent sections simply drop out of the document.
return htmlTemplate;
}
generateTestResultsHtml() {
if (!this.results.tests) return '';
const { summary, testSuites, failedTests } = this.results.tests;
const successRate = ((summary.passed / summary.total) * 100).toFixed(1);
const statusClass = summary.success ? 'success' : 'danger';
const statusIcon = summary.success ? '✅' : '❌';
let html = `
<div class="section">
<h2>${statusIcon} Test Results</h2>
<div class="stats">
<div class="stat-card">
<div class="value">${summary.total}</div>
<div class="label">Total Tests</div>
</div>
<div class="stat-card">
<div class="value ${statusClass}">${summary.passed}</div>
<div class="label">Passed</div>
</div>
<div class="stat-card">
<div class="value ${summary.failed > 0 ? 'danger' : ''}">${summary.failed}</div>
<div class="label">Failed</div>
</div>
<div class="stat-card">
<div class="value">${successRate}%</div>
<div class="label">Success Rate</div>
</div>
<div class="stat-card">
<div class="value">${(summary.duration / 1000).toFixed(1)}s</div>
<div class="label">Duration</div>
</div>
</div>`;
if (testSuites.length > 0) {
html += `
<h3>Test Suites</h3>
<table>
<thead>
<tr>
<th>Suite</th>
<th>Status</th>
<th>Tests</th>
<th>Duration</th>
</tr>
</thead>
<tbody>`;
for (const suite of testSuites) {
const status = suite.status === 'passed' ? '✅' : '❌';
const statusClass = suite.status === 'passed' ? 'success' : 'danger';
html += `
<tr>
<td>${suite.name}</td>
<td class="${statusClass}">${status}</td>
<td>${suite.tests.passed}/${suite.tests.total}</td>
<td>${(suite.duration / 1000).toFixed(2)}s</td>
</tr>`;
}
html += `
</tbody>
</table>`;
}
if (failedTests.length > 0) {
html += `
<h3>Failed Tests</h3>`;
for (const failed of failedTests) {
html += `
<div class="failed-test">
<h4>${failed.suite} > ${failed.test}</h4>
<div class="error-message">${this.escapeHtml(failed.error)}</div>
</div>`;
}
}
html += `</div>`;
return html;
}
// Render the coverage section of the HTML report; returns '' when no
// coverage data was loaded.
generateCoverageHtml() {
if (!this.results.coverage) return '';
const { summary, files } = this.results.coverage;
// Shared thresholds: >=80% green, >=60% amber, otherwise red. The text
// colour class and the progress-bar gradient class use the same cutoffs.
const coverageClass = summary.average >= 80 ? 'success' : summary.average >= 60 ? 'warning' : 'danger';
const progressClass = summary.average >= 80 ? '' : summary.average >= 60 ? 'coverage-medium' : 'coverage-low';
let html = `
<div class="section">
<h2>📊 Coverage Report</h2>
<div class="stats">
<div class="stat-card">
<div class="value ${coverageClass}">${summary.average.toFixed(1)}%</div>
<div class="label">Average Coverage</div>
</div>
<div class="stat-card">
<div class="value">${summary.lines.toFixed(1)}%</div>
<div class="label">Lines</div>
</div>
<div class="stat-card">
<div class="value">${summary.statements.toFixed(1)}%</div>
<div class="label">Statements</div>
</div>
<div class="stat-card">
<div class="value">${summary.functions.toFixed(1)}%</div>
<div class="label">Functions</div>
</div>
<div class="stat-card">
<div class="value">${summary.branches.toFixed(1)}%</div>
<div class="label">Branches</div>
</div>
</div>
<div class="progress-bar">
<div class="progress-fill ${progressClass}" style="width: ${summary.average}%"></div>
</div>`;
// List at most 10 files that fall under the 80% line-coverage threshold.
const lowCoverageFiles = files.filter(f => f.lines < 80).slice(0, 10);
if (lowCoverageFiles.length > 0) {
html += `
<h3>Files with Low Coverage</h3>
<table>
<thead>
<tr>
<th>File</th>
<th>Lines</th>
<th>Statements</th>
<th>Functions</th>
<th>Branches</th>
</tr>
</thead>
<tbody>`;
for (const file of lowCoverageFiles) {
// Show only the basename; the full path would bloat the table.
const fileName = file.path.split('/').pop();
html += `
<tr>
<td>${fileName}</td>
<td class="${file.lines < 50 ? 'danger' : file.lines < 80 ? 'warning' : ''}">${file.lines.toFixed(1)}%</td>
<td>${file.statements.toFixed(1)}%</td>
<td>${file.functions.toFixed(1)}%</td>
<td>${file.branches.toFixed(1)}%</td>
</tr>`;
}
html += `
</tbody>
</table>`;
}
html += `</div>`;
return html;
}
// Render the benchmark section of the HTML report; returns '' when no
// benchmark results were loaded. Shows at most the first 20 entries.
generateBenchmarkHtml() {
if (!this.results.benchmarks || this.results.benchmarks.results.length === 0) return '';
let html = `
<div class="section">
<h2>⚡ Benchmark Results</h2>
<table>
<thead>
<tr>
<th>Benchmark</th>
<th>Operations/sec</th>
<th>Mean Time (ms)</th>
<th>Min (ms)</th>
<th>Max (ms)</th>
<th>Samples</th>
</tr>
</thead>
<tbody>`;
for (const bench of this.results.benchmarks.results.slice(0, 20)) {
// bench.mean/min/max appear to be in seconds here (multiplied by 1000
// for the ms columns) — NOTE(review): confirm against the loader.
const opsFormatted = bench.ops.toLocaleString('en-US', { maximumFractionDigits: 0 });
const meanFormatted = (bench.mean * 1000).toFixed(3);
const minFormatted = (bench.min * 1000).toFixed(3);
const maxFormatted = (bench.max * 1000).toFixed(3);
html += `
<tr>
<td>${bench.name}</td>
<td><strong>${opsFormatted}</strong></td>
<td>${meanFormatted}</td>
<td>${minFormatted}</td>
<td>${maxFormatted}</td>
<td>${bench.samples}</td>
</tr>`;
}
html += `
</tbody>
</table>`;
// Tell the reader when results were truncated to the top 20.
if (this.results.benchmarks.results.length > 20) {
html += `<p><em>Showing top 20 of ${this.results.benchmarks.results.length} benchmarks</em></p>`;
}
html += `</div>`;
return html;
}
escapeHtml(text) {
const map = {
'&': '&',
'<': '<',
'>': '>',
'"': '"',
"'": '''
};
return text.replace(/[&<>"']/g, m => map[m]);
}
async generate() {
// Load all results
this.loadTestResults();
this.loadCoverageResults();
this.loadBenchmarkResults();
// Ensure output directory exists
const outputDir = resolve(process.cwd(), 'test-reports');
if (!existsSync(outputDir)) {
mkdirSync(outputDir, { recursive: true });
}
// Generate reports in different formats
const markdownReport = this.generateMarkdownReport();
const jsonReport = this.generateJsonReport();
const htmlReport = this.generateHtmlReport();
// Write reports
writeFileSync(resolve(outputDir, 'report.md'), markdownReport);
writeFileSync(resolve(outputDir, 'report.json'), jsonReport);
writeFileSync(resolve(outputDir, 'report.html'), htmlReport);
console.log('Test reports generated successfully:');
console.log('- test-reports/report.md');
console.log('- test-reports/report.json');
console.log('- test-reports/report.html');
}
}
// Run the generator. Report the failure AND set a non-zero exit code so a
// broken report build is not silently treated as success by CI (the previous
// `.catch(console.error)` logged the error but still exited 0).
const generator = new TestReportGenerator();
generator.generate().catch((error) => {
  console.error(error);
  process.exitCode = 1;
});
```
--------------------------------------------------------------------------------
/docs/N8N_DEPLOYMENT.md:
--------------------------------------------------------------------------------
```markdown
# n8n-MCP Deployment Guide
This guide covers how to deploy n8n-MCP and connect it to your n8n instance. Whether you're testing locally or deploying to production, we'll show you how to set up n8n-MCP for use with n8n's MCP Client Tool node.
## Table of Contents
- [Overview](#overview)
- [Local Testing](#local-testing)
- [Production Deployment](#production-deployment)
- [Same Server as n8n](#same-server-as-n8n)
- [Different Server (Cloud Deployment)](#different-server-cloud-deployment)
- [Connecting n8n to n8n-MCP](#connecting-n8n-to-n8n-mcp)
- [Security & Best Practices](#security--best-practices)
- [Troubleshooting](#troubleshooting)
## Overview
n8n-MCP is a Model Context Protocol server that provides AI assistants with comprehensive access to n8n node documentation and management capabilities. When connected to n8n via the MCP Client Tool node, it enables:
- AI-powered workflow creation and validation
- Access to documentation for 500+ n8n nodes
- Workflow management through the n8n API
- Real-time configuration validation
## Local Testing
### Quick Test Script
Test n8n-MCP locally with the provided test script:
```bash
# Clone the repository
git clone https://github.com/czlonkowski/n8n-mcp.git
cd n8n-mcp
# Build the project
npm install
npm run build
# Run the integration test script
./scripts/test-n8n-integration.sh
```
This script will:
1. Start a real n8n instance in Docker
2. Start n8n-MCP server configured for n8n
3. Guide you through API key setup for workflow management
4. Test the complete integration between n8n and n8n-MCP
### Manual Local Setup
For development or custom testing:
1. **Prerequisites**:
- n8n instance running (local or remote)
- n8n API key (from n8n Settings → API)
2. **Start n8n-MCP**:
```bash
# Set environment variables
export N8N_MODE=true
export MCP_MODE=http # Required for HTTP mode
export N8N_API_URL=http://localhost:5678 # Your n8n instance URL
export N8N_API_KEY=your-api-key-here # Your n8n API key
export MCP_AUTH_TOKEN=test-token-minimum-32-chars-long
export AUTH_TOKEN=test-token-minimum-32-chars-long # Same value as MCP_AUTH_TOKEN
export PORT=3001
# Start the server
npm start
```
3. **Verify it's running**:
```bash
# Check health
curl http://localhost:3001/health
# Check MCP protocol endpoint (this is the endpoint n8n connects to)
curl http://localhost:3001/mcp
# Should return: {"protocolVersion":"2024-11-05"} for n8n compatibility
```
## Environment Variables Reference
| Variable | Required | Description | Example Value |
|----------|----------|-------------|---------------|
| `N8N_MODE` | Yes | Enables n8n integration mode | `true` |
| `MCP_MODE` | Yes | Enables HTTP mode for n8n MCP Client | `http` |
| `N8N_API_URL` | Yes* | URL of your n8n instance | `http://localhost:5678` |
| `N8N_API_KEY` | Yes* | n8n API key for workflow management | `n8n_api_xxx...` |
| `MCP_AUTH_TOKEN` | Yes | Authentication token for MCP requests (min 32 chars) | `secure-random-32-char-token` |
| `AUTH_TOKEN` | Yes | **MUST match MCP_AUTH_TOKEN exactly** | `secure-random-32-char-token` |
| `PORT` | No | Port for the HTTP server | `3000` (default) |
| `LOG_LEVEL` | No | Logging verbosity | `info`, `debug`, `error` |
*Required only for workflow management features. Documentation tools work without these.
## Docker Build Changes (v2.9.2+)
Starting with version 2.9.2, we use a single optimized Dockerfile for all deployments:
- The previous `Dockerfile.n8n` has been removed as redundant
- N8N_MODE functionality is enabled via the `N8N_MODE=true` environment variable
- This reduces image size by 500MB+ and improves build times from 8+ minutes to 1-2 minutes
- All examples now use the standard `Dockerfile`
## Production Deployment
> **⚠️ Critical**: Docker caches images locally. Always run `docker pull ghcr.io/czlonkowski/n8n-mcp:latest` before deploying to ensure you have the latest version. This simple step prevents most deployment issues.
### Same Server as n8n
If you're running n8n-MCP on the same server as your n8n instance:
### Using Pre-built Image (Recommended)
The pre-built images are automatically updated with each release and are the easiest way to get started.
**IMPORTANT**: Always pull the latest image to avoid using cached versions:
```bash
# ALWAYS pull the latest image first
docker pull ghcr.io/czlonkowski/n8n-mcp:latest
# Generate a secure token (save this!)
AUTH_TOKEN=$(openssl rand -hex 32)
echo "Your AUTH_TOKEN: $AUTH_TOKEN"
# Create a Docker network if n8n uses one
docker network create n8n-net
# Run n8n-MCP container
docker run -d \
--name n8n-mcp \
--network n8n-net \
-p 3000:3000 \
-e N8N_MODE=true \
-e MCP_MODE=http \
-e N8N_API_URL=http://n8n:5678 \
-e N8N_API_KEY=your-n8n-api-key \
-e MCP_AUTH_TOKEN=$AUTH_TOKEN \
-e AUTH_TOKEN=$AUTH_TOKEN \
-e LOG_LEVEL=info \
--restart unless-stopped \
ghcr.io/czlonkowski/n8n-mcp:latest
```
### Building from Source (Advanced Users)
Only build from source if you need custom modifications or are contributing to development:
```bash
# Clone and build
git clone https://github.com/czlonkowski/n8n-mcp.git
cd n8n-mcp
# Build Docker image
docker build -t n8n-mcp:latest .
# Run using your local image
docker run -d \
--name n8n-mcp \
-p 3000:3000 \
-e N8N_MODE=true \
-e MCP_MODE=http \
-e MCP_AUTH_TOKEN=$(openssl rand -hex 32) \
-e AUTH_TOKEN=$(openssl rand -hex 32) \
# ... other settings
n8n-mcp:latest
```
### Using systemd (for native installation)
```bash
# Create service file
sudo cat > /etc/systemd/system/n8n-mcp.service << EOF
[Unit]
Description=n8n-MCP Server
After=network.target
[Service]
Type=simple
User=nodejs
WorkingDirectory=/opt/n8n-mcp
Environment="N8N_MODE=true"
Environment="MCP_MODE=http"
Environment="N8N_API_URL=http://localhost:5678"
Environment="N8N_API_KEY=your-n8n-api-key"
Environment="MCP_AUTH_TOKEN=your-secure-token-32-chars-min"
Environment="AUTH_TOKEN=your-secure-token-32-chars-min"
Environment="PORT=3000"
ExecStart=/usr/bin/node /opt/n8n-mcp/dist/mcp/index.js
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
# Enable and start
sudo systemctl enable n8n-mcp
sudo systemctl start n8n-mcp
```
### Different Server (Cloud Deployment)
Deploy n8n-MCP on a separate server from your n8n instance:
#### Quick Docker Deployment (Recommended)
**Always pull the latest image to ensure you have the current version:**
```bash
# On your cloud server (Hetzner, AWS, DigitalOcean, etc.)
# ALWAYS pull the latest image first
docker pull ghcr.io/czlonkowski/n8n-mcp:latest
# Generate auth tokens
AUTH_TOKEN=$(openssl rand -hex 32)
echo "Save this AUTH_TOKEN: $AUTH_TOKEN"
# Run the container
docker run -d \
--name n8n-mcp \
-p 3000:3000 \
-e N8N_MODE=true \
-e MCP_MODE=http \
-e N8N_API_URL=https://your-n8n-instance.com \
-e N8N_API_KEY=your-n8n-api-key \
-e MCP_AUTH_TOKEN=$AUTH_TOKEN \
-e AUTH_TOKEN=$AUTH_TOKEN \
-e LOG_LEVEL=info \
--restart unless-stopped \
ghcr.io/czlonkowski/n8n-mcp:latest
```
#### Building from Source (Advanced)
Only needed if you're modifying the code:
```bash
# Clone and build
git clone https://github.com/czlonkowski/n8n-mcp.git
cd n8n-mcp
docker build -t n8n-mcp:latest .
# Run using local image
docker run -d \
--name n8n-mcp \
-p 3000:3000 \
# ... same environment variables as above
n8n-mcp:latest
```
#### Full Production Setup (Hetzner/AWS/DigitalOcean)
1. **Server Requirements**:
- **Minimal**: 1 vCPU, 1GB RAM (CX11 on Hetzner)
- **Recommended**: 2 vCPU, 2GB RAM
- **OS**: Ubuntu 22.04 LTS
2. **Initial Setup**:
```bash
# SSH into your server
ssh root@your-server-ip
# Update and install Docker
apt update && apt upgrade -y
curl -fsSL https://get.docker.com | sh
```
3. **Deploy n8n-MCP with SSL** (using Caddy for automatic HTTPS):
**Using Docker Compose (Recommended)**
```bash
# Create docker-compose.yml
cat > docker-compose.yml << 'EOF'
version: '3.8'
services:
n8n-mcp:
image: ghcr.io/czlonkowski/n8n-mcp:latest
pull_policy: always # Always pull latest image
container_name: n8n-mcp
restart: unless-stopped
environment:
- N8N_MODE=true
- MCP_MODE=http
- N8N_API_URL=${N8N_API_URL}
- N8N_API_KEY=${N8N_API_KEY}
- MCP_AUTH_TOKEN=${MCP_AUTH_TOKEN}
- AUTH_TOKEN=${AUTH_TOKEN}
- PORT=3000
- LOG_LEVEL=info
networks:
- web
caddy:
image: caddy:2-alpine
container_name: caddy
restart: unless-stopped
ports:
- "80:80"
- "443:443"
volumes:
- ./Caddyfile:/etc/caddy/Caddyfile
- caddy_data:/data
- caddy_config:/config
networks:
- web
networks:
web:
driver: bridge
volumes:
caddy_data:
caddy_config:
EOF
```
**Note**: The `pull_policy: always` ensures you always get the latest version.
**Building from Source (if needed)**
```bash
# Only if you need custom modifications
git clone https://github.com/czlonkowski/n8n-mcp.git
cd n8n-mcp
docker build -t n8n-mcp:local .
# Then create docker-compose.yml using your locally built image:
cat > docker-compose.yml << 'EOF'
version: '3.8'

services:
  n8n-mcp:
    image: n8n-mcp:local
    container_name: n8n-mcp
    restart: unless-stopped
    environment:
      - N8N_MODE=true
      - MCP_MODE=http
      - N8N_API_URL=${N8N_API_URL}
      - N8N_API_KEY=${N8N_API_KEY}
      - MCP_AUTH_TOKEN=${MCP_AUTH_TOKEN}
      - AUTH_TOKEN=${AUTH_TOKEN}
      - PORT=3000
      - LOG_LEVEL=info
    networks:
      - web

  caddy:
    image: caddy:2-alpine
    container_name: caddy
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      - caddy_data:/data
      - caddy_config:/config
    networks:
      - web

networks:
  web:
    driver: bridge

volumes:
  caddy_data:
  caddy_config:
EOF
```
**Complete the Setup**
```bash
# Create Caddyfile
cat > Caddyfile << 'EOF'
mcp.yourdomain.com {
reverse_proxy n8n-mcp:3000
}
EOF
# Create .env file
AUTH_TOKEN=$(openssl rand -hex 32)
cat > .env << EOF
N8N_API_URL=https://your-n8n-instance.com
N8N_API_KEY=your-n8n-api-key-here
MCP_AUTH_TOKEN=$AUTH_TOKEN
AUTH_TOKEN=$AUTH_TOKEN
EOF
# Save the AUTH_TOKEN!
echo "Your AUTH_TOKEN is: $AUTH_TOKEN"
echo "Save this token - you'll need it in n8n MCP Client Tool configuration"
# Start services
docker compose up -d
```
#### Cloud Provider Tips
**AWS EC2**:
- Security Group: Open port 3000 (or 443 with HTTPS)
- Instance Type: t3.micro is sufficient
- Use Elastic IP for stable addressing
**DigitalOcean**:
- Droplet: Basic ($6/month) is enough
- Enable backups for production use
**Google Cloud**:
- Machine Type: e2-micro (free tier eligible)
- Use Cloud Load Balancer for SSL
## Connecting n8n to n8n-MCP
### Configure n8n MCP Client Tool
1. **In your n8n workflow**, add the **MCP Client Tool** node
2. **Configure the connection**:
```
Server URL (MUST include /mcp endpoint):
- Same server: http://localhost:3000/mcp
- Docker network: http://n8n-mcp:3000/mcp
- Different server: https://mcp.yourdomain.com/mcp
Auth Token: [Your MCP_AUTH_TOKEN/AUTH_TOKEN value]
Transport: HTTP Streamable (SSE)
```
⚠️ **Critical**: The Server URL must include the `/mcp` endpoint path. Without this, the connection will fail.
3. **Test the connection** by selecting a simple tool like `list_nodes`
### Available Tools
Once connected, you can use these MCP tools in n8n:
**Documentation Tools** (No API key required):
- `list_nodes` - List all n8n nodes with filtering
- `search_nodes` - Search nodes by keyword
- `get_node_info` - Get detailed node information
- `get_node_essentials` - Get only essential properties
- `validate_workflow` - Validate workflow configurations
- `get_node_documentation` - Get human-readable docs
**Management Tools** (Requires n8n API key):
- `n8n_create_workflow` - Create new workflows
- `n8n_update_workflow` - Update existing workflows
- `n8n_get_workflow` - Retrieve workflow details
- `n8n_list_workflows` - List all workflows
- `n8n_trigger_webhook_workflow` - Trigger webhook workflows
### Using with AI Agents
Connect n8n-MCP to AI Agent nodes for intelligent automation:
1. **Add an AI Agent node** (e.g., OpenAI, Anthropic)
2. **Connect MCP Client Tool** to the Agent's tool input
3. **Configure prompts** for workflow creation:
```
You are an n8n workflow expert. Use the MCP tools to:
1. Search for appropriate nodes using search_nodes
2. Get configuration details with get_node_essentials
3. Validate configurations with validate_workflow
4. Create the workflow if all validations pass
```
## Security & Best Practices
### Authentication
- **MCP_AUTH_TOKEN**: Always use a strong, random token (32+ characters)
- **N8N_API_KEY**: Only required for workflow management features
- Store tokens in environment variables or secure vaults
### Network Security
- **Use HTTPS** in production (Caddy/Nginx/Traefik)
- **Firewall**: Only expose necessary ports (3000 or 443)
- **IP Whitelisting**: Consider restricting access to known n8n instances
### Docker Security
- **Always pull latest images**: Docker caches images locally, so run `docker pull` before deployment
- Run containers with `--read-only` flag if possible
- Use specific image versions instead of `:latest` in production
- Regular updates: `docker pull ghcr.io/czlonkowski/n8n-mcp:latest`
## Troubleshooting
### Docker Image Issues
**Using Outdated Cached Images**
- **Symptom**: Missing features, old bugs reappearing, features not working as documented
- **Cause**: Docker uses locally cached images instead of pulling the latest version
- **Solution**: Always run `docker pull ghcr.io/czlonkowski/n8n-mcp:latest` before deployment
- **Verification**: Check image age with `docker images | grep n8n-mcp`
### Common Configuration Issues
**Missing `MCP_MODE=http` Environment Variable**
- **Symptom**: n8n MCP Client Tool cannot connect, server doesn't respond on `/mcp` endpoint
- **Solution**: Add `MCP_MODE=http` to your environment variables
- **Why**: Without this, the server runs in stdio mode which is incompatible with n8n
**Server URL Missing `/mcp` Endpoint**
- **Symptom**: "Connection refused" or "Invalid response" in n8n MCP Client Tool
- **Solution**: Ensure your Server URL includes `/mcp` (e.g., `http://localhost:3000/mcp`)
- **Why**: n8n connects to the `/mcp` endpoint specifically, not the root URL
**Mismatched Auth Tokens**
- **Symptom**: "Authentication failed" or "Invalid auth token"
- **Solution**: Ensure both `MCP_AUTH_TOKEN` and `AUTH_TOKEN` have the same value
- **Why**: Both variables must match for proper authentication
### Connection Issues
**"Connection refused" in n8n MCP Client Tool**
1. **Check n8n-MCP is running**:
```bash
# Docker
docker ps | grep n8n-mcp
docker logs n8n-mcp --tail 20
# Systemd
systemctl status n8n-mcp
journalctl -u n8n-mcp --tail 20
```
2. **Verify endpoints are accessible**:
```bash
# Health check (should return status info)
curl http://your-server:3000/health
# MCP endpoint (should return protocol version)
curl http://your-server:3000/mcp
```
3. **Check firewall and networking**:
```bash
# Test port accessibility from n8n server
telnet your-mcp-server 3000
# Check firewall rules (Ubuntu/Debian)
sudo ufw status
# Check if port is bound correctly
netstat -tlnp | grep :3000
```
**"Invalid auth token" or "Authentication failed"**
1. **Verify token format**:
```bash
# Check token length (should be 64 chars for hex-32)
echo $MCP_AUTH_TOKEN | wc -c
# Verify both tokens match
echo "MCP_AUTH_TOKEN: $MCP_AUTH_TOKEN"
echo "AUTH_TOKEN: $AUTH_TOKEN"
```
2. **Common token issues**:
- Token too short (minimum 32 characters)
- Extra whitespace or newlines in token
- Different values for `MCP_AUTH_TOKEN` and `AUTH_TOKEN`
- Special characters not properly escaped in environment files
**"Cannot connect to n8n API"**
1. **Verify n8n configuration**:
```bash
# Test n8n API accessibility
curl -H "X-N8N-API-KEY: your-api-key" \
https://your-n8n-instance.com/api/v1/workflows
```
2. **Common n8n API issues**:
- `N8N_API_URL` missing protocol (http:// or https://)
- n8n API key expired or invalid
- n8n instance not accessible from n8n-MCP server
- n8n API disabled in settings
### Version Compatibility Issues
**"Features Not Working as Expected"**
- **Symptom**: Missing features, old bugs, or compatibility issues
- **Solution**: Pull the latest image: `docker pull ghcr.io/czlonkowski/n8n-mcp:latest`
- **Check**: Verify image date with `docker inspect ghcr.io/czlonkowski/n8n-mcp:latest | grep Created`
**"Protocol version mismatch"**
- n8n-MCP automatically uses version 2024-11-05 for n8n compatibility
- Update to latest n8n-MCP version if issues persist
- Verify `/mcp` endpoint returns correct version
### Environment Variable Issues
**Complete Environment Variable Checklist**:
```bash
# Required for all deployments
export N8N_MODE=true # Enables n8n integration
export MCP_MODE=http # Enables HTTP mode for n8n
export MCP_AUTH_TOKEN=your-secure-32-char-token # Auth token
export AUTH_TOKEN=your-secure-32-char-token # Same value as MCP_AUTH_TOKEN
# Required for workflow management features
export N8N_API_URL=https://your-n8n-instance.com # Your n8n URL
export N8N_API_KEY=your-n8n-api-key # Your n8n API key
# Optional
export PORT=3000 # HTTP port (default: 3000)
export LOG_LEVEL=info # Logging level
```
### Docker-Specific Issues
**Container Build Failures**
```bash
# Clear Docker cache and rebuild
docker system prune -f
docker build --no-cache -t n8n-mcp:latest .
```
**Container Runtime Issues**
```bash
# Check container logs for detailed errors
docker logs n8n-mcp -f --timestamps
# Inspect container environment
docker exec n8n-mcp env | grep -E "(N8N|MCP|AUTH)"
# Test container connectivity
docker exec n8n-mcp curl -f http://localhost:3000/health
```
### Network and SSL Issues
**HTTPS/SSL Problems**
```bash
# Test SSL certificate
openssl s_client -connect mcp.yourdomain.com:443
# Check Caddy logs
docker logs caddy -f --tail 50
```
**Docker Network Issues**
```bash
# Check if containers can communicate
docker network ls
docker network inspect bridge
# Test inter-container connectivity
docker exec n8n curl http://n8n-mcp:3000/health
```
### Debugging Steps
1. **Enable comprehensive logging**:
```bash
# For Docker
docker run -d \
--name n8n-mcp \
-e DEBUG_MCP=true \
-e LOG_LEVEL=debug \
-e N8N_MODE=true \
-e MCP_MODE=http \
# ... other settings
# For systemd, add to service file:
Environment="DEBUG_MCP=true"
Environment="LOG_LEVEL=debug"
```
2. **Test all endpoints systematically**:
```bash
# 1. Health check (basic server functionality)
curl -v http://localhost:3000/health
# 2. MCP protocol endpoint (what n8n connects to)
curl -v http://localhost:3000/mcp
# 3. Test authentication (if working, returns tools list)
curl -X POST http://localhost:3000/mcp \
-H "Authorization: Bearer YOUR_AUTH_TOKEN" \
-H "Content-Type: application/json" \
-d '{"jsonrpc":"2.0","method":"tools/list","id":1}'
# 4. Test a simple tool (documentation only, no n8n API needed)
curl -X POST http://localhost:3000/mcp \
-H "Authorization: Bearer YOUR_AUTH_TOKEN" \
-H "Content-Type: application/json" \
-d '{"jsonrpc":"2.0","method":"tools/call","params":{"name":"get_database_statistics","arguments":{}},"id":2}'
```
3. **Common log patterns to look for**:
```bash
# Success patterns
grep "Server started" /var/log/n8n-mcp.log
grep "Protocol version" /var/log/n8n-mcp.log
# Error patterns
grep -i "error\|failed\|invalid" /var/log/n8n-mcp.log
grep -i "auth\|token" /var/log/n8n-mcp.log
grep -i "connection\|network" /var/log/n8n-mcp.log
```
### Getting Help
If you're still experiencing issues:
1. **Gather diagnostic information**:
```bash
# System info
docker --version
docker-compose --version
uname -a
# n8n-MCP version
docker exec n8n-mcp node dist/index.js --version
# Environment check
docker exec n8n-mcp env | grep -E "(N8N|MCP|AUTH)" | sort
# Container status
docker ps | grep n8n-mcp
docker stats n8n-mcp --no-stream
```
2. **Create a minimal test setup**:
```bash
# Test with minimal configuration
docker run -d \
--name n8n-mcp-test \
-p 3001:3000 \
-e N8N_MODE=true \
-e MCP_MODE=http \
-e MCP_AUTH_TOKEN=test-token-minimum-32-chars-long \
-e AUTH_TOKEN=test-token-minimum-32-chars-long \
-e LOG_LEVEL=debug \
n8n-mcp:latest
# Test basic functionality
curl http://localhost:3001/health
curl http://localhost:3001/mcp
```
3. **Report issues**: Include the diagnostic information when opening an issue on [GitHub](https://github.com/czlonkowski/n8n-mcp/issues)
## Performance Tips
- **Minimal deployment**: 1 vCPU, 1GB RAM is sufficient
- **Database**: Pre-built SQLite database (~15MB) loads quickly
- **Response time**: Average 12ms for queries
- **Caching**: Built-in 15-minute cache for repeated queries
## Next Steps
- Test your setup with the [MCP Client Tool in n8n](https://docs.n8n.io/integrations/builtin/app-nodes/n8n-nodes-langchain.mcpclienttool/)
- Explore [available MCP tools](../README.md#-available-mcp-tools)
- Build AI-powered workflows with [AI Agent nodes](https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmagent/)
- Join the [n8n Community](https://community.n8n.io) for ideas and support
---
Need help? Open an issue on [GitHub](https://github.com/czlonkowski/n8n-mcp/issues) or check the [n8n forums](https://community.n8n.io)
```
--------------------------------------------------------------------------------
/tests/unit/mappers/docs-mapper.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { DocsMapper } from '@/mappers/docs-mapper';
import { promises as fs } from 'fs';
import path from 'path';
// Replace fs.promises with a controllable stub so each test decides whether
// readFile resolves (docs found) or rejects (path miss).
vi.mock('fs', () => ({
promises: {
readFile: vi.fn()
}
}));
// Pin process.cwd() to a fixed path so the candidate paths probed by
// DocsMapper are deterministic across machines; restored after every test.
const originalCwd = process.cwd;
beforeEach(() => {
process.cwd = vi.fn(() => '/mocked/path');
});
afterEach(() => {
process.cwd = originalCwd;
vi.clearAllMocks();
});
describe('DocsMapper', () => {
let docsMapper: DocsMapper;
let consoleLogSpy: any;
beforeEach(() => {
docsMapper = new DocsMapper();
consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {});
});
afterEach(() => {
consoleLogSpy.mockRestore();
});
describe('fetchDocumentation', () => {
// Happy paths: the mapper normalizes the node type, finds a markdown file
// via the mocked fs, and narrates its search on the console.
describe('successful documentation fetch', () => {
  it('should fetch documentation for httpRequest node', async () => {
    const mockContent = '# HTTP Request Node\n\nDocumentation content';
    vi.mocked(fs.readFile).mockResolvedValueOnce(mockContent);
    const result = await docsMapper.fetchDocumentation('httpRequest');
    expect(result).toBe(mockContent);
    // File name is the lower-cased node type ("httpRequest" -> httprequest.md).
    expect(fs.readFile).toHaveBeenCalledWith(
      expect.stringContaining('httprequest.md'),
      'utf-8'
    );
    expect(consoleLogSpy).toHaveBeenCalledWith('📄 Looking for docs for: httpRequest -> httprequest');
    expect(consoleLogSpy).toHaveBeenCalledWith(expect.stringContaining('✓ Found docs at:'));
  });
  it('should apply known fixes for node types', async () => {
    const mockContent = '# Webhook Node\n\nDocumentation';
    vi.mocked(fs.readFile).mockResolvedValueOnce(mockContent);
    const result = await docsMapper.fetchDocumentation('webhook');
    expect(result).toBe(mockContent);
    expect(fs.readFile).toHaveBeenCalledWith(
      expect.stringContaining('webhook.md'),
      'utf-8'
    );
  });
  it('should handle node types with package prefix', async () => {
    const mockContent = '# Code Node\n\nDocumentation';
    vi.mocked(fs.readFile).mockResolvedValueOnce(mockContent);
    // The "n8n-nodes-base." prefix is stripped before the file lookup.
    const result = await docsMapper.fetchDocumentation('n8n-nodes-base.code');
    expect(result).toBe(mockContent);
    expect(consoleLogSpy).toHaveBeenCalledWith('📄 Looking for docs for: n8n-nodes-base.code -> code');
  });
  it('should try multiple paths until finding documentation', async () => {
    const mockContent = '# Slack Node\n\nDocumentation';
    // First few attempts fail
    vi.mocked(fs.readFile)
      .mockRejectedValueOnce(new Error('Not found'))
      .mockRejectedValueOnce(new Error('Not found'))
      .mockResolvedValueOnce(mockContent);
    const result = await docsMapper.fetchDocumentation('slack');
    expect(result).toBe(mockContent);
    // Probing stops as soon as one candidate path resolves.
    expect(fs.readFile).toHaveBeenCalledTimes(3);
  });
  it('should check directory paths with index.md', async () => {
    const mockContent = '# Complex Node\n\nDocumentation';
    // Simulate finding in a directory structure - reject enough times to reach index.md paths
    vi.mocked(fs.readFile)
      .mockRejectedValueOnce(new Error('Not found')) // core-nodes direct
      .mockRejectedValueOnce(new Error('Not found')) // app-nodes direct
      .mockRejectedValueOnce(new Error('Not found')) // trigger-nodes direct
      .mockRejectedValueOnce(new Error('Not found')) // langchain root direct
      .mockRejectedValueOnce(new Error('Not found')) // langchain sub direct
      .mockResolvedValueOnce(mockContent); // Found in directory/index.md
    const result = await docsMapper.fetchDocumentation('complexNode');
    expect(result).toBe(mockContent);
    // Check that it eventually tried an index.md path
    expect(fs.readFile).toHaveBeenCalledTimes(6);
    const calls = vi.mocked(fs.readFile).mock.calls;
    const indexCalls = calls.filter(call => (call[0] as string).includes('index.md'));
    expect(indexCalls.length).toBeGreaterThan(0);
  });
});
// Negative cases: every lookup failure mode must resolve to null (never throw)
// and surface a human-readable console message.
describe('documentation not found', () => {
  it('should return null when documentation is not found', async () => {
    const enoent = new Error('ENOENT: no such file');
    vi.mocked(fs.readFile).mockRejectedValue(enoent);
    const docs = await docsMapper.fetchDocumentation('nonExistentNode');
    expect(docs).toBeNull();
    expect(consoleLogSpy).toHaveBeenCalledWith(' ✗ No docs found for nonexistentnode');
  });
  it('should return null for empty node type', async () => {
    const docs = await docsMapper.fetchDocumentation('');
    expect(docs).toBeNull();
    expect(consoleLogSpy).toHaveBeenCalledWith('⚠️ Could not extract node name from: ');
  });
  it('should handle invalid node type format', async () => {
    const docs = await docsMapper.fetchDocumentation('.');
    expect(docs).toBeNull();
    expect(consoleLogSpy).toHaveBeenCalledWith('⚠️ Could not extract node name from: .');
  });
});
// Verifies the exact candidate paths probed, rooted at the mocked cwd.
describe('path construction', () => {
  it('should construct correct paths for core nodes', async () => {
    vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
    await docsMapper.fetchDocumentation('testNode');
    // Check that it tried core-nodes path
    expect(fs.readFile).toHaveBeenCalledWith(
      path.join('/mocked/path', 'n8n-docs', 'docs/integrations/builtin/core-nodes/n8n-nodes-base.testnode.md'),
      'utf-8'
    );
  });
  it('should construct correct paths for app nodes', async () => {
    vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
    await docsMapper.fetchDocumentation('appNode');
    // Check that it tried app-nodes path
    expect(fs.readFile).toHaveBeenCalledWith(
      path.join('/mocked/path', 'n8n-docs', 'docs/integrations/builtin/app-nodes/n8n-nodes-base.appnode.md'),
      'utf-8'
    );
  });
  it('should construct correct paths for trigger nodes', async () => {
    vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
    await docsMapper.fetchDocumentation('triggerNode');
    // Check that it tried trigger-nodes path
    expect(fs.readFile).toHaveBeenCalledWith(
      path.join('/mocked/path', 'n8n-docs', 'docs/integrations/builtin/trigger-nodes/n8n-nodes-base.triggernode.md'),
      'utf-8'
    );
  });
  it('should construct correct paths for langchain nodes', async () => {
    vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
    await docsMapper.fetchDocumentation('aiNode');
    // Check that it tried langchain paths (both root-nodes and sub-nodes variants)
    expect(fs.readFile).toHaveBeenCalledWith(
      expect.stringContaining('cluster-nodes/root-nodes/n8n-nodes-langchain.ainode'),
      'utf-8'
    );
    expect(fs.readFile).toHaveBeenCalledWith(
      expect.stringContaining('cluster-nodes/sub-nodes/n8n-nodes-langchain.ainode'),
      'utf-8'
    );
  });
});
// Resilience: fetchDocumentation must swallow every readFile failure,
// exhaust all candidate paths, and settle on null.
describe('error handling', () => {
  it('should handle file system errors gracefully', async () => {
    vi.mocked(fs.readFile).mockRejectedValue(new Error('Permission denied'));
    const docs = await docsMapper.fetchDocumentation('testNode');
    expect(docs).toBeNull();
    // Every candidate location is attempted before giving up.
    expect(fs.readFile).toHaveBeenCalledTimes(10); // 5 direct paths + 5 directory paths
  });
  it('should handle non-Error exceptions', async () => {
    vi.mocked(fs.readFile).mockRejectedValue('String error');
    const docs = await docsMapper.fetchDocumentation('testNode');
    expect(docs).toBeNull();
  });
});
// KNOWN_FIXES maps node types to their actual doc file names; node types
// without an entry are simply lower-cased.
describe('KNOWN_FIXES mapping', () => {
  it('should apply fix for httpRequest', async () => {
    vi.mocked(fs.readFile).mockResolvedValueOnce('content');
    await docsMapper.fetchDocumentation('httpRequest');
    expect(fs.readFile).toHaveBeenCalledWith(
      expect.stringContaining('httprequest.md'),
      'utf-8'
    );
  });
  it('should apply fix for respondToWebhook', async () => {
    vi.mocked(fs.readFile).mockResolvedValueOnce('content');
    await docsMapper.fetchDocumentation('respondToWebhook');
    expect(fs.readFile).toHaveBeenCalledWith(
      expect.stringContaining('respondtowebhook.md'),
      'utf-8'
    );
  });
  it('should preserve casing for unknown nodes', async () => {
    vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
    await docsMapper.fetchDocumentation('CustomNode');
    expect(fs.readFile).toHaveBeenCalledWith(
      expect.stringContaining('customnode.md'), // toLowerCase applied
      'utf-8'
    );
  });
});
// The mapper narrates its search on console.log; these pin the exact messages.
describe('logging', () => {
  it('should log search progress', async () => {
    vi.mocked(fs.readFile).mockResolvedValueOnce('content');
    await docsMapper.fetchDocumentation('testNode');
    expect(consoleLogSpy).toHaveBeenCalledWith('📄 Looking for docs for: testNode -> testnode');
    expect(consoleLogSpy).toHaveBeenCalledWith(expect.stringContaining('✓ Found docs at:'));
  });
  it('should log when documentation is not found', async () => {
    vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
    await docsMapper.fetchDocumentation('missingNode');
    expect(consoleLogSpy).toHaveBeenCalledWith('📄 Looking for docs for: missingNode -> missingnode');
    expect(consoleLogSpy).toHaveBeenCalledWith(' ✗ No docs found for missingnode');
  });
});
// Unusual node-type inputs must not crash the lookup.
describe('edge cases', () => {
  it('should handle very long node names', async () => {
    const longNodeName = 'a'.repeat(100);
    vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
    const result = await docsMapper.fetchDocumentation(longNodeName);
    expect(result).toBeNull();
    expect(fs.readFile).toHaveBeenCalled();
  });
  it('should handle node names with special characters', async () => {
    vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
    const result = await docsMapper.fetchDocumentation('node-with-dashes_and_underscores');
    expect(result).toBeNull();
    // Dashes and underscores pass through to the file name unchanged.
    expect(fs.readFile).toHaveBeenCalledWith(
      expect.stringContaining('node-with-dashes_and_underscores.md'),
      'utf-8'
    );
  });
  it('should handle multiple dots in node type', async () => {
    vi.mocked(fs.readFile).mockResolvedValueOnce('content');
    // Only the segment after the last dot is used as the node name.
    const result = await docsMapper.fetchDocumentation('com.example.nodes.custom');
    expect(result).toBe('content');
    expect(consoleLogSpy).toHaveBeenCalledWith('📄 Looking for docs for: com.example.nodes.custom -> custom');
  });
});
});
// SplitInBatches docs are augmented with a warning block because its output
// indices are counterintuitive: index 0 = "done", index 1 = "loop".
// Template-literal fixtures intentionally keep their content at column 0.
describe('enhanceLoopNodeDocumentation - SplitInBatches', () => {
  it('should enhance SplitInBatches documentation with output guidance', async () => {
    const originalContent = `# Split In Batches Node
This node splits data into batches.
## When to use
Use this node when you need to process large datasets in smaller chunks.
## Parameters
- batchSize: Number of items per batch
`;
    vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
    const result = await docsMapper.fetchDocumentation('splitInBatches');
    expect(result).not.toBeNull();
    expect(result!).toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
    expect(result!).toContain('⚠️ OUTPUT INDICES ARE COUNTERINTUITIVE ⚠️');
    expect(result!).toContain('Output 0 (index 0) = "done"');
    expect(result!).toContain('Output 1 (index 1) = "loop"');
    expect(result!).toContain('Correct Connection Pattern:');
    expect(result!).toContain('Common Mistake:');
    expect(result!).toContain('AI assistants often connect these backwards');
    // Should insert before "When to use" section
    const insertionIndex = result!.indexOf('## When to use');
    const guidanceIndex = result!.indexOf('CRITICAL OUTPUT CONNECTION INFORMATION');
    expect(guidanceIndex).toBeLessThan(insertionIndex);
    expect(guidanceIndex).toBeGreaterThan(0);
  });
  it('should enhance SplitInBatches documentation when no "When to use" section exists', async () => {
    const originalContent = `# Split In Batches Node
This node splits data into batches.
## Parameters
- batchSize: Number of items per batch
`;
    vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
    const result = await docsMapper.fetchDocumentation('splitInBatches');
    expect(result).not.toBeNull();
    expect(result!).toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
    // Should be inserted at the beginning since no "When to use" section
    expect(result!.indexOf('CRITICAL OUTPUT CONNECTION INFORMATION')).toBeLessThan(
      result!.indexOf('# Split In Batches Node')
    );
  });
  it('should handle splitInBatches in various node type formats', async () => {
    // The enhancement keys off the node name, with or without package prefix.
    const testCases = [
      'splitInBatches',
      'n8n-nodes-base.splitInBatches',
      'nodes-base.splitInBatches'
    ];
    for (const nodeType of testCases) {
      const originalContent = '# Split In Batches\nOriginal content';
      vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
      const result = await docsMapper.fetchDocumentation(nodeType);
      expect(result).toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
      expect(result).toContain('Output 0 (index 0) = "done"');
    }
  });
  it('should provide specific guidance for correct connection patterns', async () => {
    const originalContent = '# Split In Batches\n## When to use\nContent';
    vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
    const result = await docsMapper.fetchDocumentation('splitInBatches');
    expect(result).toContain('Connect nodes that PROCESS items inside the loop to **Output 1 ("loop")**');
    expect(result).toContain('Connect nodes that run AFTER the loop completes to **Output 0 ("done")**');
    expect(result).toContain('The last processing node in the loop must connect back to the SplitInBatches node');
  });
  it('should explain the common AI assistant mistake', async () => {
    const originalContent = '# Split In Batches\n## When to use\nContent';
    vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
    const result = await docsMapper.fetchDocumentation('splitInBatches');
    expect(result).toContain('AI assistants often connect these backwards');
    expect(result).toContain('logical flow (loop first, then done) doesn\'t match the technical indices (done=0, loop=1)');
  });
  it('should not enhance non-splitInBatches nodes with loop guidance', async () => {
    const originalContent = '# HTTP Request Node\nContent';
    vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
    const result = await docsMapper.fetchDocumentation('httpRequest');
    expect(result).not.toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
    expect(result).not.toContain('counterintuitive');
    expect(result).toBe(originalContent); // Should be unchanged
  });
});
// IF node docs get a lighter-weight note documenting the true/false outputs;
// the enhancement only fires for dotted node types with a "## Node parameters"
// section present.
describe('enhanceLoopNodeDocumentation - IF node', () => {
  it('should enhance IF node documentation with output guidance', async () => {
    const originalContent = `# IF Node
Route items based on conditions.
## Node parameters
Configure your conditions here.
`;
    vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
    const result = await docsMapper.fetchDocumentation('n8n-nodes-base.if');
    expect(result).not.toBeNull();
    expect(result!).toContain('Output Connection Information');
    expect(result!).toContain('Output 0 (index 0) = "true"');
    expect(result!).toContain('Output 1 (index 1) = "false"');
    expect(result!).toContain('Items that match the condition');
    expect(result!).toContain('Items that do not match the condition');
    // Should insert before "Node parameters" section
    const parametersIndex = result!.indexOf('## Node parameters');
    const outputInfoIndex = result!.indexOf('Output Connection Information');
    expect(outputInfoIndex).toBeLessThan(parametersIndex);
    expect(outputInfoIndex).toBeGreaterThan(0);
  });
  it('should handle IF node when no "Node parameters" section exists', async () => {
    const originalContent = `# IF Node
Route items based on conditions.
## Usage
Use this node to route data.
`;
    vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
    const result = await docsMapper.fetchDocumentation('n8n-nodes-base.if');
    // When no "Node parameters" section exists, no enhancement is applied
    expect(result).toBe(originalContent);
  });
  it('should handle various IF node type formats', async () => {
    const testCases = [
      'if',
      'n8n-nodes-base.if',
      'nodes-base.if'
    ];
    for (const nodeType of testCases) {
      const originalContent = '# IF Node\n## Node parameters\nContent';
      vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
      const result = await docsMapper.fetchDocumentation(nodeType);
      if (nodeType.includes('.if')) {
        expect(result).toContain('Output Connection Information');
        expect(result).toContain('Output 0 (index 0) = "true"');
        expect(result).toContain('Output 1 (index 1) = "false"');
      } else {
        // For 'if' without dot, no enhancement is applied
        expect(result).toBe(originalContent);
      }
    }
  });
});
// Boundary conditions for the enhancement logic itself: odd content shapes,
// repeated sections, pre-enhanced input, and large documents.
describe('enhanceLoopNodeDocumentation - edge cases', () => {
  it('should handle content without clear insertion points', async () => {
    const originalContent = 'Simple content without markdown sections';
    vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
    const result = await docsMapper.fetchDocumentation('splitInBatches');
    expect(result).not.toBeNull();
    expect(result!).toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
    // Should be prepended when no insertion point found (but there's a newline before original content)
    const guidanceIndex = result!.indexOf('CRITICAL OUTPUT CONNECTION INFORMATION');
    expect(guidanceIndex).toBeLessThan(result!.indexOf('Simple content'));
    expect(guidanceIndex).toBeLessThanOrEqual(5); // Allow for some whitespace
  });
  it('should handle empty content', async () => {
    const originalContent = '';
    vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
    const result = await docsMapper.fetchDocumentation('splitInBatches');
    expect(result).not.toBeNull();
    expect(result!).toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
    expect(result!.length).toBeGreaterThan(0);
  });
  it('should handle content with multiple "When to use" sections', async () => {
    const originalContent = `# Split In Batches
## When to use (overview)
General usage.
## When to use (detailed)
Detailed usage.
`;
    vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
    const result = await docsMapper.fetchDocumentation('splitInBatches');
    expect(result).not.toBeNull();
    expect(result!).toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
    // Should insert before first occurrence
    const firstWhenToUse = result!.indexOf('## When to use (overview)');
    const guidanceIndex = result!.indexOf('CRITICAL OUTPUT CONNECTION INFORMATION');
    expect(guidanceIndex).toBeLessThan(firstWhenToUse);
  });
  it('should not double-enhance already enhanced content', async () => {
    const alreadyEnhancedContent = `# Split In Batches
## CRITICAL OUTPUT CONNECTION INFORMATION
Already enhanced.
## When to use
Content here.
`;
    vi.mocked(fs.readFile).mockResolvedValueOnce(alreadyEnhancedContent);
    const result = await docsMapper.fetchDocumentation('splitInBatches');
    // Should still add enhancement (method doesn't check for existing enhancements)
    expect(result).not.toBeNull();
    const criticalSections = (result!.match(/CRITICAL OUTPUT CONNECTION INFORMATION/g) || []).length;
    expect(criticalSections).toBe(2); // Original + new enhancement
  });
  it('should handle very large content efficiently', async () => {
    const largeContent = 'a'.repeat(100000) + '\n## When to use\n' + 'b'.repeat(100000);
    vi.mocked(fs.readFile).mockResolvedValueOnce(largeContent);
    const result = await docsMapper.fetchDocumentation('splitInBatches');
    expect(result).not.toBeNull();
    expect(result!).toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
    expect(result!.length).toBeGreaterThan(largeContent.length);
  });
});
// Instance-level invariants, reaching into private members via index access.
describe('DocsMapper instance', () => {
  it('should use consistent docsPath across instances', () => {
    const mapper1 = new DocsMapper();
    const mapper2 = new DocsMapper();
    // Both should construct the same base path
    expect(mapper1['docsPath']).toBe(mapper2['docsPath']);
    expect(mapper1['docsPath']).toBe(path.join('/mocked/path', 'n8n-docs'));
  });
  it('should maintain KNOWN_FIXES as readonly', () => {
    const mapper = new DocsMapper();
    // KNOWN_FIXES should be accessible but not modifiable
    expect(mapper['KNOWN_FIXES']).toBeDefined();
    expect(mapper['KNOWN_FIXES']['httpRequest']).toBe('httprequest');
  });
});
});
```
--------------------------------------------------------------------------------
/tests/unit/telemetry/telemetry-manager.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';
import { TelemetryManager, telemetry } from '../../../src/telemetry/telemetry-manager';
import { TelemetryConfigManager } from '../../../src/telemetry/config-manager';
import { TelemetryEventTracker } from '../../../src/telemetry/event-tracker';
import { TelemetryBatchProcessor } from '../../../src/telemetry/batch-processor';
import { createClient } from '@supabase/supabase-js';
import { TELEMETRY_BACKEND } from '../../../src/telemetry/telemetry-types';
import { TelemetryError, TelemetryErrorType } from '../../../src/telemetry/telemetry-error';
// Mock all dependencies: the logger is silenced, Supabase client creation is
// intercepted, and every telemetry collaborator module is auto-mocked so the
// manager can be tested in isolation.
vi.mock('../../../src/utils/logger', () => ({
  logger: {
    debug: vi.fn(),
    info: vi.fn(),
    warn: vi.fn(),
    error: vi.fn(),
  }
}));
vi.mock('@supabase/supabase-js', () => ({
  createClient: vi.fn()
}));
vi.mock('../../../src/telemetry/config-manager');
vi.mock('../../../src/telemetry/event-tracker');
vi.mock('../../../src/telemetry/batch-processor');
vi.mock('../../../src/telemetry/workflow-sanitizer');
describe('TelemetryManager', () => {
// Shared mock collaborators, rebuilt before every test so no state leaks
// between cases; the singleton is reset on both sides of each test.
let mockConfigManager: any;
let mockSupabaseClient: any;
let mockEventTracker: any;
let mockBatchProcessor: any;
let manager: TelemetryManager;
beforeEach(() => {
  // Reset singleton using the new method
  TelemetryManager.resetInstance();
  // Mock TelemetryConfigManager
  mockConfigManager = {
    isEnabled: vi.fn().mockReturnValue(true),
    getUserId: vi.fn().mockReturnValue('test-user-123'),
    disable: vi.fn(),
    enable: vi.fn(),
    getStatus: vi.fn().mockReturnValue('enabled')
  };
  vi.mocked(TelemetryConfigManager.getInstance).mockReturnValue(mockConfigManager);
  // Mock Supabase client
  mockSupabaseClient = {
    from: vi.fn().mockReturnValue({
      insert: vi.fn().mockResolvedValue({ data: null, error: null })
    })
  };
  vi.mocked(createClient).mockReturnValue(mockSupabaseClient);
  // Mock EventTracker
  mockEventTracker = {
    trackToolUsage: vi.fn(),
    trackWorkflowCreation: vi.fn().mockResolvedValue(undefined),
    trackError: vi.fn(),
    trackEvent: vi.fn(),
    trackSessionStart: vi.fn(),
    trackSearchQuery: vi.fn(),
    trackValidationDetails: vi.fn(),
    trackToolSequence: vi.fn(),
    trackNodeConfiguration: vi.fn(),
    trackPerformanceMetric: vi.fn(),
    updateToolSequence: vi.fn(),
    getEventQueue: vi.fn().mockReturnValue([]),
    getWorkflowQueue: vi.fn().mockReturnValue([]),
    clearEventQueue: vi.fn(),
    clearWorkflowQueue: vi.fn(),
    getStats: vi.fn().mockReturnValue({
      rateLimiter: { currentEvents: 0, droppedEvents: 0 },
      validator: { successes: 0, errors: 0 },
      eventQueueSize: 0,
      workflowQueueSize: 0,
      performanceMetrics: {}
    })
  };
  vi.mocked(TelemetryEventTracker).mockImplementation(() => mockEventTracker);
  // Mock BatchProcessor
  mockBatchProcessor = {
    start: vi.fn(),
    stop: vi.fn(),
    flush: vi.fn().mockResolvedValue(undefined),
    getMetrics: vi.fn().mockReturnValue({
      eventsTracked: 0,
      eventsDropped: 0,
      eventsFailed: 0,
      batchesSent: 0,
      batchesFailed: 0,
      averageFlushTime: 0,
      rateLimitHits: 0,
      circuitBreakerState: { state: 'closed', failureCount: 0, canRetry: true },
      deadLetterQueueSize: 0
    }),
    resetMetrics: vi.fn()
  };
  vi.mocked(TelemetryBatchProcessor).mockImplementation(() => mockBatchProcessor);
  // NOTE(review): clearAllMocks() drops the call history recorded while wiring
  // the mocks above; return values/implementations configured here are expected
  // to survive (mockReset would be needed to remove them) — confirm against vitest docs.
  vi.clearAllMocks();
});
afterEach(() => {
  // Clean up global state
  TelemetryManager.resetInstance();
});
// getInstance() must always hand back the same object.
describe('singleton behavior', () => {
  it('should create only one instance', () => {
    const instance1 = TelemetryManager.getInstance();
    const instance2 = TelemetryManager.getInstance();
    expect(instance1).toBe(instance2);
  });
  it.skip('should use global singleton for telemetry export', async () => {
    // Skip: Testing module import behavior with mocks is complex
    // The core singleton behavior is tested in other tests
    const instance = TelemetryManager.getInstance();
    // Import the telemetry export
    const { telemetry: telemetry1 } = await import('../../../src/telemetry/telemetry-manager');
    // Both should reference the same global singleton
    expect(telemetry1).toBe(instance);
  });
});
// Initialization is lazy: the first tracking call wires up the Supabase
// client and starts the batch processor; disabled or failed init must leave
// both untouched.
describe('initialization', () => {
  beforeEach(() => {
    manager = TelemetryManager.getInstance();
  });
  it('should initialize successfully when enabled', () => {
    // Trigger initialization by calling a tracking method
    manager.trackEvent('test', {});
    expect(mockConfigManager.isEnabled).toHaveBeenCalled();
    expect(createClient).toHaveBeenCalledWith(
      TELEMETRY_BACKEND.URL,
      TELEMETRY_BACKEND.ANON_KEY,
      expect.objectContaining({
        auth: {
          persistSession: false,
          autoRefreshToken: false
        }
      })
    );
    expect(mockBatchProcessor.start).toHaveBeenCalled();
  });
  it('should use environment variables if provided', () => {
    process.env.SUPABASE_URL = 'https://custom.supabase.co';
    process.env.SUPABASE_ANON_KEY = 'custom-anon-key';
    try {
      // Reset instance to trigger re-initialization
      TelemetryManager.resetInstance();
      manager = TelemetryManager.getInstance();
      // Trigger initialization
      manager.trackEvent('test', {});
      expect(createClient).toHaveBeenCalledWith(
        'https://custom.supabase.co',
        'custom-anon-key',
        expect.any(Object)
      );
    } finally {
      // Always restore the environment, even when an assertion above throws,
      // so later tests don't silently pick up these overrides.
      delete process.env.SUPABASE_URL;
      delete process.env.SUPABASE_ANON_KEY;
    }
  });
  it('should not initialize when disabled', () => {
    mockConfigManager.isEnabled.mockReturnValue(false);
    // Reset instance to trigger re-initialization
    TelemetryManager.resetInstance();
    manager = TelemetryManager.getInstance();
    expect(createClient).not.toHaveBeenCalled();
    expect(mockBatchProcessor.start).not.toHaveBeenCalled();
  });
  it('should handle initialization errors', () => {
    vi.mocked(createClient).mockImplementation(() => {
      throw new Error('Supabase initialization failed');
    });
    // Reset instance to trigger re-initialization
    TelemetryManager.resetInstance();
    manager = TelemetryManager.getInstance();
    expect(mockBatchProcessor.start).not.toHaveBeenCalled();
  });
});
// Each public tracking method must delegate straight through to the
// corresponding TelemetryEventTracker method with unchanged arguments.
describe('event tracking methods', () => {
  beforeEach(() => {
    manager = TelemetryManager.getInstance();
  });
  it('should track tool usage with sequence update', () => {
    manager.trackToolUsage('httpRequest', true, 500);
    expect(mockEventTracker.trackToolUsage).toHaveBeenCalledWith('httpRequest', true, 500);
    expect(mockEventTracker.updateToolSequence).toHaveBeenCalledWith('httpRequest');
  });
  it('should track workflow creation and auto-flush', async () => {
    const workflow = { nodes: [], connections: {} };
    await manager.trackWorkflowCreation(workflow, true);
    expect(mockEventTracker.trackWorkflowCreation).toHaveBeenCalledWith(workflow, true);
    // Workflow creation triggers an immediate flush rather than waiting for the batch timer.
    expect(mockBatchProcessor.flush).toHaveBeenCalled();
  });
  it('should handle workflow creation errors', async () => {
    const workflow = { nodes: [], connections: {} };
    const error = new Error('Workflow tracking failed');
    mockEventTracker.trackWorkflowCreation.mockRejectedValue(error);
    await manager.trackWorkflowCreation(workflow, true);
    // Should not throw, but should handle error internally
    expect(mockEventTracker.trackWorkflowCreation).toHaveBeenCalledWith(workflow, true);
  });
  it('should track errors', () => {
    manager.trackError('ValidationError', 'Node configuration invalid', 'httpRequest', 'Required field "url" is missing');
    expect(mockEventTracker.trackError).toHaveBeenCalledWith(
      'ValidationError',
      'Node configuration invalid',
      'httpRequest',
      'Required field "url" is missing'
    );
  });
  it('should track generic events', () => {
    const properties = { key: 'value', count: 42 };
    manager.trackEvent('custom_event', properties);
    expect(mockEventTracker.trackEvent).toHaveBeenCalledWith('custom_event', properties);
  });
  it('should track session start', () => {
    manager.trackSessionStart();
    expect(mockEventTracker.trackSessionStart).toHaveBeenCalled();
  });
  it('should track search queries', () => {
    manager.trackSearchQuery('httpRequest nodes', 5, 'nodes');
    expect(mockEventTracker.trackSearchQuery).toHaveBeenCalledWith(
      'httpRequest nodes',
      5,
      'nodes'
    );
  });
  it('should track validation details', () => {
    const details = { field: 'url', value: 'invalid' };
    manager.trackValidationDetails('nodes-base.httpRequest', 'required_field_missing', details);
    expect(mockEventTracker.trackValidationDetails).toHaveBeenCalledWith(
      'nodes-base.httpRequest',
      'required_field_missing',
      details
    );
  });
  it('should track tool sequences', () => {
    manager.trackToolSequence('httpRequest', 'webhook', 5000);
    expect(mockEventTracker.trackToolSequence).toHaveBeenCalledWith(
      'httpRequest',
      'webhook',
      5000
    );
  });
  it('should track node configuration', () => {
    manager.trackNodeConfiguration('nodes-base.httpRequest', 5, false);
    expect(mockEventTracker.trackNodeConfiguration).toHaveBeenCalledWith(
      'nodes-base.httpRequest',
      5,
      false
    );
  });
  it('should track performance metrics', () => {
    const metadata = { operation: 'database_query' };
    manager.trackPerformanceMetric('search_nodes', 1500, metadata);
    expect(mockEventTracker.trackPerformanceMetric).toHaveBeenCalledWith(
      'search_nodes',
      1500,
      metadata
    );
  });
});
// flush() drains both queues and hands them to the batch processor; it must
// be a no-op when telemetry is disabled or initialization failed, and must
// never propagate processor errors.
describe('flush()', () => {
  beforeEach(() => {
    manager = TelemetryManager.getInstance();
  });
  it('should flush events and workflows', async () => {
    const mockEvents = [{ user_id: 'user1', event: 'test', properties: {} }];
    const mockWorkflows = [{ user_id: 'user1', workflow_hash: 'hash1' }];
    mockEventTracker.getEventQueue.mockReturnValue(mockEvents);
    mockEventTracker.getWorkflowQueue.mockReturnValue(mockWorkflows);
    await manager.flush();
    expect(mockEventTracker.getEventQueue).toHaveBeenCalled();
    expect(mockEventTracker.getWorkflowQueue).toHaveBeenCalled();
    expect(mockEventTracker.clearEventQueue).toHaveBeenCalled();
    expect(mockEventTracker.clearWorkflowQueue).toHaveBeenCalled();
    expect(mockBatchProcessor.flush).toHaveBeenCalledWith(mockEvents, mockWorkflows);
  });
  it('should not flush when disabled', async () => {
    mockConfigManager.isEnabled.mockReturnValue(false);
    await manager.flush();
    expect(mockBatchProcessor.flush).not.toHaveBeenCalled();
  });
  it('should not flush without Supabase client', async () => {
    // Simulate initialization failure
    vi.mocked(createClient).mockImplementation(() => {
      throw new Error('Init failed');
    });
    // Reset via the public test hook (consistent with the rest of the suite,
    // instead of poking the private field through an `as any` cast).
    TelemetryManager.resetInstance();
    manager = TelemetryManager.getInstance();
    await manager.flush();
    expect(mockBatchProcessor.flush).not.toHaveBeenCalled();
  });
  it('should handle flush errors gracefully', async () => {
    const error = new Error('Flush failed');
    mockBatchProcessor.flush.mockRejectedValue(error);
    await manager.flush();
    // Should not throw, error should be handled internally
    expect(mockBatchProcessor.flush).toHaveBeenCalled();
  });
  it('should handle TelemetryError specifically', async () => {
    const telemetryError = new TelemetryError(
      TelemetryErrorType.NETWORK_ERROR,
      'Network failed',
      { attempt: 1 },
      true
    );
    mockBatchProcessor.flush.mockRejectedValue(telemetryError);
    await manager.flush();
    expect(mockBatchProcessor.flush).toHaveBeenCalled();
  });
});
// enable()/disable() delegate to the config manager and start/stop the
// batch processing pipeline accordingly.
describe('enable/disable functionality', () => {
  beforeEach(() => {
    manager = TelemetryManager.getInstance();
  });
  it('should disable telemetry', () => {
    manager.disable();
    expect(mockConfigManager.disable).toHaveBeenCalled();
    expect(mockBatchProcessor.stop).toHaveBeenCalled();
  });
  it('should enable telemetry', () => {
    // Disable first to clear state
    manager.disable();
    vi.clearAllMocks();
    // Now enable
    manager.enable();
    expect(mockConfigManager.enable).toHaveBeenCalled();
    // Should initialize (createClient called once)
    expect(createClient).toHaveBeenCalledTimes(1);
  });
  it('should get status from config manager', () => {
    const status = manager.getStatus();
    expect(mockConfigManager.getStatus).toHaveBeenCalled();
    expect(status).toBe('enabled');
  });
});
// getMetrics() snapshots status plus tracker/processor statistics; the
// 'initialized' flag must reflect disabled and failed-init states.
describe('getMetrics()', () => {
  beforeEach(() => {
    manager = TelemetryManager.getInstance();
    // Trigger initialization for enabled tests
    manager.trackEvent('test', {});
  });
  it('should return comprehensive metrics when enabled', () => {
    const metrics = manager.getMetrics();
    expect(metrics).toEqual({
      status: 'enabled',
      initialized: true,
      tracking: expect.any(Object),
      processing: expect.any(Object),
      errors: expect.any(Object),
      performance: expect.any(Object),
      overhead: expect.any(Object)
    });
    expect(mockEventTracker.getStats).toHaveBeenCalled();
    expect(mockBatchProcessor.getMetrics).toHaveBeenCalled();
  });
  it('should return disabled status when disabled', () => {
    mockConfigManager.isEnabled.mockReturnValue(false);
    // Reset to get a fresh instance without initialization
    TelemetryManager.resetInstance();
    manager = TelemetryManager.getInstance();
    const metrics = manager.getMetrics();
    expect(metrics.status).toBe('disabled');
    expect(metrics.initialized).toBe(false); // Not initialized when disabled
  });
  it('should reflect initialization failure', () => {
    // Simulate initialization failure
    vi.mocked(createClient).mockImplementation(() => {
      throw new Error('Init failed');
    });
    // Reset via the public test hook (consistent with the rest of the suite,
    // instead of poking the private field through an `as any` cast).
    TelemetryManager.resetInstance();
    manager = TelemetryManager.getInstance();
    const metrics = manager.getMetrics();
    expect(metrics.initialized).toBe(false);
  });
});
// Errors raised anywhere in the pipeline must be swallowed but counted,
// surfacing via getMetrics().errors.totalErrors.
describe('error handling and aggregation', () => {
  beforeEach(() => {
    manager = TelemetryManager.getInstance();
  });
  it('should aggregate initialization errors', () => {
    vi.mocked(createClient).mockImplementation(() => {
      throw new Error('Supabase connection failed');
    });
    // Reset instance to trigger re-initialization with error
    TelemetryManager.resetInstance();
    manager = TelemetryManager.getInstance();
    // Trigger initialization which will fail
    manager.trackEvent('test', {});
    const metrics = manager.getMetrics();
    expect(metrics.errors.totalErrors).toBeGreaterThan(0);
  });
  it('should aggregate workflow tracking errors', async () => {
    const error = new TelemetryError(
      TelemetryErrorType.VALIDATION_ERROR,
      'Workflow validation failed'
    );
    mockEventTracker.trackWorkflowCreation.mockRejectedValue(error);
    const workflow = { nodes: [], connections: {} };
    await manager.trackWorkflowCreation(workflow, true);
    const metrics = manager.getMetrics();
    expect(metrics.errors.totalErrors).toBeGreaterThan(0);
  });
  it('should aggregate flush errors', async () => {
    const error = new Error('Network timeout');
    mockBatchProcessor.flush.mockRejectedValue(error);
    await manager.flush();
    const metrics = manager.getMetrics();
    expect(metrics.errors.totalErrors).toBeGreaterThan(0);
  });
});
// The constructor must be guarded at runtime, not only by TypeScript's
// `private` modifier (which is erased at compile time).
describe('constructor privacy', () => {
it('should have private constructor', () => {
// Ensure there's already an instance
TelemetryManager.getInstance();
// Now trying to instantiate directly should throw
expect(() => new (TelemetryManager as any)()).toThrow('Use TelemetryManager.getInstance() instead of new TelemetryManager()');
});
});
// Verifies the internal enabled/initialized gating of public operations.
describe('isEnabled() privacy', () => {
  beforeEach(() => {
    manager = TelemetryManager.getInstance();
  });

  it('should correctly check enabled state', async () => {
    mockConfigManager.isEnabled.mockReturnValue(true);
    await manager.flush();
    // Enabled and initialized: flush delegates to the batch processor.
    expect(mockBatchProcessor.flush).toHaveBeenCalled();
  });

  it('should prevent operations when not initialized', async () => {
    // Simulate initialization failure
    vi.mocked(createClient).mockImplementation(() => {
      throw new Error('Init failed');
    });
    // Use the supported reset API (consistent with the rest of this file)
    // instead of assigning to the private static `instance` field.
    TelemetryManager.resetInstance();
    manager = TelemetryManager.getInstance();
    await manager.flush();
    // Failed initialization must short-circuit flush before delegation.
    expect(mockBatchProcessor.flush).not.toHaveBeenCalled();
  });
});
// The manager wires callbacks into its collaborators at construction time;
// these tests capture and invoke the callbacks to verify the wiring.
describe('dependency injection and callbacks', () => {
it('should provide correct callbacks to EventTracker', () => {
const TelemetryEventTrackerMock = vi.mocked(TelemetryEventTracker);
const manager = TelemetryManager.getInstance();
// Trigger initialization
manager.trackEvent('test', {});
expect(TelemetryEventTrackerMock).toHaveBeenCalledWith(
expect.any(Function), // getUserId callback
expect.any(Function) // isEnabled callback
);
// Test the callbacks
const [getUserIdCallback, isEnabledCallback] = TelemetryEventTrackerMock.mock.calls[0];
expect(getUserIdCallback()).toBe('test-user-123');
expect(isEnabledCallback()).toBe(true);
});
it('should provide correct callbacks to BatchProcessor', () => {
const TelemetryBatchProcessorMock = vi.mocked(TelemetryBatchProcessor);
const manager = TelemetryManager.getInstance();
// Trigger initialization
manager.trackEvent('test', {});
expect(TelemetryBatchProcessorMock).toHaveBeenCalledTimes(2); // Once with null, once with Supabase client
// Inspect the most recent construction (the one with the live client).
const lastCall = TelemetryBatchProcessorMock.mock.calls[TelemetryBatchProcessorMock.mock.calls.length - 1];
const [supabaseClient, isEnabledCallback] = lastCall;
expect(supabaseClient).toBe(mockSupabaseClient);
expect(isEnabledCallback()).toBe(true);
});
});
// Pins the exact Supabase client options: no session persistence or token
// refresh (server-side usage) and realtime throttled to 1 event/second.
describe('Supabase client configuration', () => {
beforeEach(() => {
manager = TelemetryManager.getInstance();
// Trigger initialization
manager.trackEvent('test', {});
});
it('should configure Supabase client with correct options', () => {
expect(createClient).toHaveBeenCalledWith(
TELEMETRY_BACKEND.URL,
TELEMETRY_BACKEND.ANON_KEY,
{
auth: {
persistSession: false,
autoRefreshToken: false
},
realtime: {
params: {
eventsPerSecond: 1
}
}
}
);
});
});
// trackWorkflowCreation() should flush eagerly on success so workflow events
// reach the backend promptly, but must not flush after a tracking failure.
describe('workflow creation auto-flush behavior', () => {
beforeEach(() => {
manager = TelemetryManager.getInstance();
});
it('should auto-flush after successful workflow tracking', async () => {
const workflow = { nodes: [], connections: {} };
await manager.trackWorkflowCreation(workflow, true);
expect(mockEventTracker.trackWorkflowCreation).toHaveBeenCalledWith(workflow, true);
expect(mockBatchProcessor.flush).toHaveBeenCalled();
});
it('should not auto-flush if workflow tracking fails', async () => {
const workflow = { nodes: [], connections: {} };
mockEventTracker.trackWorkflowCreation.mockRejectedValue(new Error('Tracking failed'));
await manager.trackWorkflowCreation(workflow, true);
expect(mockEventTracker.trackWorkflowCreation).toHaveBeenCalledWith(workflow, true);
// Flush should NOT be called if tracking fails
expect(mockBatchProcessor.flush).not.toHaveBeenCalled();
});
});
// Singleton lifecycle: resetInstance() discards the cached instance, and
// getInstance() memoizes the next one it creates.
describe('global singleton behavior', () => {
it('should preserve singleton across require() calls', async () => {
// Get the first instance
const manager1 = TelemetryManager.getInstance();
// Clear and re-get the instance - should be same due to global state
TelemetryManager.resetInstance();
const manager2 = TelemetryManager.getInstance();
// They should be different instances after reset
expect(manager2).not.toBe(manager1);
// But subsequent calls should return the same instance
const manager3 = TelemetryManager.getInstance();
expect(manager3).toBe(manager2);
});
it.skip('should handle undefined global state gracefully', async () => {
// Skip: Testing module import behavior with mocks is complex
// The core singleton behavior is tested in other tests
// Ensure clean state
TelemetryManager.resetInstance();
const manager1 = TelemetryManager.getInstance();
expect(manager1).toBeDefined();
// Import telemetry - it should use the same global instance
const { telemetry } = await import('../../../src/telemetry/telemetry-manager');
expect(telemetry).toBeDefined();
expect(telemetry).toBe(manager1);
});
});
});
```
--------------------------------------------------------------------------------
/tests/unit/telemetry/workflow-sanitizer.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect } from 'vitest';
import { WorkflowSanitizer } from '../../../src/telemetry/workflow-sanitizer';
describe('WorkflowSanitizer', () => {
// Happy-path sanitization: secret-looking values become '[REDACTED]',
// benign/structural values survive, and summary metrics are derived.
describe('sanitizeWorkflow', () => {
it('should remove API keys from parameters', () => {
const workflow = {
nodes: [
{
id: '1',
name: 'HTTP Request',
type: 'n8n-nodes-base.httpRequest',
position: [100, 100],
parameters: {
url: 'https://api.example.com',
apiKey: 'sk-1234567890abcdef1234567890abcdef',
headers: {
'Authorization': 'Bearer sk-1234567890abcdef1234567890abcdef'
}
}
}
],
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
expect(sanitized.nodes[0].parameters.apiKey).toBe('[REDACTED]');
expect(sanitized.nodes[0].parameters.headers.Authorization).toBe('[REDACTED]');
});
// URL-bearing secret fields are redacted; routing metadata is preserved.
it('should sanitize webhook URLs but keep structure', () => {
const workflow = {
nodes: [
{
id: '1',
name: 'Webhook',
type: 'n8n-nodes-base.webhook',
position: [100, 100],
parameters: {
path: 'my-webhook',
webhookUrl: 'https://n8n.example.com/webhook/abc-def-ghi',
method: 'POST'
}
}
],
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
expect(sanitized.nodes[0].parameters.webhookUrl).toBe('[REDACTED]');
expect(sanitized.nodes[0].parameters.method).toBe('POST'); // Method should remain
expect(sanitized.nodes[0].parameters.path).toBe('my-webhook'); // Path should remain
});
// The node-level `credentials` object is stripped entirely, not redacted.
it('should remove credentials entirely', () => {
const workflow = {
nodes: [
{
id: '1',
name: 'Slack',
type: 'n8n-nodes-base.slack',
position: [100, 100],
parameters: {
channel: 'general',
text: 'Hello World'
},
credentials: {
slackApi: {
id: 'cred-123',
name: 'My Slack'
}
}
}
],
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
expect(sanitized.nodes[0].credentials).toBeUndefined();
expect(sanitized.nodes[0].parameters.channel).toBe('general'); // Channel should remain
expect(sanitized.nodes[0].parameters.text).toBe('Hello World'); // Text should remain
});
// All URL-valued parameters are redacted regardless of key name.
it('should sanitize URLs in parameters', () => {
const workflow = {
nodes: [
{
id: '1',
name: 'HTTP Request',
type: 'n8n-nodes-base.httpRequest',
position: [100, 100],
parameters: {
url: 'https://api.example.com/endpoint',
endpoint: 'https://another.example.com/api',
baseUrl: 'https://base.example.com'
}
}
],
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
expect(sanitized.nodes[0].parameters.url).toBe('[REDACTED]');
expect(sanitized.nodes[0].parameters.endpoint).toBe('[REDACTED]');
expect(sanitized.nodes[0].parameters.baseUrl).toBe('[REDACTED]');
});
// Derived metrics: node count, distinct types, trigger/webhook flags, and
// a complexity bucket (3 nodes => 'simple').
it('should calculate workflow metrics correctly', () => {
const workflow = {
nodes: [
{
id: '1',
name: 'Webhook',
type: 'n8n-nodes-base.webhook',
position: [100, 100],
parameters: {}
},
{
id: '2',
name: 'HTTP Request',
type: 'n8n-nodes-base.httpRequest',
position: [200, 100],
parameters: {}
},
{
id: '3',
name: 'Slack',
type: 'n8n-nodes-base.slack',
position: [300, 100],
parameters: {}
}
],
connections: {
'1': {
main: [[{ node: '2', type: 'main', index: 0 }]]
},
'2': {
main: [[{ node: '3', type: 'main', index: 0 }]]
}
}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
expect(sanitized.nodeCount).toBe(3);
expect(sanitized.nodeTypes).toContain('n8n-nodes-base.webhook');
expect(sanitized.nodeTypes).toContain('n8n-nodes-base.httpRequest');
expect(sanitized.nodeTypes).toContain('n8n-nodes-base.slack');
expect(sanitized.hasTrigger).toBe(true);
expect(sanitized.hasWebhook).toBe(true);
expect(sanitized.complexity).toBe('simple');
});
// Complexity thresholds exercised at 5 / 15 / 25 nodes.
it('should calculate complexity based on node count', () => {
const createWorkflow = (nodeCount: number) => ({
nodes: Array.from({ length: nodeCount }, (_, i) => ({
id: String(i),
name: `Node ${i}`,
type: 'n8n-nodes-base.function',
position: [i * 100, 100],
parameters: {}
})),
connections: {}
});
const simple = WorkflowSanitizer.sanitizeWorkflow(createWorkflow(5));
expect(simple.complexity).toBe('simple');
const medium = WorkflowSanitizer.sanitizeWorkflow(createWorkflow(15));
expect(medium.complexity).toBe('medium');
const complex = WorkflowSanitizer.sanitizeWorkflow(createWorkflow(25));
expect(complex.complexity).toBe('complex');
});
// Hashing must be deterministic and emit 16 lowercase hex characters.
it('should generate consistent workflow hash', () => {
const workflow = {
nodes: [
{
id: '1',
name: 'Webhook',
type: 'n8n-nodes-base.webhook',
position: [100, 100],
parameters: { path: 'test' }
}
],
connections: {}
};
const hash1 = WorkflowSanitizer.generateWorkflowHash(workflow);
const hash2 = WorkflowSanitizer.generateWorkflowHash(workflow);
expect(hash1).toBe(hash2);
expect(hash1).toMatch(/^[a-f0-9]{16}$/);
});
// Redaction recurses into nested parameter objects.
it('should sanitize nested objects in parameters', () => {
const workflow = {
nodes: [
{
id: '1',
name: 'Complex Node',
type: 'n8n-nodes-base.httpRequest',
position: [100, 100],
parameters: {
options: {
headers: {
'X-API-Key': 'secret-key-1234567890abcdef',
'Content-Type': 'application/json'
},
body: {
data: 'some data',
token: 'another-secret-token-xyz123'
}
}
}
}
],
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
expect(sanitized.nodes[0].parameters.options.headers['X-API-Key']).toBe('[REDACTED]');
expect(sanitized.nodes[0].parameters.options.headers['Content-Type']).toBe('application/json');
expect(sanitized.nodes[0].parameters.options.body.data).toBe('some data');
expect(sanitized.nodes[0].parameters.options.body.token).toBe('[REDACTED]');
});
// The connection graph (including non-main ports) passes through untouched.
it('should preserve connections structure', () => {
const workflow = {
nodes: [
{
id: '1',
name: 'Node 1',
type: 'n8n-nodes-base.start',
position: [100, 100],
parameters: {}
},
{
id: '2',
name: 'Node 2',
type: 'n8n-nodes-base.function',
position: [200, 100],
parameters: {}
}
],
connections: {
'1': {
main: [[{ node: '2', type: 'main', index: 0 }]],
error: [[{ node: '2', type: 'error', index: 0 }]]
}
}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
expect(sanitized.connections).toEqual({
'1': {
main: [[{ node: '2', type: 'main', index: 0 }]],
error: [[{ node: '2', type: 'error', index: 0 }]]
}
});
});
// Workflow-level metadata (ownership, settings, pin/static data, creds)
// must be absent from the sanitized output shape.
it('should remove sensitive workflow metadata', () => {
const workflow = {
id: 'workflow-123',
name: 'My Workflow',
nodes: [],
connections: {},
settings: {
errorWorkflow: 'error-workflow-id',
timezone: 'America/New_York'
},
staticData: { some: 'data' },
pinData: { node1: 'pinned' },
credentials: { slack: 'cred-123' },
sharedWorkflows: ['user-456'],
ownedBy: 'user-123',
createdBy: 'user-123',
updatedBy: 'user-456'
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
// Verify that sensitive workflow-level properties are not in the sanitized output
// The sanitized workflow should only have specific fields as defined in SanitizedWorkflow interface
expect(sanitized.nodes).toEqual([]);
expect(sanitized.connections).toEqual({});
expect(sanitized.nodeCount).toBe(0);
expect(sanitized.nodeTypes).toEqual([]);
// Verify these fields don't exist in the sanitized output
const sanitizedAsAny = sanitized as any;
expect(sanitizedAsAny.settings).toBeUndefined();
expect(sanitizedAsAny.staticData).toBeUndefined();
expect(sanitizedAsAny.pinData).toBeUndefined();
expect(sanitizedAsAny.credentials).toBeUndefined();
expect(sanitizedAsAny.sharedWorkflows).toBeUndefined();
expect(sanitizedAsAny.ownedBy).toBeUndefined();
expect(sanitizedAsAny.createdBy).toBeUndefined();
expect(sanitizedAsAny.updatedBy).toBeUndefined();
});
});
// Defensive behavior on absent/odd input, plus broad coverage of the
// secret-detection heuristics on key names and value patterns.
describe('edge cases and error handling', () => {
it('should handle null or undefined workflow', () => {
// The actual implementation will throw because JSON.parse(JSON.stringify(null)) is valid but creates issues
expect(() => WorkflowSanitizer.sanitizeWorkflow(null as any)).toThrow();
expect(() => WorkflowSanitizer.sanitizeWorkflow(undefined as any)).toThrow();
});
// Missing `nodes` defaults to empty output and false flags.
it('should handle workflow without nodes', () => {
const workflow = {
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
expect(sanitized.nodeCount).toBe(0);
expect(sanitized.nodeTypes).toEqual([]);
expect(sanitized.nodes).toEqual([]);
expect(sanitized.hasTrigger).toBe(false);
expect(sanitized.hasWebhook).toBe(false);
});
it('should handle workflow without connections', () => {
const workflow = {
nodes: [
{
id: '1',
name: 'Test Node',
type: 'n8n-nodes-base.function',
position: [100, 100],
parameters: {}
}
]
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
expect(sanitized.connections).toEqual({});
expect(sanitized.nodeCount).toBe(1);
});
// NOTE(review): despite the name, this fixture contains only a valid node;
// truly malformed entries are not exercised here — consider extending.
it('should handle malformed nodes array', () => {
const workflow = {
nodes: [
{
id: '2',
name: 'Valid Node',
type: 'n8n-nodes-base.function',
position: [100, 100],
parameters: {}
}
],
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
// Should handle workflow gracefully
expect(sanitized.nodeCount).toBe(1);
expect(sanitized.nodes.length).toBe(1);
});
// Redaction must reach arbitrarily deep nesting, not a fixed depth.
it('should handle deeply nested objects in parameters', () => {
const workflow = {
nodes: [
{
id: '1',
name: 'Deep Node',
type: 'n8n-nodes-base.httpRequest',
position: [100, 100],
parameters: {
level1: {
level2: {
level3: {
level4: {
level5: {
secret: 'deep-secret-key-1234567890abcdef',
safe: 'safe-value'
}
}
}
}
}
}
}
],
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
expect(sanitized.nodes[0].parameters.level1.level2.level3.level4.level5.secret).toBe('[REDACTED]');
expect(sanitized.nodes[0].parameters.level1.level2.level3.level4.level5.safe).toBe('safe-value');
});
// JSON.stringify cannot serialize cycles, so sanitization throws here.
it('should handle circular references gracefully', () => {
const workflow: any = {
nodes: [
{
id: '1',
name: 'Circular Node',
type: 'n8n-nodes-base.function',
position: [100, 100],
parameters: {}
}
],
connections: {}
};
// Create circular reference
workflow.nodes[0].parameters.selfRef = workflow.nodes[0];
// JSON.stringify throws on circular references, so this should throw
expect(() => WorkflowSanitizer.sanitizeWorkflow(workflow)).toThrow();
});
// 1000 nodes with large string parameters must still sanitize (no limits).
it('should handle extremely large workflows', () => {
const largeWorkflow = {
nodes: Array.from({ length: 1000 }, (_, i) => ({
id: String(i),
name: `Node ${i}`,
type: 'n8n-nodes-base.function',
position: [i * 10, 100],
parameters: {
code: `// Node ${i} code here`.repeat(100) // Large parameter
}
})),
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(largeWorkflow);
expect(sanitized.nodeCount).toBe(1000);
expect(sanitized.complexity).toBe('complex');
});
// Grab-bag of common secret key names vs. benign config values.
it('should handle various sensitive data patterns', () => {
const workflow = {
nodes: [
{
id: '1',
name: 'Sensitive Node',
type: 'n8n-nodes-base.httpRequest',
position: [100, 100],
parameters: {
// Different patterns of sensitive data
api_key: 'sk-1234567890abcdef1234567890abcdef',
accessToken: 'ghp_abcdefghijklmnopqrstuvwxyz123456',
secret_token: 'secret-123-abc-def',
authKey: 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9',
clientSecret: 'abc123def456ghi789',
webhookUrl: 'https://hooks.example.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX',
databaseUrl: 'postgres://user:password@localhost:5432/db',
connectionString: 'Server=myServerAddress;Database=myDataBase;Uid=myUsername;Pwd=myPassword;',
// Safe values that should remain
timeout: 5000,
method: 'POST',
retries: 3,
name: 'My API Call'
}
}
],
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
const params = sanitized.nodes[0].parameters;
expect(params.api_key).toBe('[REDACTED]');
expect(params.accessToken).toBe('[REDACTED]');
expect(params.secret_token).toBe('[REDACTED]');
expect(params.authKey).toBe('[REDACTED]');
expect(params.clientSecret).toBe('[REDACTED]');
expect(params.webhookUrl).toBe('[REDACTED]');
expect(params.databaseUrl).toBe('[REDACTED]');
expect(params.connectionString).toBe('[REDACTED]');
// Safe values should remain
expect(params.timeout).toBe(5000);
expect(params.method).toBe('POST');
expect(params.retries).toBe(3);
expect(params.name).toBe('My API Call');
});
// Objects inside arrays are sanitized element-by-element; plain arrays of
// safe values survive intact.
it('should handle arrays in parameters', () => {
const workflow = {
nodes: [
{
id: '1',
name: 'Array Node',
type: 'n8n-nodes-base.httpRequest',
position: [100, 100],
parameters: {
headers: [
{ name: 'Authorization', value: 'Bearer secret-token-123456789' },
{ name: 'Content-Type', value: 'application/json' },
{ name: 'X-API-Key', value: 'api-key-abcdefghijklmnopqrstuvwxyz' }
],
methods: ['GET', 'POST']
}
}
],
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
const headers = sanitized.nodes[0].parameters.headers;
expect(headers[0].value).toBe('[REDACTED]'); // Authorization
expect(headers[1].value).toBe('application/json'); // Content-Type (safe)
expect(headers[2].value).toBe('[REDACTED]'); // X-API-Key
expect(sanitized.nodes[0].parameters.methods).toEqual(['GET', 'POST']); // Array should remain
});
// Note: `undefined` (and Date instances) do not survive a JSON round-trip,
// so toBeUndefined() passes whether the key is absent or undefined.
it('should handle mixed data types in parameters', () => {
const workflow = {
nodes: [
{
id: '1',
name: 'Mixed Node',
type: 'n8n-nodes-base.function',
position: [100, 100],
parameters: {
numberValue: 42,
booleanValue: true,
stringValue: 'safe string',
nullValue: null,
undefinedValue: undefined,
dateValue: new Date('2024-01-01'),
arrayValue: [1, 2, 3],
nestedObject: {
secret: 'secret-key-12345678',
safe: 'safe-value'
}
}
}
],
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
const params = sanitized.nodes[0].parameters;
expect(params.numberValue).toBe(42);
expect(params.booleanValue).toBe(true);
expect(params.stringValue).toBe('safe string');
expect(params.nullValue).toBeNull();
expect(params.undefinedValue).toBeUndefined();
expect(params.arrayValue).toEqual([1, 2, 3]);
expect(params.nestedObject.secret).toBe('[REDACTED]');
expect(params.nestedObject.safe).toBe('safe-value');
});
it('should handle missing node properties gracefully', () => {
const workflow = {
nodes: [
{ id: '3', name: 'Complete', type: 'n8n-nodes-base.function' } // Missing position but has required fields
],
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
expect(sanitized.nodes).toBeDefined();
expect(sanitized.nodeCount).toBe(1);
});
// Branch/merge topology with multi-output connections survives verbatim.
it('should handle complex connection structures', () => {
const workflow = {
nodes: [
{ id: '1', name: 'Start', type: 'n8n-nodes-base.start', position: [0, 0], parameters: {} },
{ id: '2', name: 'Branch', type: 'n8n-nodes-base.if', position: [100, 0], parameters: {} },
{ id: '3', name: 'Path A', type: 'n8n-nodes-base.function', position: [200, 0], parameters: {} },
{ id: '4', name: 'Path B', type: 'n8n-nodes-base.function', position: [200, 100], parameters: {} },
{ id: '5', name: 'Merge', type: 'n8n-nodes-base.merge', position: [300, 50], parameters: {} }
],
connections: {
'1': {
main: [[{ node: '2', type: 'main', index: 0 }]]
},
'2': {
main: [
[{ node: '3', type: 'main', index: 0 }],
[{ node: '4', type: 'main', index: 0 }]
]
},
'3': {
main: [[{ node: '5', type: 'main', index: 0 }]]
},
'4': {
main: [[{ node: '5', type: 'main', index: 1 }]]
}
}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
expect(sanitized.connections).toEqual(workflow.connections);
expect(sanitized.nodeCount).toBe(5);
expect(sanitized.complexity).toBe('simple'); // 5 nodes = simple
});
it('should generate different hashes for different workflows', () => {
const workflow1 = {
nodes: [{ id: '1', name: 'Node1', type: 'type1', position: [0, 0], parameters: {} }],
connections: {}
};
const workflow2 = {
nodes: [{ id: '1', name: 'Node2', type: 'type2', position: [0, 0], parameters: {} }],
connections: {}
};
const hash1 = WorkflowSanitizer.generateWorkflowHash(workflow1);
const hash2 = WorkflowSanitizer.generateWorkflowHash(workflow2);
expect(hash1).not.toBe(hash2);
expect(hash1).toMatch(/^[a-f0-9]{16}$/);
expect(hash2).toMatch(/^[a-f0-9]{16}$/);
});
it('should handle workflow with only trigger nodes', () => {
const workflow = {
nodes: [
{ id: '1', name: 'Cron', type: 'n8n-nodes-base.cron', position: [0, 0], parameters: {} },
{ id: '2', name: 'Webhook', type: 'n8n-nodes-base.webhook', position: [100, 0], parameters: {} }
],
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
expect(sanitized.hasTrigger).toBe(true);
expect(sanitized.hasWebhook).toBe(true);
expect(sanitized.nodeTypes).toContain('n8n-nodes-base.cron');
expect(sanitized.nodeTypes).toContain('n8n-nodes-base.webhook');
});
// Unicode names/values must pass through without mangling.
it('should handle workflow with special characters in node names and types', () => {
const workflow = {
nodes: [
{
id: '1',
name: 'Node with émojis 🚀 and specíal chars',
type: 'n8n-nodes-base.function',
position: [0, 0],
parameters: {
message: 'Test with émojis 🎉 and URLs https://example.com'
}
}
],
connections: {}
};
const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
expect(sanitized.nodeCount).toBe(1);
expect(sanitized.nodes[0].name).toBe('Node with émojis 🚀 and specíal chars');
});
});
});
```
--------------------------------------------------------------------------------
/tests/unit/services/workflow-validator-mocks.test.ts:
--------------------------------------------------------------------------------
```typescript
import { describe, it, expect, beforeEach, vi, type Mock } from 'vitest';
import { WorkflowValidator } from '@/services/workflow-validator';
import { NodeRepository } from '@/database/node-repository';
import { EnhancedConfigValidator } from '@/services/enhanced-config-validator';
vi.mock('@/utils/logger');
describe('WorkflowValidator - Mock-based Unit Tests', () => {
let validator: WorkflowValidator;
let mockNodeRepository: any;
let mockGetNode: Mock;
// Fresh validator per test, backed by a spy repository so each test can
// control exactly what node metadata the validator sees.
beforeEach(() => {
vi.clearAllMocks();
// Create detailed mock repository with spy functions
mockGetNode = vi.fn();
mockNodeRepository = {
getNode: mockGetNode
};
validator = new WorkflowValidator(mockNodeRepository, EnhancedConfigValidator);
// Default mock responses
// Substring matching returns minimal metadata for the node types used in
// these tests; anything else resolves to null (repository miss).
mockGetNode.mockImplementation((type: string) => {
if (type.includes('httpRequest')) {
return {
node_type: type,
display_name: 'HTTP Request',
isVersioned: true,
version: 4
};
} else if (type.includes('set')) {
return {
node_type: type,
display_name: 'Set',
isVersioned: true,
version: 3
};
} else if (type.includes('respondToWebhook')) {
return {
node_type: type,
display_name: 'Respond to Webhook',
isVersioned: true,
version: 1
};
}
return null;
});
});
// The validator flags an "error handler"-looking node wired into the same
// main[0] output as the success path (it should be on main[1] instead).
// These tests probe the name- and type-based detection heuristics.
describe('Error Handler Detection Logic', () => {
it('should correctly identify error handlers by node name patterns', async () => {
// Names that the heuristic should classify as error handlers.
const errorNodeNames = [
'Error Handler',
'Handle Error',
'Catch Exception',
'Failure Response',
'Error Notification',
'Fail Safe',
'Exception Handler',
'Error Callback'
];
// Names that must NOT trigger the heuristic.
const successNodeNames = [
'Process Data',
'Transform',
'Success Handler',
'Continue Process',
'Normal Flow'
];
for (const errorName of errorNodeNames) {
const workflow = {
nodes: [
{
id: '1',
name: 'Source',
type: 'n8n-nodes-base.httpRequest',
position: [0, 0],
parameters: {}
},
{
id: '2',
name: 'Success Path',
type: 'n8n-nodes-base.set',
position: [200, 0],
parameters: {}
},
{
id: '3',
name: errorName,
type: 'n8n-nodes-base.set',
position: [200, 100],
parameters: {}
}
],
connections: {
'Source': {
main: [
[
{ node: 'Success Path', type: 'main', index: 0 },
{ node: errorName, type: 'main', index: 0 } // Should be detected as error handler
]
]
}
}
};
const result = await validator.validateWorkflow(workflow as any);
// Should detect this as an incorrect error configuration
const hasError = result.errors.some(e =>
e.message.includes('Incorrect error output configuration') &&
e.message.includes(errorName)
);
expect(hasError).toBe(true);
}
// Test that success node names are NOT flagged
for (const successName of successNodeNames) {
const workflow = {
nodes: [
{
id: '1',
name: 'Source',
type: 'n8n-nodes-base.httpRequest',
position: [0, 0],
parameters: {}
},
{
id: '2',
name: 'First Process',
type: 'n8n-nodes-base.set',
position: [200, 0],
parameters: {}
},
{
id: '3',
name: successName,
type: 'n8n-nodes-base.set',
position: [200, 100],
parameters: {}
}
],
connections: {
'Source': {
main: [
[
{ node: 'First Process', type: 'main', index: 0 },
{ node: successName, type: 'main', index: 0 }
]
]
}
}
};
const result = await validator.validateWorkflow(workflow as any);
// Should NOT detect this as an error configuration
const hasError = result.errors.some(e =>
e.message.includes('Incorrect error output configuration')
);
expect(hasError).toBe(false);
}
});
// Same detection, keyed on node type rather than node name.
it('should correctly identify error handlers by node type patterns', async () => {
const errorNodeTypes = [
'n8n-nodes-base.respondToWebhook',
'n8n-nodes-base.emailSend'
// Note: slack and webhook are not in the current detection logic
];
// Update mock to return appropriate node info for these types
mockGetNode.mockImplementation((type: string) => {
return {
node_type: type,
display_name: type.split('.').pop() || 'Unknown',
isVersioned: true,
version: 1
};
});
for (const nodeType of errorNodeTypes) {
const workflow = {
nodes: [
{
id: '1',
name: 'Source',
type: 'n8n-nodes-base.httpRequest',
position: [0, 0],
parameters: {}
},
{
id: '2',
name: 'Success Path',
type: 'n8n-nodes-base.set',
position: [200, 0],
parameters: {}
},
{
id: '3',
name: 'Response Node',
type: nodeType,
position: [200, 100],
parameters: {}
}
],
connections: {
'Source': {
main: [
[
{ node: 'Success Path', type: 'main', index: 0 },
{ node: 'Response Node', type: 'main', index: 0 } // Should be detected
]
]
}
}
};
const result = await validator.validateWorkflow(workflow as any);
// Should detect this as an incorrect error configuration
const hasError = result.errors.some(e =>
e.message.includes('Incorrect error output configuration') &&
e.message.includes('Response Node')
);
expect(hasError).toBe(true);
}
});
// Name-based detection must still work (and not crash) when the repository
// cannot resolve a node type.
it('should handle cases where node repository returns null', async () => {
// Mock repository to return null for unknown nodes
mockGetNode.mockImplementation((type: string) => {
if (type === 'n8n-nodes-base.unknownNode') {
return null;
}
return {
node_type: type,
display_name: 'Known Node',
isVersioned: true,
version: 1
};
});
const workflow = {
nodes: [
{
id: '1',
name: 'Source',
type: 'n8n-nodes-base.httpRequest',
position: [0, 0],
parameters: {}
},
{
id: '2',
name: 'Unknown Node',
type: 'n8n-nodes-base.unknownNode',
position: [200, 0],
parameters: {}
},
{
id: '3',
name: 'Error Handler',
type: 'n8n-nodes-base.set',
position: [200, 100],
parameters: {}
}
],
connections: {
'Source': {
main: [
[
{ node: 'Unknown Node', type: 'main', index: 0 },
{ node: 'Error Handler', type: 'main', index: 0 }
]
]
}
}
};
const result = await validator.validateWorkflow(workflow as any);
// Should still detect the error configuration based on node name
const hasError = result.errors.some(e =>
e.message.includes('Incorrect error output configuration') &&
e.message.includes('Error Handler')
);
expect(hasError).toBe(true);
// Should not crash due to null node info
expect(result).toHaveProperty('valid');
expect(Array.isArray(result.errors)).toBe(true);
});
});
// Cross-validation of a node's `onError` property against the presence of
// error-output connections (main[1]): mismatch in one direction is an
// error, the other a warning, and agreement produces neither.
describe('onError Property Validation Logic', () => {
it('should validate onError property combinations correctly', async () => {
// 2x2 matrix: onError set/unset x error connections present/absent.
const testCases = [
{
name: 'onError set but no error connections',
onError: 'continueErrorOutput',
hasErrorConnections: false,
expectedErrorType: 'error',
expectedMessage: "has onError: 'continueErrorOutput' but no error output connections"
},
{
name: 'error connections but no onError',
onError: undefined,
hasErrorConnections: true,
expectedErrorType: 'warning',
expectedMessage: 'error output connections in main[1] but missing onError'
},
{
name: 'onError set with error connections',
onError: 'continueErrorOutput',
hasErrorConnections: true,
expectedErrorType: null,
expectedMessage: null
},
{
name: 'no onError and no error connections',
onError: undefined,
hasErrorConnections: false,
expectedErrorType: null,
expectedMessage: null
}
];
for (const testCase of testCases) {
const workflow = {
nodes: [
{
id: '1',
name: 'Test Node',
type: 'n8n-nodes-base.httpRequest',
position: [0, 0],
parameters: {},
...(testCase.onError ? { onError: testCase.onError } : {})
},
{
id: '2',
name: 'Success Handler',
type: 'n8n-nodes-base.set',
position: [200, 0],
parameters: {}
},
{
id: '3',
name: 'Error Handler',
type: 'n8n-nodes-base.set',
position: [200, 100],
parameters: {}
}
],
connections: {
'Test Node': {
main: [
[
{ node: 'Success Handler', type: 'main', index: 0 }
],
// Conditionally append main[1] (the error output) per test case.
...(testCase.hasErrorConnections ? [
[
{ node: 'Error Handler', type: 'main', index: 0 }
]
] : [])
]
}
}
};
const result = await validator.validateWorkflow(workflow as any);
if (testCase.expectedErrorType === 'error') {
const hasExpectedError = result.errors.some(e =>
e.nodeName === 'Test Node' &&
e.message.includes(testCase.expectedMessage!)
);
expect(hasExpectedError).toBe(true);
} else if (testCase.expectedErrorType === 'warning') {
const hasExpectedWarning = result.warnings.some(w =>
w.nodeName === 'Test Node' &&
w.message.includes(testCase.expectedMessage!)
);
expect(hasExpectedWarning).toBe(true);
} else {
// Should not have related errors or warnings about onError/error output mismatches
const hasRelatedError = result.errors.some(e =>
e.nodeName === 'Test Node' &&
(e.message.includes("has onError: 'continueErrorOutput' but no error output connections") ||
e.message.includes('Incorrect error output configuration'))
);
const hasRelatedWarning = result.warnings.some(w =>
w.nodeName === 'Test Node' &&
w.message.includes('error output connections in main[1] but missing onError')
);
expect(hasRelatedError).toBe(false);
expect(hasRelatedWarning).toBe(false);
}
}
});
// Only 'continueErrorOutput' requires error connections; the other onError
// modes are valid without them.
it('should handle different onError values correctly', async () => {
const onErrorValues = [
'continueErrorOutput',
'continueRegularOutput',
'stopWorkflow'
];
for (const onErrorValue of onErrorValues) {
const workflow = {
nodes: [
{
id: '1',
name: 'Test Node',
type: 'n8n-nodes-base.httpRequest',
position: [0, 0],
parameters: {},
onError: onErrorValue
},
{
id: '2',
name: 'Next Node',
type: 'n8n-nodes-base.set',
position: [200, 0],
parameters: {}
}
],
connections: {
'Test Node': {
main: [
[
{ node: 'Next Node', type: 'main', index: 0 }
]
// No error connections
]
}
}
};
const result = await validator.validateWorkflow(workflow as any);
if (onErrorValue === 'continueErrorOutput') {
// Should have error about missing error connections
const hasError = result.errors.some(e =>
e.nodeName === 'Test Node' &&
e.message.includes("has onError: 'continueErrorOutput' but no error output connections")
);
expect(hasError).toBe(true);
} else {
// Should not have error about missing error connections
const hasError = result.errors.some(e =>
e.nodeName === 'Test Node' &&
e.message.includes('but no error output connections')
);
expect(hasError).toBe(false);
}
}
});
});
// Tests for the JSON snippets embedded in "Incorrect error output configuration"
// error messages: the snippets must parse as JSON (after stripping the inline
// annotations) and must describe both the broken and the corrected layout.
describe('JSON Format Generation', () => {
  it('should generate valid JSON in error messages', async () => {
    // Fixture: both the success node and the error handler hang off main[0],
    // which the validator flags as a misconfigured error output (the error
    // handler is expected in main[1] instead).
    const workflow = {
      nodes: [
        {
          id: '1',
          name: 'API Call',
          type: 'n8n-nodes-base.httpRequest',
          position: [0, 0],
          parameters: {}
        },
        {
          id: '2',
          name: 'Success Process',
          type: 'n8n-nodes-base.set',
          position: [200, 0],
          parameters: {}
        },
        {
          id: '3',
          name: 'Error Handler',
          type: 'n8n-nodes-base.respondToWebhook',
          position: [200, 100],
          parameters: {}
        }
      ],
      connections: {
        'API Call': {
          main: [
            [
              { node: 'Success Process', type: 'main', index: 0 },
              { node: 'Error Handler', type: 'main', index: 0 }
            ]
          ]
        }
      }
    };
    const result = await validator.validateWorkflow(workflow as any);
    const errorConfigError = result.errors.find(e =>
      e.message.includes('Incorrect error output configuration')
    );
    expect(errorConfigError).toBeDefined();
    // Extract JSON sections from error message. The message is expected to
    // embed two annotated snippets under "INCORRECT (current):" and
    // "CORRECT (should be):" headings; capture each snippet's body up to the
    // blank line that precedes the next heading / trailing instructions.
    const incorrectMatch = errorConfigError!.message.match(/INCORRECT \(current\):\n([\s\S]*?)\n\nCORRECT/);
    const correctMatch = errorConfigError!.message.match(/CORRECT \(should be\):\n([\s\S]*?)\n\nAlso add/);
    expect(incorrectMatch).toBeDefined();
    expect(correctMatch).toBeDefined();
    // Extract just the JSON part (remove comments)
    const incorrectJsonStr = incorrectMatch![1];
    const correctJsonStr = correctMatch![1];
    // Remove comments and clean up for JSON parsing.
    // NOTE(review): the /,\s*$/ replace has no g/m flags, so it only strips a
    // single comma at the very end of the whole string — assumes the snippet
    // leaves at most one dangling comma after comment removal; confirm against
    // the validator's message format if it changes.
    const cleanIncorrectJson = incorrectJsonStr.replace(/\/\/.*$/gm, '').replace(/,\s*$/, '');
    const cleanCorrectJson = correctJsonStr.replace(/\/\/.*$/gm, '').replace(/,\s*$/, '');
    // The snippets are emitted without the outermost braces, so wrap them
    // before parsing.
    const incorrectJson = `{${cleanIncorrectJson}}`;
    const correctJson = `{${cleanCorrectJson}}`;
    expect(() => JSON.parse(incorrectJson)).not.toThrow();
    expect(() => JSON.parse(correctJson)).not.toThrow();
    const parsedIncorrect = JSON.parse(incorrectJson);
    const parsedCorrect = JSON.parse(correctJson);
    // Validate structure
    expect(parsedIncorrect).toHaveProperty('API Call');
    expect(parsedCorrect).toHaveProperty('API Call');
    expect(parsedIncorrect['API Call']).toHaveProperty('main');
    expect(parsedCorrect['API Call']).toHaveProperty('main');
    // Incorrect should have both nodes in main[0]
    expect(Array.isArray(parsedIncorrect['API Call'].main)).toBe(true);
    expect(parsedIncorrect['API Call'].main).toHaveLength(1);
    expect(parsedIncorrect['API Call'].main[0]).toHaveLength(2);
    // Correct should have separate arrays
    expect(Array.isArray(parsedCorrect['API Call'].main)).toBe(true);
    expect(parsedCorrect['API Call'].main).toHaveLength(2);
    expect(parsedCorrect['API Call'].main[0]).toHaveLength(1); // Success only
    expect(parsedCorrect['API Call'].main[1]).toHaveLength(1); // Error only
  });
  it('should handle special characters in node names in JSON', async () => {
    // Test simpler special characters that are easier to handle in JSON
    const specialNodeNames = [
      'Node with spaces',
      'Node-with-dashes',
      'Node_with_underscores'
    ];
    for (const specialName of specialNodeNames) {
      // Same misconfiguration as above, but the error-handler node carries
      // the special name so it must appear verbatim in the message.
      const workflow = {
        nodes: [
          {
            id: '1',
            name: 'Source',
            type: 'n8n-nodes-base.httpRequest',
            position: [0, 0],
            parameters: {}
          },
          {
            id: '2',
            name: 'Success',
            type: 'n8n-nodes-base.set',
            position: [200, 0],
            parameters: {}
          },
          {
            id: '3',
            name: specialName,
            type: 'n8n-nodes-base.respondToWebhook',
            position: [200, 100],
            parameters: {}
          }
        ],
        connections: {
          'Source': {
            main: [
              [
                { node: 'Success', type: 'main', index: 0 },
                { node: specialName, type: 'main', index: 0 }
              ]
            ]
          }
        }
      };
      const result = await validator.validateWorkflow(workflow as any);
      const errorConfigError = result.errors.find(e =>
        e.message.includes('Incorrect error output configuration')
      );
      expect(errorConfigError).toBeDefined();
      // Verify the error message contains the special node name
      expect(errorConfigError!.message).toContain(specialName);
      // Verify JSON structure is present (but don't parse due to comments)
      expect(errorConfigError!.message).toContain('INCORRECT (current):');
      expect(errorConfigError!.message).toContain('CORRECT (should be):');
      expect(errorConfigError!.message).toContain('main[0]');
      expect(errorConfigError!.message).toContain('main[1]');
    }
  });
});
// Verifies how the validator talks to the node repository: lookup key
// normalization, resilience to repository failures, and (documented)
// repeated lookups for duplicate node types.
describe('Repository Interaction Patterns', () => {
  // Fixture helper producing a minimal node definition.
  const makeNode = (id: string, name: string, type: string, x: number) => ({
    id,
    name,
    type,
    position: [x, 0],
    parameters: {}
  });

  it('should call repository getNode with correct parameters', async () => {
    const fixture = {
      nodes: [
        makeNode('1', 'HTTP Node', 'n8n-nodes-base.httpRequest', 0),
        makeNode('2', 'Set Node', 'n8n-nodes-base.set', 200)
      ],
      connections: {
        'HTTP Node': {
          main: [[{ node: 'Set Node', type: 'main', index: 0 }]]
        }
      }
    };

    await validator.validateWorkflow(fixture as any);

    // Node types are normalized to the short 'nodes-base.*' form before lookup,
    // one lookup per node.
    expect(mockGetNode).toHaveBeenCalledWith('nodes-base.httpRequest');
    expect(mockGetNode).toHaveBeenCalledWith('nodes-base.set');
    expect(mockGetNode).toHaveBeenCalledTimes(2);
  });

  it('should handle repository errors gracefully', async () => {
    // Simulate a repository whose every lookup fails.
    mockGetNode.mockImplementation(() => {
      throw new Error('Database connection failed');
    });

    const fixture = {
      nodes: [makeNode('1', 'Test Node', 'n8n-nodes-base.httpRequest', 0)],
      connections: {}
    };

    // The failure must not escape validateWorkflow...
    const outcome = await validator.validateWorkflow(fixture as any);

    // ...and the result object must still be well-formed.
    expect(outcome).toHaveProperty('valid');
    expect(Array.isArray(outcome.errors)).toBe(true);
    expect(Array.isArray(outcome.warnings)).toBe(true);
  });

  it('should optimize repository calls for duplicate node types', async () => {
    const fixture = {
      nodes: [
        makeNode('1', 'HTTP 1', 'n8n-nodes-base.httpRequest', 0),
        makeNode('2', 'HTTP 2', 'n8n-nodes-base.httpRequest', 200),
        makeNode('3', 'HTTP 3', 'n8n-nodes-base.httpRequest', 400)
      ],
      connections: {}
    };

    await validator.validateWorkflow(fixture as any);

    // Should call getNode for the same type multiple times (current implementation)
    // Note: This test documents current behavior. Could be optimized in the future.
    const duplicateTypeCalls = mockGetNode.mock.calls.filter(
      call => call[0] === 'nodes-base.httpRequest'
    );
    expect(duplicateTypeCalls.length).toBeGreaterThan(0);
  });
});
});
```