This is page 31 of 59. Use http://codebase.md/czlonkowski/n8n-mcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── _config.yml
├── .claude
│   └── agents
│       ├── code-reviewer.md
│       ├── context-manager.md
│       ├── debugger.md
│       ├── deployment-engineer.md
│       ├── mcp-backend-engineer.md
│       ├── n8n-mcp-tester.md
│       ├── technical-researcher.md
│       └── test-automator.md
├── .dockerignore
├── .env.docker
├── .env.example
├── .env.n8n.example
├── .env.test
├── .env.test.example
├── .github
│   ├── ABOUT.md
│   ├── BENCHMARK_THRESHOLDS.md
│   ├── FUNDING.yml
│   ├── gh-pages.yml
│   ├── secret_scanning.yml
│   └── workflows
│       ├── benchmark-pr.yml
│       ├── benchmark.yml
│       ├── docker-build-fast.yml
│       ├── docker-build-n8n.yml
│       ├── docker-build.yml
│       ├── release.yml
│       ├── test.yml
│       └── update-n8n-deps.yml
├── .gitignore
├── .npmignore
├── ATTRIBUTION.md
├── CHANGELOG.md
├── CLAUDE.md
├── codecov.yml
├── coverage.json
├── data
│   ├── .gitkeep
│   ├── nodes.db
│   ├── nodes.db-shm
│   ├── nodes.db-wal
│   └── templates.db
├── deploy
│   └── quick-deploy-n8n.sh
├── docker
│   ├── docker-entrypoint.sh
│   ├── n8n-mcp
│   ├── parse-config.js
│   └── README.md
├── docker-compose.buildkit.yml
├── docker-compose.extract.yml
├── docker-compose.n8n.yml
├── docker-compose.override.yml.example
├── docker-compose.test-n8n.yml
├── docker-compose.yml
├── Dockerfile
├── Dockerfile.railway
├── Dockerfile.test
├── docs
│   ├── AUTOMATED_RELEASES.md
│   ├── BENCHMARKS.md
│   ├── CHANGELOG.md
│   ├── CLAUDE_CODE_SETUP.md
│   ├── CLAUDE_INTERVIEW.md
│   ├── CODECOV_SETUP.md
│   ├── CODEX_SETUP.md
│   ├── CURSOR_SETUP.md
│   ├── DEPENDENCY_UPDATES.md
│   ├── DOCKER_README.md
│   ├── DOCKER_TROUBLESHOOTING.md
│   ├── FINAL_AI_VALIDATION_SPEC.md
│   ├── FLEXIBLE_INSTANCE_CONFIGURATION.md
│   ├── HTTP_DEPLOYMENT.md
│   ├── img
│   │   ├── cc_command.png
│   │   ├── cc_connected.png
│   │   ├── codex_connected.png
│   │   ├── cursor_tut.png
│   │   ├── Railway_api.png
│   │   ├── Railway_server_address.png
│   │   ├── vsc_ghcp_chat_agent_mode.png
│   │   ├── vsc_ghcp_chat_instruction_files.png
│   │   ├── vsc_ghcp_chat_thinking_tool.png
│   │   └── windsurf_tut.png
│   ├── INSTALLATION.md
│   ├── LIBRARY_USAGE.md
│   ├── local
│   │   ├── DEEP_DIVE_ANALYSIS_2025-10-02.md
│   │   ├── DEEP_DIVE_ANALYSIS_README.md
│   │   ├── Deep_dive_p1_p2.md
│   │   ├── integration-testing-plan.md
│   │   ├── integration-tests-phase1-summary.md
│   │   ├── N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md
│   │   ├── P0_IMPLEMENTATION_PLAN.md
│   │   └── TEMPLATE_MINING_ANALYSIS.md
│   ├── MCP_ESSENTIALS_README.md
│   ├── MCP_QUICK_START_GUIDE.md
│   ├── N8N_DEPLOYMENT.md
│   ├── RAILWAY_DEPLOYMENT.md
│   ├── README_CLAUDE_SETUP.md
│   ├── README.md
│   ├── tools-documentation-usage.md
│   ├── VS_CODE_PROJECT_SETUP.md
│   ├── WINDSURF_SETUP.md
│   └── workflow-diff-examples.md
├── examples
│   └── enhanced-documentation-demo.js
├── fetch_log.txt
├── LICENSE
├── MEMORY_N8N_UPDATE.md
├── MEMORY_TEMPLATE_UPDATE.md
├── monitor_fetch.sh
├── N8N_HTTP_STREAMABLE_SETUP.md
├── n8n-nodes.db
├── P0-R3-TEST-PLAN.md
├── package-lock.json
├── package.json
├── package.runtime.json
├── PRIVACY.md
├── railway.json
├── README.md
├── renovate.json
├── scripts
│   ├── analyze-optimization.sh
│   ├── audit-schema-coverage.ts
│   ├── build-optimized.sh
│   ├── compare-benchmarks.js
│   ├── demo-optimization.sh
│   ├── deploy-http.sh
│   ├── deploy-to-vm.sh
│   ├── export-webhook-workflows.ts
│   ├── extract-changelog.js
│   ├── extract-from-docker.js
│   ├── extract-nodes-docker.sh
│   ├── extract-nodes-simple.sh
│   ├── format-benchmark-results.js
│   ├── generate-benchmark-stub.js
│   ├── generate-detailed-reports.js
│   ├── generate-test-summary.js
│   ├── http-bridge.js
│   ├── mcp-http-client.js
│   ├── migrate-nodes-fts.ts
│   ├── migrate-tool-docs.ts
│   ├── n8n-docs-mcp.service
│   ├── nginx-n8n-mcp.conf
│   ├── prebuild-fts5.ts
│   ├── prepare-release.js
│   ├── publish-npm-quick.sh
│   ├── publish-npm.sh
│   ├── quick-test.ts
│   ├── run-benchmarks-ci.js
│   ├── sync-runtime-version.js
│   ├── test-ai-validation-debug.ts
│   ├── test-code-node-enhancements.ts
│   ├── test-code-node-fixes.ts
│   ├── test-docker-config.sh
│   ├── test-docker-fingerprint.ts
│   ├── test-docker-optimization.sh
│   ├── test-docker.sh
│   ├── test-empty-connection-validation.ts
│   ├── test-error-message-tracking.ts
│   ├── test-error-output-validation.ts
│   ├── test-error-validation.js
│   ├── test-essentials.ts
│   ├── test-expression-code-validation.ts
│   ├── test-expression-format-validation.js
│   ├── test-fts5-search.ts
│   ├── test-fuzzy-fix.ts
│   ├── test-fuzzy-simple.ts
│   ├── test-helpers-validation.ts
│   ├── test-http-search.ts
│   ├── test-http.sh
│   ├── test-jmespath-validation.ts
│   ├── test-multi-tenant-simple.ts
│   ├── test-multi-tenant.ts
│   ├── test-n8n-integration.sh
│   ├── test-node-info.js
│   ├── test-node-type-validation.ts
│   ├── test-nodes-base-prefix.ts
│   ├── test-operation-validation.ts
│   ├── test-optimized-docker.sh
│   ├── test-release-automation.js
│   ├── test-search-improvements.ts
│   ├── test-security.ts
│   ├── test-single-session.sh
│   ├── test-sqljs-triggers.ts
│   ├── test-telemetry-debug.ts
│   ├── test-telemetry-direct.ts
│   ├── test-telemetry-env.ts
│   ├── test-telemetry-integration.ts
│   ├── test-telemetry-no-select.ts
│   ├── test-telemetry-security.ts
│   ├── test-telemetry-simple.ts
│   ├── test-typeversion-validation.ts
│   ├── test-url-configuration.ts
│   ├── test-user-id-persistence.ts
│   ├── test-webhook-validation.ts
│   ├── test-workflow-insert.ts
│   ├── test-workflow-sanitizer.ts
│   ├── test-workflow-tracking-debug.ts
│   ├── update-and-publish-prep.sh
│   ├── update-n8n-deps.js
│   ├── update-readme-version.js
│   ├── vitest-benchmark-json-reporter.js
│   └── vitest-benchmark-reporter.ts
├── SECURITY.md
├── src
│   ├── config
│   │   └── n8n-api.ts
│   ├── data
│   │   └── canonical-ai-tool-examples.json
│   ├── database
│   │   ├── database-adapter.ts
│   │   ├── migrations
│   │   │   └── add-template-node-configs.sql
│   │   ├── node-repository.ts
│   │   ├── nodes.db
│   │   ├── schema-optimized.sql
│   │   └── schema.sql
│   ├── errors
│   │   └── validation-service-error.ts
│   ├── http-server-single-session.ts
│   ├── http-server.ts
│   ├── index.ts
│   ├── loaders
│   │   └── node-loader.ts
│   ├── mappers
│   │   └── docs-mapper.ts
│   ├── mcp
│   │   ├── handlers-n8n-manager.ts
│   │   ├── handlers-workflow-diff.ts
│   │   ├── index.ts
│   │   ├── server.ts
│   │   ├── stdio-wrapper.ts
│   │   ├── tool-docs
│   │   │   ├── configuration
│   │   │   │   ├── get-node-as-tool-info.ts
│   │   │   │   ├── get-node-documentation.ts
│   │   │   │   ├── get-node-essentials.ts
│   │   │   │   ├── get-node-info.ts
│   │   │   │   ├── get-property-dependencies.ts
│   │   │   │   ├── index.ts
│   │   │   │   └── search-node-properties.ts
│   │   │   ├── discovery
│   │   │   │   ├── get-database-statistics.ts
│   │   │   │   ├── index.ts
│   │   │   │   ├── list-ai-tools.ts
│   │   │   │   ├── list-nodes.ts
│   │   │   │   └── search-nodes.ts
│   │   │   ├── guides
│   │   │   │   ├── ai-agents-guide.ts
│   │   │   │   └── index.ts
│   │   │   ├── index.ts
│   │   │   ├── system
│   │   │   │   ├── index.ts
│   │   │   │   ├── n8n-diagnostic.ts
│   │   │   │   ├── n8n-health-check.ts
│   │   │   │   ├── n8n-list-available-tools.ts
│   │   │   │   └── tools-documentation.ts
│   │   │   ├── templates
│   │   │   │   ├── get-template.ts
│   │   │   │   ├── get-templates-for-task.ts
│   │   │   │   ├── index.ts
│   │   │   │   ├── list-node-templates.ts
│   │   │   │   ├── list-tasks.ts
│   │   │   │   ├── search-templates-by-metadata.ts
│   │   │   │   └── search-templates.ts
│   │   │   ├── types.ts
│   │   │   ├── validation
│   │   │   │   ├── index.ts
│   │   │   │   ├── validate-node-minimal.ts
│   │   │   │   ├── validate-node-operation.ts
│   │   │   │   ├── validate-workflow-connections.ts
│   │   │   │   ├── validate-workflow-expressions.ts
│   │   │   │   └── validate-workflow.ts
│   │   │   └── workflow_management
│   │   │       ├── index.ts
│   │   │       ├── n8n-autofix-workflow.ts
│   │   │       ├── n8n-create-workflow.ts
│   │   │       ├── n8n-delete-execution.ts
│   │   │       ├── n8n-delete-workflow.ts
│   │   │       ├── n8n-get-execution.ts
│   │   │       ├── n8n-get-workflow-details.ts
│   │   │       ├── n8n-get-workflow-minimal.ts
│   │   │       ├── n8n-get-workflow-structure.ts
│   │   │       ├── n8n-get-workflow.ts
│   │   │       ├── n8n-list-executions.ts
│   │   │       ├── n8n-list-workflows.ts
│   │   │       ├── n8n-trigger-webhook-workflow.ts
│   │   │       ├── n8n-update-full-workflow.ts
│   │   │       ├── n8n-update-partial-workflow.ts
│   │   │       └── n8n-validate-workflow.ts
│   │   ├── tools-documentation.ts
│   │   ├── tools-n8n-friendly.ts
│   │   ├── tools-n8n-manager.ts
│   │   ├── tools.ts
│   │   └── workflow-examples.ts
│   ├── mcp-engine.ts
│   ├── mcp-tools-engine.ts
│   ├── n8n
│   │   ├── MCPApi.credentials.ts
│   │   └── MCPNode.node.ts
│   ├── parsers
│   │   ├── node-parser.ts
│   │   ├── property-extractor.ts
│   │   └── simple-parser.ts
│   ├── scripts
│   │   ├── debug-http-search.ts
│   │   ├── extract-from-docker.ts
│   │   ├── fetch-templates-robust.ts
│   │   ├── fetch-templates.ts
│   │   ├── rebuild-database.ts
│   │   ├── rebuild-optimized.ts
│   │   ├── rebuild.ts
│   │   ├── sanitize-templates.ts
│   │   ├── seed-canonical-ai-examples.ts
│   │   ├── test-autofix-documentation.ts
│   │   ├── test-autofix-workflow.ts
│   │   ├── test-execution-filtering.ts
│   │   ├── test-node-suggestions.ts
│   │   ├── test-protocol-negotiation.ts
│   │   ├── test-summary.ts
│   │   ├── test-webhook-autofix.ts
│   │   ├── validate.ts
│   │   └── validation-summary.ts
│   ├── services
│   │   ├── ai-node-validator.ts
│   │   ├── ai-tool-validators.ts
│   │   ├── confidence-scorer.ts
│   │   ├── config-validator.ts
│   │   ├── enhanced-config-validator.ts
│   │   ├── example-generator.ts
│   │   ├── execution-processor.ts
│   │   ├── expression-format-validator.ts
│   │   ├── expression-validator.ts
│   │   ├── n8n-api-client.ts
│   │   ├── n8n-validation.ts
│   │   ├── node-documentation-service.ts
│   │   ├── node-sanitizer.ts
│   │   ├── node-similarity-service.ts
│   │   ├── node-specific-validators.ts
│   │   ├── operation-similarity-service.ts
│   │   ├── property-dependencies.ts
│   │   ├── property-filter.ts
│   │   ├── resource-similarity-service.ts
│   │   ├── sqlite-storage-service.ts
│   │   ├── task-templates.ts
│   │   ├── universal-expression-validator.ts
│   │   ├── workflow-auto-fixer.ts
│   │   ├── workflow-diff-engine.ts
│   │   └── workflow-validator.ts
│   ├── telemetry
│   │   ├── batch-processor.ts
│   │   ├── config-manager.ts
│   │   ├── early-error-logger.ts
│   │   ├── error-sanitization-utils.ts
│   │   ├── error-sanitizer.ts
│   │   ├── event-tracker.ts
│   │   ├── event-validator.ts
│   │   ├── index.ts
│   │   ├── performance-monitor.ts
│   │   ├── rate-limiter.ts
│   │   ├── startup-checkpoints.ts
│   │   ├── telemetry-error.ts
│   │   ├── telemetry-manager.ts
│   │   ├── telemetry-types.ts
│   │   └── workflow-sanitizer.ts
│   ├── templates
│   │   ├── batch-processor.ts
│   │   ├── metadata-generator.ts
│   │   ├── README.md
│   │   ├── template-fetcher.ts
│   │   ├── template-repository.ts
│   │   └── template-service.ts
│   ├── types
│   │   ├── index.ts
│   │   ├── instance-context.ts
│   │   ├── n8n-api.ts
│   │   ├── node-types.ts
│   │   └── workflow-diff.ts
│   └── utils
│       ├── auth.ts
│       ├── bridge.ts
│       ├── cache-utils.ts
│       ├── console-manager.ts
│       ├── documentation-fetcher.ts
│       ├── enhanced-documentation-fetcher.ts
│       ├── error-handler.ts
│       ├── example-generator.ts
│       ├── fixed-collection-validator.ts
│       ├── logger.ts
│       ├── mcp-client.ts
│       ├── n8n-errors.ts
│       ├── node-source-extractor.ts
│       ├── node-type-normalizer.ts
│       ├── node-type-utils.ts
│       ├── node-utils.ts
│       ├── npm-version-checker.ts
│       ├── protocol-version.ts
│       ├── simple-cache.ts
│       ├── ssrf-protection.ts
│       ├── template-node-resolver.ts
│       ├── template-sanitizer.ts
│       ├── url-detector.ts
│       ├── validation-schemas.ts
│       └── version.ts
├── test-output.txt
├── test-reinit-fix.sh
├── tests
│   ├── __snapshots__
│   │   └── .gitkeep
│   ├── auth.test.ts
│   ├── benchmarks
│   │   ├── database-queries.bench.ts
│   │   ├── index.ts
│   │   ├── mcp-tools.bench.ts
│   │   ├── mcp-tools.bench.ts.disabled
│   │   ├── mcp-tools.bench.ts.skip
│   │   ├── node-loading.bench.ts.disabled
│   │   ├── README.md
│   │   ├── search-operations.bench.ts.disabled
│   │   └── validation-performance.bench.ts.disabled
│   ├── bridge.test.ts
│   ├── comprehensive-extraction-test.js
│   ├── data
│   │   └── .gitkeep
│   ├── debug-slack-doc.js
│   ├── demo-enhanced-documentation.js
│   ├── docker-tests-README.md
│   ├── error-handler.test.ts
│   ├── examples
│   │   └── using-database-utils.test.ts
│   ├── extracted-nodes-db
│   │   ├── database-import.json
│   │   ├── extraction-report.json
│   │   ├── insert-nodes.sql
│   │   ├── n8n-nodes-base__Airtable.json
│   │   ├── n8n-nodes-base__Discord.json
│   │   ├── n8n-nodes-base__Function.json
│   │   ├── n8n-nodes-base__HttpRequest.json
│   │   ├── n8n-nodes-base__If.json
│   │   ├── n8n-nodes-base__Slack.json
│   │   ├── n8n-nodes-base__SplitInBatches.json
│   │   └── n8n-nodes-base__Webhook.json
│   ├── factories
│   │   ├── node-factory.ts
│   │   └── property-definition-factory.ts
│   ├── fixtures
│   │   ├── .gitkeep
│   │   ├── database
│   │   │   └── test-nodes.json
│   │   ├── factories
│   │   │   ├── node.factory.ts
│   │   │   └── parser-node.factory.ts
│   │   └── template-configs.ts
│   ├── helpers
│   │   └── env-helpers.ts
│   ├── http-server-auth.test.ts
│   ├── integration
│   │   ├── ai-validation
│   │   │   ├── ai-agent-validation.test.ts
│   │   │   ├── ai-tool-validation.test.ts
│   │   │   ├── chat-trigger-validation.test.ts
│   │   │   ├── e2e-validation.test.ts
│   │   │   ├── helpers.ts
│   │   │   ├── llm-chain-validation.test.ts
│   │   │   ├── README.md
│   │   │   └── TEST_REPORT.md
│   │   ├── ci
│   │   │   └── database-population.test.ts
│   │   ├── database
│   │   │   ├── connection-management.test.ts
│   │   │   ├── empty-database.test.ts
│   │   │   ├── fts5-search.test.ts
│   │   │   ├── node-fts5-search.test.ts
│   │   │   ├── node-repository.test.ts
│   │   │   ├── performance.test.ts
│   │   │   ├── sqljs-memory-leak.test.ts
│   │   │   ├── template-node-configs.test.ts
│   │   │   ├── template-repository.test.ts
│   │   │   ├── test-utils.ts
│   │   │   └── transactions.test.ts
│   │   ├── database-integration.test.ts
│   │   ├── docker
│   │   │   ├── docker-config.test.ts
│   │   │   ├── docker-entrypoint.test.ts
│   │   │   └── test-helpers.ts
│   │   ├── flexible-instance-config.test.ts
│   │   ├── mcp
│   │   │   └── template-examples-e2e.test.ts
│   │   ├── mcp-protocol
│   │   │   ├── basic-connection.test.ts
│   │   │   ├── error-handling.test.ts
│   │   │   ├── performance.test.ts
│   │   │   ├── protocol-compliance.test.ts
│   │   │   ├── README.md
│   │   │   ├── session-management.test.ts
│   │   │   ├── test-helpers.ts
│   │   │   ├── tool-invocation.test.ts
│   │   │   └── workflow-error-validation.test.ts
│   │   ├── msw-setup.test.ts
│   │   ├── n8n-api
│   │   │   ├── executions
│   │   │   │   ├── delete-execution.test.ts
│   │   │   │   ├── get-execution.test.ts
│   │   │   │   ├── list-executions.test.ts
│   │   │   │   └── trigger-webhook.test.ts
│   │   │   ├── scripts
│   │   │   │   └── cleanup-orphans.ts
│   │   │   ├── system
│   │   │   │   ├── diagnostic.test.ts
│   │   │   │   ├── health-check.test.ts
│   │   │   │   └── list-tools.test.ts
│   │   │   ├── test-connection.ts
│   │   │   ├── types
│   │   │   │   └── mcp-responses.ts
│   │   │   ├── utils
│   │   │   │   ├── cleanup-helpers.ts
│   │   │   │   ├── credentials.ts
│   │   │   │   ├── factories.ts
│   │   │   │   ├── fixtures.ts
│   │   │   │   ├── mcp-context.ts
│   │   │   │   ├── n8n-client.ts
│   │   │   │   ├── node-repository.ts
│   │   │   │   ├── response-types.ts
│   │   │   │   ├── test-context.ts
│   │   │   │   └── webhook-workflows.ts
│   │   │   └── workflows
│   │   │       ├── autofix-workflow.test.ts
│   │   │       ├── create-workflow.test.ts
│   │   │       ├── delete-workflow.test.ts
│   │   │       ├── get-workflow-details.test.ts
│   │   │       ├── get-workflow-minimal.test.ts
│   │   │       ├── get-workflow-structure.test.ts
│   │   │       ├── get-workflow.test.ts
│   │   │       ├── list-workflows.test.ts
│   │   │       ├── smart-parameters.test.ts
│   │   │       ├── update-partial-workflow.test.ts
│   │   │       ├── update-workflow.test.ts
│   │   │       └── validate-workflow.test.ts
│   │   ├── security
│   │   │   ├── command-injection-prevention.test.ts
│   │   │   └── rate-limiting.test.ts
│   │   ├── setup
│   │   │   ├── integration-setup.ts
│   │   │   └── msw-test-server.ts
│   │   ├── telemetry
│   │   │   ├── docker-user-id-stability.test.ts
│   │   │   └── mcp-telemetry.test.ts
│   │   ├── templates
│   │   │   └── metadata-operations.test.ts
│   │   └── workflow-creation-node-type-format.test.ts
│   ├── logger.test.ts
│   ├── MOCKING_STRATEGY.md
│   ├── mocks
│   │   ├── n8n-api
│   │   │   ├── data
│   │   │   │   ├── credentials.ts
│   │   │   │   ├── executions.ts
│   │   │   │   └── workflows.ts
│   │   │   ├── handlers.ts
│   │   │   └── index.ts
│   │   └── README.md
│   ├── node-storage-export.json
│   ├── setup
│   │   ├── global-setup.ts
│   │   ├── msw-setup.ts
│   │   ├── TEST_ENV_DOCUMENTATION.md
│   │   └── test-env.ts
│   ├── test-database-extraction.js
│   ├── test-direct-extraction.js
│   ├── test-enhanced-documentation.js
│   ├── test-enhanced-integration.js
│   ├── test-mcp-extraction.js
│   ├── test-mcp-server-extraction.js
│   ├── test-mcp-tools-integration.js
│   ├── test-node-documentation-service.js
│   ├── test-node-list.js
│   ├── test-package-info.js
│   ├── test-parsing-operations.js
│   ├── test-slack-node-complete.js
│   ├── test-small-rebuild.js
│   ├── test-sqlite-search.js
│   ├── test-storage-system.js
│   ├── unit
│   │   ├── __mocks__
│   │   │   ├── n8n-nodes-base.test.ts
│   │   │   ├── n8n-nodes-base.ts
│   │   │   └── README.md
│   │   ├── database
│   │   │   ├── __mocks__
│   │   │   │   └── better-sqlite3.ts
│   │   │   ├── database-adapter-unit.test.ts
│   │   │   ├── node-repository-core.test.ts
│   │   │   ├── node-repository-operations.test.ts
│   │   │   ├── node-repository-outputs.test.ts
│   │   │   ├── README.md
│   │   │   └── template-repository-core.test.ts
│   │   ├── docker
│   │   │   ├── config-security.test.ts
│   │   │   ├── edge-cases.test.ts
│   │   │   ├── parse-config.test.ts
│   │   │   └── serve-command.test.ts
│   │   ├── errors
│   │   │   └── validation-service-error.test.ts
│   │   ├── examples
│   │   │   └── using-n8n-nodes-base-mock.test.ts
│   │   ├── flexible-instance-security-advanced.test.ts
│   │   ├── flexible-instance-security.test.ts
│   │   ├── http-server
│   │   │   └── multi-tenant-support.test.ts
│   │   ├── http-server-n8n-mode.test.ts
│   │   ├── http-server-n8n-reinit.test.ts
│   │   ├── http-server-session-management.test.ts
│   │   ├── loaders
│   │   │   └── node-loader.test.ts
│   │   ├── mappers
│   │   │   └── docs-mapper.test.ts
│   │   ├── mcp
│   │   │   ├── get-node-essentials-examples.test.ts
│   │   │   ├── handlers-n8n-manager-simple.test.ts
│   │   │   ├── handlers-n8n-manager.test.ts
│   │   │   ├── handlers-workflow-diff.test.ts
│   │   │   ├── lru-cache-behavior.test.ts
│   │   │   ├── multi-tenant-tool-listing.test.ts.disabled
│   │   │   ├── parameter-validation.test.ts
│   │   │   ├── search-nodes-examples.test.ts
│   │   │   ├── tools-documentation.test.ts
│   │   │   └── tools.test.ts
│   │   ├── monitoring
│   │   │   └── cache-metrics.test.ts
│   │   ├── MULTI_TENANT_TEST_COVERAGE.md
│   │   ├── multi-tenant-integration.test.ts
│   │   ├── parsers
│   │   │   ├── node-parser-outputs.test.ts
│   │   │   ├── node-parser.test.ts
│   │   │   ├── property-extractor.test.ts
│   │   │   └── simple-parser.test.ts
│   │   ├── scripts
│   │   │   └── fetch-templates-extraction.test.ts
│   │   ├── services
│   │   │   ├── ai-node-validator.test.ts
│   │   │   ├── ai-tool-validators.test.ts
│   │   │   ├── confidence-scorer.test.ts
│   │   │   ├── config-validator-basic.test.ts
│   │   │   ├── config-validator-edge-cases.test.ts
│   │   │   ├── config-validator-node-specific.test.ts
│   │   │   ├── config-validator-security.test.ts
│   │   │   ├── debug-validator.test.ts
│   │   │   ├── enhanced-config-validator-integration.test.ts
│   │   │   ├── enhanced-config-validator-operations.test.ts
│   │   │   ├── enhanced-config-validator.test.ts
│   │   │   ├── example-generator.test.ts
│   │   │   ├── execution-processor.test.ts
│   │   │   ├── expression-format-validator.test.ts
│   │   │   ├── expression-validator-edge-cases.test.ts
│   │   │   ├── expression-validator.test.ts
│   │   │   ├── fixed-collection-validation.test.ts
│   │   │   ├── loop-output-edge-cases.test.ts
│   │   │   ├── n8n-api-client.test.ts
│   │   │   ├── n8n-validation.test.ts
│   │   │   ├── node-sanitizer.test.ts
│   │   │   ├── node-similarity-service.test.ts
│   │   │   ├── node-specific-validators.test.ts
│   │   │   ├── operation-similarity-service-comprehensive.test.ts
│   │   │   ├── operation-similarity-service.test.ts
│   │   │   ├── property-dependencies.test.ts
│   │   │   ├── property-filter-edge-cases.test.ts
│   │   │   ├── property-filter.test.ts
│   │   │   ├── resource-similarity-service-comprehensive.test.ts
│   │   │   ├── resource-similarity-service.test.ts
│   │   │   ├── task-templates.test.ts
│   │   │   ├── template-service.test.ts
│   │   │   ├── universal-expression-validator.test.ts
│   │   │   ├── validation-fixes.test.ts
│   │   │   ├── workflow-auto-fixer.test.ts
│   │   │   ├── workflow-diff-engine.test.ts
│   │   │   ├── workflow-fixed-collection-validation.test.ts
│   │   │   ├── workflow-validator-comprehensive.test.ts
│   │   │   ├── workflow-validator-edge-cases.test.ts
│   │   │   ├── workflow-validator-error-outputs.test.ts
│   │   │   ├── workflow-validator-expression-format.test.ts
│   │   │   ├── workflow-validator-loops-simple.test.ts
│   │   │   ├── workflow-validator-loops.test.ts
│   │   │   ├── workflow-validator-mocks.test.ts
│   │   │   ├── workflow-validator-performance.test.ts
│   │   │   ├── workflow-validator-with-mocks.test.ts
│   │   │   └── workflow-validator.test.ts
│   │   ├── telemetry
│   │   │   ├── batch-processor.test.ts
│   │   │   ├── config-manager.test.ts
│   │   │   ├── event-tracker.test.ts
│   │   │   ├── event-validator.test.ts
│   │   │   ├── rate-limiter.test.ts
│   │   │   ├── telemetry-error.test.ts
│   │   │   ├── telemetry-manager.test.ts
│   │   │   ├── v2.18.3-fixes-verification.test.ts
│   │   │   └── workflow-sanitizer.test.ts
│   │   ├── templates
│   │   │   ├── batch-processor.test.ts
│   │   │   ├── metadata-generator.test.ts
│   │   │   ├── template-repository-metadata.test.ts
│   │   │   └── template-repository-security.test.ts
│   │   ├── test-env-example.test.ts
│   │   ├── test-infrastructure.test.ts
│   │   ├── types
│   │   │   ├── instance-context-coverage.test.ts
│   │   │   └── instance-context-multi-tenant.test.ts
│   │   ├── utils
│   │   │   ├── auth-timing-safe.test.ts
│   │   │   ├── cache-utils.test.ts
│   │   │   ├── console-manager.test.ts
│   │   │   ├── database-utils.test.ts
│   │   │   ├── fixed-collection-validator.test.ts
│   │   │   ├── n8n-errors.test.ts
│   │   │   ├── node-type-normalizer.test.ts
│   │   │   ├── node-type-utils.test.ts
│   │   │   ├── node-utils.test.ts
│   │   │   ├── simple-cache-memory-leak-fix.test.ts
│   │   │   ├── ssrf-protection.test.ts
│   │   │   └── template-node-resolver.test.ts
│   │   └── validation-fixes.test.ts
│   └── utils
│       ├── assertions.ts
│       ├── builders
│       │   └── workflow.builder.ts
│       ├── data-generators.ts
│       ├── database-utils.ts
│       ├── README.md
│       └── test-helpers.ts
├── thumbnail.png
├── tsconfig.build.json
├── tsconfig.json
├── types
│   ├── mcp.d.ts
│   └── test-env.d.ts
├── verify-telemetry-fix.js
├── versioned-nodes.md
├── vitest.config.benchmark.ts
├── vitest.config.integration.ts
└── vitest.config.ts
```

# Files

--------------------------------------------------------------------------------
/scripts/generate-detailed-reports.js:
--------------------------------------------------------------------------------

```javascript
  1 | #!/usr/bin/env node
  2 | import { readFileSync, writeFileSync, existsSync, mkdirSync } from 'fs';
  3 | import { resolve, dirname } from 'path';
  4 | 
  5 | /**
  6 |  * Generate detailed test reports in multiple formats
  7 |  */
  8 | class TestReportGenerator {
  9 |   constructor() {
 10 |     this.results = {
 11 |       tests: null,
 12 |       coverage: null,
 13 |       benchmarks: null,
 14 |       metadata: {
 15 |         timestamp: new Date().toISOString(),
 16 |         repository: process.env.GITHUB_REPOSITORY || 'n8n-mcp',
 17 |         sha: process.env.GITHUB_SHA || 'unknown',
 18 |         branch: process.env.GITHUB_REF || 'unknown',
 19 |         runId: process.env.GITHUB_RUN_ID || 'local',
 20 |         runNumber: process.env.GITHUB_RUN_NUMBER || '0',
 21 |       }
 22 |     };
 23 |   }
 24 | 
 25 |   loadTestResults() {
 26 |     const testResultPath = resolve(process.cwd(), 'test-results/results.json');
 27 |     if (existsSync(testResultPath)) {
 28 |       try {
 29 |         const data = JSON.parse(readFileSync(testResultPath, 'utf-8'));
 30 |         this.results.tests = this.processTestResults(data);
 31 |       } catch (error) {
 32 |         console.error('Error loading test results:', error);
 33 |       }
 34 |     }
 35 |   }
 36 | 
 37 |   processTestResults(data) {
 38 |     const processedResults = {
 39 |       summary: {
 40 |         total: data.numTotalTests || 0,
 41 |         passed: data.numPassedTests || 0,
 42 |         failed: data.numFailedTests || 0,
 43 |         skipped: data.numSkippedTests || 0,
 44 |         duration: data.duration || 0,
 45 |         success: (data.numFailedTests || 0) === 0
 46 |       },
 47 |       testSuites: [],
 48 |       failedTests: []
 49 |     };
 50 | 
 51 |     // Process test suites
 52 |     if (data.testResults) {
 53 |       for (const suite of data.testResults) {
 54 |         const suiteInfo = {
 55 |           name: suite.name,
 56 |           duration: suite.duration || 0,
 57 |           tests: {
 58 |             total: (suite.numPassingTests || 0) + (suite.numFailingTests || 0) + (suite.numPendingTests || 0),
 59 |             passed: suite.numPassingTests || 0,
 60 |             failed: suite.numFailingTests || 0,
 61 |             skipped: suite.numPendingTests || 0
 62 |           },
 63 |           status: suite.numFailingTests === 0 ? 'passed' : 'failed'
 64 |         };
 65 | 
 66 |         processedResults.testSuites.push(suiteInfo);
 67 | 
 68 |         // Collect failed tests
 69 |         if (suite.testResults) {
 70 |           for (const test of suite.testResults) {
 71 |             if (test.status === 'failed') {
 72 |               processedResults.failedTests.push({
 73 |                 suite: suite.name,
 74 |                 test: test.title,
 75 |                 duration: test.duration || 0,
 76 |                 error: test.failureMessages ? test.failureMessages.join('\n') : 'Unknown error'
 77 |               });
 78 |             }
 79 |           }
 80 |         }
 81 |       }
 82 |     }
 83 | 
 84 |     return processedResults;
 85 |   }
 86 | 
 87 |   loadCoverageResults() {
 88 |     const coveragePath = resolve(process.cwd(), 'coverage/coverage-summary.json');
 89 |     if (existsSync(coveragePath)) {
 90 |       try {
 91 |         const data = JSON.parse(readFileSync(coveragePath, 'utf-8'));
 92 |         this.results.coverage = this.processCoverageResults(data);
 93 |       } catch (error) {
 94 |         console.error('Error loading coverage results:', error);
 95 |       }
 96 |     }
 97 |   }
 98 | 
 99 |   processCoverageResults(data) {
100 |     const coverage = {
101 |       summary: {
102 |         lines: data.total.lines.pct,
103 |         statements: data.total.statements.pct,
104 |         functions: data.total.functions.pct,
105 |         branches: data.total.branches.pct,
106 |         average: 0
107 |       },
108 |       files: []
109 |     };
110 | 
111 |     // Calculate average
112 |     coverage.summary.average = (
113 |       coverage.summary.lines +
114 |       coverage.summary.statements +
115 |       coverage.summary.functions +
116 |       coverage.summary.branches
117 |     ) / 4;
118 | 
119 |     // Process file coverage
120 |     for (const [filePath, fileData] of Object.entries(data)) {
121 |       if (filePath !== 'total') {
122 |         coverage.files.push({
123 |           path: filePath,
124 |           lines: fileData.lines.pct,
125 |           statements: fileData.statements.pct,
126 |           functions: fileData.functions.pct,
127 |           branches: fileData.branches.pct,
128 |           uncoveredLines: fileData.lines.total - fileData.lines.covered
129 |         });
130 |       }
131 |     }
132 | 
133 |     // Sort files by coverage (lowest first)
134 |     coverage.files.sort((a, b) => a.lines - b.lines);
135 | 
136 |     return coverage;
137 |   }
138 | 
139 |   loadBenchmarkResults() {
140 |     const benchmarkPath = resolve(process.cwd(), 'benchmark-results.json');
141 |     if (existsSync(benchmarkPath)) {
142 |       try {
143 |         const data = JSON.parse(readFileSync(benchmarkPath, 'utf-8'));
144 |         this.results.benchmarks = this.processBenchmarkResults(data);
145 |       } catch (error) {
146 |         console.error('Error loading benchmark results:', error);
147 |       }
148 |     }
149 |   }
150 | 
151 |   processBenchmarkResults(data) {
152 |     const benchmarks = {
153 |       timestamp: data.timestamp,
154 |       results: []
155 |     };
156 | 
157 |     for (const file of data.files || []) {
158 |       for (const group of file.groups || []) {
159 |         for (const benchmark of group.benchmarks || []) {
160 |           benchmarks.results.push({
161 |             file: file.filepath,
162 |             group: group.name,
163 |             name: benchmark.name,
164 |             ops: benchmark.result.hz,
165 |             mean: benchmark.result.mean,
166 |             min: benchmark.result.min,
167 |             max: benchmark.result.max,
168 |             p75: benchmark.result.p75,
169 |             p99: benchmark.result.p99,
170 |             samples: benchmark.result.samples
171 |           });
172 |         }
173 |       }
174 |     }
175 | 
176 |     // Sort by ops/sec (highest first)
177 |     benchmarks.results.sort((a, b) => b.ops - a.ops);
178 | 
179 |     return benchmarks;
180 |   }
181 | 
182 |   generateMarkdownReport() {
183 |     let report = '# n8n-mcp Test Report\n\n';
184 |     report += `Generated: ${this.results.metadata.timestamp}\n\n`;
185 |     
186 |     // Metadata
187 |     report += '## Build Information\n\n';
188 |     report += `- **Repository**: ${this.results.metadata.repository}\n`;
189 |     report += `- **Commit**: ${this.results.metadata.sha.substring(0, 7)}\n`;
190 |     report += `- **Branch**: ${this.results.metadata.branch}\n`;
191 |     report += `- **Run**: #${this.results.metadata.runNumber}\n\n`;
192 | 
193 |     // Test Results
194 |     if (this.results.tests) {
195 |       const { summary, testSuites, failedTests } = this.results.tests;
196 |       const emoji = summary.success ? '✅' : '❌';
197 |       
198 |       report += `## ${emoji} Test Results\n\n`;
199 |       report += `### Summary\n\n`;
200 |       report += `- **Total Tests**: ${summary.total}\n`;
201 |       report += `- **Passed**: ${summary.passed} (${((summary.passed / summary.total) * 100).toFixed(1)}%)\n`;
202 |       report += `- **Failed**: ${summary.failed}\n`;
203 |       report += `- **Skipped**: ${summary.skipped}\n`;
204 |       report += `- **Duration**: ${(summary.duration / 1000).toFixed(2)}s\n\n`;
205 | 
206 |       // Test Suites
207 |       if (testSuites.length > 0) {
208 |         report += '### Test Suites\n\n';
209 |         report += '| Suite | Status | Tests | Duration |\n';
210 |         report += '|-------|--------|-------|----------|\n';
211 |         
212 |         for (const suite of testSuites) {
213 |           const status = suite.status === 'passed' ? '✅' : '❌';
214 |           const tests = `${suite.tests.passed}/${suite.tests.total}`;
215 |           const duration = `${(suite.duration / 1000).toFixed(2)}s`;
216 |           report += `| ${suite.name} | ${status} | ${tests} | ${duration} |\n`;
217 |         }
218 |         report += '\n';
219 |       }
220 | 
221 |       // Failed Tests
222 |       if (failedTests.length > 0) {
223 |         report += '### Failed Tests\n\n';
224 |         for (const failed of failedTests) {
225 |           report += `#### ${failed.suite} > ${failed.test}\n\n`;
226 |           report += '```\n';
227 |           report += failed.error;
228 |           report += '\n```\n\n';
229 |         }
230 |       }
231 |     }
232 | 
233 |     // Coverage Results
234 |     if (this.results.coverage) {
235 |       const { summary, files } = this.results.coverage;
236 |       const emoji = summary.average >= 80 ? '✅' : summary.average >= 60 ? '⚠️' : '❌';
237 |       
238 |       report += `## ${emoji} Coverage Report\n\n`;
239 |       report += '### Summary\n\n';
240 |       report += `- **Lines**: ${summary.lines.toFixed(2)}%\n`;
241 |       report += `- **Statements**: ${summary.statements.toFixed(2)}%\n`;
242 |       report += `- **Functions**: ${summary.functions.toFixed(2)}%\n`;
243 |       report += `- **Branches**: ${summary.branches.toFixed(2)}%\n`;
244 |       report += `- **Average**: ${summary.average.toFixed(2)}%\n\n`;
245 | 
246 |       // Files with low coverage
247 |       const lowCoverageFiles = files.filter(f => f.lines < 80).slice(0, 10);
248 |       if (lowCoverageFiles.length > 0) {
249 |         report += '### Files with Low Coverage\n\n';
250 |         report += '| File | Lines | Uncovered Lines |\n';
251 |         report += '|------|-------|----------------|\n';
252 |         
253 |         for (const file of lowCoverageFiles) {
254 |           const fileName = file.path.split('/').pop();
255 |           report += `| ${fileName} | ${file.lines.toFixed(1)}% | ${file.uncoveredLines} |\n`;
256 |         }
257 |         report += '\n';
258 |       }
259 |     }
260 | 
261 |     // Benchmark Results
262 |     if (this.results.benchmarks && this.results.benchmarks.results.length > 0) {
263 |       report += '## ⚡ Benchmark Results\n\n';
264 |       report += '### Top Performers\n\n';
265 |       report += '| Benchmark | Ops/sec | Mean (ms) | Samples |\n';
266 |       report += '|-----------|---------|-----------|----------|\n';
267 |       
268 |       for (const bench of this.results.benchmarks.results.slice(0, 10)) {
269 |         const opsFormatted = bench.ops.toLocaleString('en-US', { maximumFractionDigits: 0 });
270 |         const meanFormatted = (bench.mean * 1000).toFixed(3);
271 |         report += `| ${bench.name} | ${opsFormatted} | ${meanFormatted} | ${bench.samples} |\n`;
272 |       }
273 |       report += '\n';
274 |     }
275 | 
276 |     return report;
277 |   }
278 | 
279 |   generateJsonReport() {
280 |     return JSON.stringify(this.results, null, 2);
281 |   }
282 | 
283 |   generateHtmlReport() {
284 |     const htmlTemplate = `<!DOCTYPE html>
285 | <html lang="en">
286 | <head>
287 |     <meta charset="UTF-8">
288 |     <meta name="viewport" content="width=device-width, initial-scale=1.0">
289 |     <title>n8n-mcp Test Report</title>
290 |     <style>
291 |         body {
292 |             font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
293 |             line-height: 1.6;
294 |             color: #333;
295 |             max-width: 1200px;
296 |             margin: 0 auto;
297 |             padding: 20px;
298 |             background-color: #f5f5f5;
299 |         }
300 |         .header {
301 |             background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
302 |             color: white;
303 |             padding: 30px;
304 |             border-radius: 10px;
305 |             margin-bottom: 30px;
306 |         }
307 |         .header h1 {
308 |             margin: 0 0 10px 0;
309 |             font-size: 2.5em;
310 |         }
311 |         .metadata {
312 |             opacity: 0.9;
313 |             font-size: 0.9em;
314 |         }
315 |         .section {
316 |             background: white;
317 |             padding: 25px;
318 |             margin-bottom: 20px;
319 |             border-radius: 10px;
320 |             box-shadow: 0 2px 10px rgba(0,0,0,0.1);
321 |         }
322 |         .section h2 {
323 |             margin-top: 0;
324 |             color: #333;
325 |             border-bottom: 2px solid #eee;
326 |             padding-bottom: 10px;
327 |         }
328 |         .stats {
329 |             display: grid;
330 |             grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
331 |             gap: 20px;
332 |             margin: 20px 0;
333 |         }
334 |         .stat-card {
335 |             background: #f8f9fa;
336 |             padding: 20px;
337 |             border-radius: 8px;
338 |             text-align: center;
339 |             border: 1px solid #e9ecef;
340 |         }
341 |         .stat-card .value {
342 |             font-size: 2em;
343 |             font-weight: bold;
344 |             color: #667eea;
345 |         }
346 |         .stat-card .label {
347 |             color: #666;
348 |             font-size: 0.9em;
349 |             margin-top: 5px;
350 |         }
351 |         table {
352 |             width: 100%;
353 |             border-collapse: collapse;
354 |             margin: 20px 0;
355 |         }
356 |         th, td {
357 |             padding: 12px;
358 |             text-align: left;
359 |             border-bottom: 1px solid #ddd;
360 |         }
361 |         th {
362 |             background-color: #f8f9fa;
363 |             font-weight: 600;
364 |             color: #495057;
365 |         }
366 |         tr:hover {
367 |             background-color: #f8f9fa;
368 |         }
369 |         .success { color: #28a745; }
370 |         .warning { color: #ffc107; }
371 |         .danger { color: #dc3545; }
372 |         .failed-test {
373 |             background-color: #fff5f5;
374 |             border: 1px solid #feb2b2;
375 |             border-radius: 5px;
376 |             padding: 15px;
377 |             margin: 10px 0;
378 |         }
379 |         .failed-test h4 {
380 |             margin: 0 0 10px 0;
381 |             color: #c53030;
382 |         }
383 |         .error-message {
384 |             background-color: #1a202c;
385 |             color: #e2e8f0;
386 |             padding: 15px;
387 |             border-radius: 5px;
388 |             font-family: 'Courier New', monospace;
389 |             font-size: 0.9em;
390 |             overflow-x: auto;
391 |         }
392 |         .progress-bar {
393 |             width: 100%;
394 |             height: 20px;
395 |             background-color: #e9ecef;
396 |             border-radius: 10px;
397 |             overflow: hidden;
398 |             margin: 10px 0;
399 |         }
400 |         .progress-fill {
401 |             height: 100%;
402 |             background: linear-gradient(90deg, #28a745 0%, #20c997 100%);
403 |             transition: width 0.3s ease;
404 |         }
405 |         .coverage-low { background: linear-gradient(90deg, #dc3545 0%, #f86734 100%); }
406 |         .coverage-medium { background: linear-gradient(90deg, #ffc107 0%, #ffb347 100%); }
407 |     </style>
408 | </head>
409 | <body>
410 |     <div class="header">
411 |         <h1>n8n-mcp Test Report</h1>
412 |         <div class="metadata">
413 |             <div>Repository: ${this.results.metadata.repository}</div>
414 |             <div>Commit: ${this.results.metadata.sha.substring(0, 7)}</div>
415 |             <div>Run: #${this.results.metadata.runNumber}</div>
416 |             <div>Generated: ${new Date(this.results.metadata.timestamp).toLocaleString()}</div>
417 |         </div>
418 |     </div>
419 |     
420 |     ${this.generateTestResultsHtml()}
421 |     ${this.generateCoverageHtml()}
422 |     ${this.generateBenchmarkHtml()}
423 | </body>
424 | </html>`;
425 | 
426 |     return htmlTemplate;
427 |   }
428 | 
429 |   generateTestResultsHtml() {
430 |     if (!this.results.tests) return '';
431 |     
432 |     const { summary, testSuites, failedTests } = this.results.tests;
433 |     const successRate = ((summary.passed / summary.total) * 100).toFixed(1);
434 |     const statusClass = summary.success ? 'success' : 'danger';
435 |     const statusIcon = summary.success ? '✅' : '❌';
436 | 
437 |     let html = `
438 |     <div class="section">
439 |         <h2>${statusIcon} Test Results</h2>
440 |         <div class="stats">
441 |             <div class="stat-card">
442 |                 <div class="value">${summary.total}</div>
443 |                 <div class="label">Total Tests</div>
444 |             </div>
445 |             <div class="stat-card">
446 |                 <div class="value ${statusClass}">${summary.passed}</div>
447 |                 <div class="label">Passed</div>
448 |             </div>
449 |             <div class="stat-card">
450 |                 <div class="value ${summary.failed > 0 ? 'danger' : ''}">${summary.failed}</div>
451 |                 <div class="label">Failed</div>
452 |             </div>
453 |             <div class="stat-card">
454 |                 <div class="value">${successRate}%</div>
455 |                 <div class="label">Success Rate</div>
456 |             </div>
457 |             <div class="stat-card">
458 |                 <div class="value">${(summary.duration / 1000).toFixed(1)}s</div>
459 |                 <div class="label">Duration</div>
460 |             </div>
461 |         </div>`;
462 | 
463 |     if (testSuites.length > 0) {
464 |       html += `
465 |         <h3>Test Suites</h3>
466 |         <table>
467 |             <thead>
468 |                 <tr>
469 |                     <th>Suite</th>
470 |                     <th>Status</th>
471 |                     <th>Tests</th>
472 |                     <th>Duration</th>
473 |                 </tr>
474 |             </thead>
475 |             <tbody>`;
476 |       
477 |       for (const suite of testSuites) {
478 |         const status = suite.status === 'passed' ? '✅' : '❌';
479 |         const statusClass = suite.status === 'passed' ? 'success' : 'danger';
480 |         html += `
481 |                 <tr>
482 |                     <td>${suite.name}</td>
483 |                     <td class="${statusClass}">${status}</td>
484 |                     <td>${suite.tests.passed}/${suite.tests.total}</td>
485 |                     <td>${(suite.duration / 1000).toFixed(2)}s</td>
486 |                 </tr>`;
487 |       }
488 |       
489 |       html += `
490 |             </tbody>
491 |         </table>`;
492 |     }
493 | 
494 |     if (failedTests.length > 0) {
495 |       html += `
496 |         <h3>Failed Tests</h3>`;
497 |       
498 |       for (const failed of failedTests) {
499 |         html += `
500 |         <div class="failed-test">
501 |             <h4>${failed.suite} > ${failed.test}</h4>
502 |             <div class="error-message">${this.escapeHtml(failed.error)}</div>
503 |         </div>`;
504 |       }
505 |     }
506 | 
507 |     html += `</div>`;
508 |     return html;
509 |   }
510 | 
511 |   generateCoverageHtml() {
512 |     if (!this.results.coverage) return '';
513 |     
514 |     const { summary, files } = this.results.coverage;
515 |     const coverageClass = summary.average >= 80 ? 'success' : summary.average >= 60 ? 'warning' : 'danger';
516 |     const progressClass = summary.average >= 80 ? '' : summary.average >= 60 ? 'coverage-medium' : 'coverage-low';
517 | 
518 |     let html = `
519 |     <div class="section">
520 |         <h2>📊 Coverage Report</h2>
521 |         <div class="stats">
522 |             <div class="stat-card">
523 |                 <div class="value ${coverageClass}">${summary.average.toFixed(1)}%</div>
524 |                 <div class="label">Average Coverage</div>
525 |             </div>
526 |             <div class="stat-card">
527 |                 <div class="value">${summary.lines.toFixed(1)}%</div>
528 |                 <div class="label">Lines</div>
529 |             </div>
530 |             <div class="stat-card">
531 |                 <div class="value">${summary.statements.toFixed(1)}%</div>
532 |                 <div class="label">Statements</div>
533 |             </div>
534 |             <div class="stat-card">
535 |                 <div class="value">${summary.functions.toFixed(1)}%</div>
536 |                 <div class="label">Functions</div>
537 |             </div>
538 |             <div class="stat-card">
539 |                 <div class="value">${summary.branches.toFixed(1)}%</div>
540 |                 <div class="label">Branches</div>
541 |             </div>
542 |         </div>
543 |         
544 |         <div class="progress-bar">
545 |             <div class="progress-fill ${progressClass}" style="width: ${summary.average}%"></div>
546 |         </div>`;
547 | 
548 |     const lowCoverageFiles = files.filter(f => f.lines < 80).slice(0, 10);
549 |     if (lowCoverageFiles.length > 0) {
550 |       html += `
551 |         <h3>Files with Low Coverage</h3>
552 |         <table>
553 |             <thead>
554 |                 <tr>
555 |                     <th>File</th>
556 |                     <th>Lines</th>
557 |                     <th>Statements</th>
558 |                     <th>Functions</th>
559 |                     <th>Branches</th>
560 |                 </tr>
561 |             </thead>
562 |             <tbody>`;
563 |       
564 |       for (const file of lowCoverageFiles) {
565 |         const fileName = file.path.split('/').pop();
566 |         html += `
567 |                 <tr>
568 |                     <td>${fileName}</td>
569 |                     <td class="${file.lines < 50 ? 'danger' : file.lines < 80 ? 'warning' : ''}">${file.lines.toFixed(1)}%</td>
570 |                     <td>${file.statements.toFixed(1)}%</td>
571 |                     <td>${file.functions.toFixed(1)}%</td>
572 |                     <td>${file.branches.toFixed(1)}%</td>
573 |                 </tr>`;
574 |       }
575 |       
576 |       html += `
577 |             </tbody>
578 |         </table>`;
579 |     }
580 | 
581 |     html += `</div>`;
582 |     return html;
583 |   }
584 | 
585 |   generateBenchmarkHtml() {
586 |     if (!this.results.benchmarks || this.results.benchmarks.results.length === 0) return '';
587 |     
588 |     let html = `
589 |     <div class="section">
590 |         <h2>⚡ Benchmark Results</h2>
591 |         <table>
592 |             <thead>
593 |                 <tr>
594 |                     <th>Benchmark</th>
595 |                     <th>Operations/sec</th>
596 |                     <th>Mean Time (ms)</th>
597 |                     <th>Min (ms)</th>
598 |                     <th>Max (ms)</th>
599 |                     <th>Samples</th>
600 |                 </tr>
601 |             </thead>
602 |             <tbody>`;
603 |     
604 |     for (const bench of this.results.benchmarks.results.slice(0, 20)) {
605 |       const opsFormatted = bench.ops.toLocaleString('en-US', { maximumFractionDigits: 0 });
606 |       const meanFormatted = (bench.mean * 1000).toFixed(3);
607 |       const minFormatted = (bench.min * 1000).toFixed(3);
608 |       const maxFormatted = (bench.max * 1000).toFixed(3);
609 |       
610 |       html += `
611 |                 <tr>
612 |                     <td>${bench.name}</td>
613 |                     <td><strong>${opsFormatted}</strong></td>
614 |                     <td>${meanFormatted}</td>
615 |                     <td>${minFormatted}</td>
616 |                     <td>${maxFormatted}</td>
617 |                     <td>${bench.samples}</td>
618 |                 </tr>`;
619 |     }
620 |     
621 |     html += `
622 |             </tbody>
623 |         </table>`;
624 |     
625 |     if (this.results.benchmarks.results.length > 20) {
626 |       html += `<p><em>Showing top 20 of ${this.results.benchmarks.results.length} benchmarks</em></p>`;
627 |     }
628 |     
629 |     html += `</div>`;
630 |     return html;
631 |   }
632 | 
633 |   escapeHtml(text) {
634 |     const map = {
635 |       '&': '&amp;',
636 |       '<': '&lt;',
637 |       '>': '&gt;',
638 |       '"': '&quot;',
639 |       "'": '&#039;'
640 |     };
641 |     return text.replace(/[&<>"']/g, m => map[m]);
642 |   }
643 | 
644 |   async generate() {
645 |     // Load all results
646 |     this.loadTestResults();
647 |     this.loadCoverageResults();
648 |     this.loadBenchmarkResults();
649 | 
650 |     // Ensure output directory exists
651 |     const outputDir = resolve(process.cwd(), 'test-reports');
652 |     if (!existsSync(outputDir)) {
653 |       mkdirSync(outputDir, { recursive: true });
654 |     }
655 | 
656 |     // Generate reports in different formats
657 |     const markdownReport = this.generateMarkdownReport();
658 |     const jsonReport = this.generateJsonReport();
659 |     const htmlReport = this.generateHtmlReport();
660 | 
661 |     // Write reports
662 |     writeFileSync(resolve(outputDir, 'report.md'), markdownReport);
663 |     writeFileSync(resolve(outputDir, 'report.json'), jsonReport);
664 |     writeFileSync(resolve(outputDir, 'report.html'), htmlReport);
665 | 
666 |     console.log('Test reports generated successfully:');
667 |     console.log('- test-reports/report.md');
668 |     console.log('- test-reports/report.json');
669 |     console.log('- test-reports/report.html');
670 |   }
671 | }
672 | 
673 | // Run the generator
674 | const generator = new TestReportGenerator();
675 | generator.generate().catch(console.error);
```
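
The generator only reads whatever input files it finds, so a typical local run might look like the sketch below (assuming Vitest is configured to write a JSON results file and a `json-summary` coverage report; adjust the commands to your setup):

```bash
# Produce the inputs the generator looks for (the paths are the ones hard-coded above)
npx vitest run --coverage --reporter=json --outputFile=test-results/results.json

# Write test-reports/report.md, report.json and report.html
node scripts/generate-detailed-reports.js
```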

--------------------------------------------------------------------------------
/docs/N8N_DEPLOYMENT.md:
--------------------------------------------------------------------------------

```markdown
  1 | # n8n-MCP Deployment Guide
  2 | 
  3 | This guide covers how to deploy n8n-MCP and connect it to your n8n instance. Whether you're testing locally or deploying to production, we'll show you how to set up n8n-MCP for use with n8n's MCP Client Tool node.
  4 | 
  5 | ## Table of Contents
  6 | - [Overview](#overview)
  7 | - [Local Testing](#local-testing)
  8 | - [Production Deployment](#production-deployment)
  9 |   - [Same Server as n8n](#same-server-as-n8n)
 10 |   - [Different Server (Cloud Deployment)](#different-server-cloud-deployment)
 11 | - [Connecting n8n to n8n-MCP](#connecting-n8n-to-n8n-mcp)
 12 | - [Security & Best Practices](#security--best-practices)
 13 | - [Troubleshooting](#troubleshooting)
 14 | 
 15 | ## Overview
 16 | 
 17 | n8n-MCP is a Model Context Protocol server that provides AI assistants with comprehensive access to n8n node documentation and management capabilities. When connected to n8n via the MCP Client Tool node, it enables:
 18 | - AI-powered workflow creation and validation
 19 | - Access to documentation for 500+ n8n nodes
 20 | - Workflow management through the n8n API
 21 | - Real-time configuration validation
 22 | 
 23 | ## Local Testing
 24 | 
 25 | ### Quick Test Script
 26 | 
 27 | Test n8n-MCP locally with the provided test script:
 28 | 
 29 | ```bash
 30 | # Clone the repository
 31 | git clone https://github.com/czlonkowski/n8n-mcp.git
 32 | cd n8n-mcp
 33 | 
 34 | # Build the project
 35 | npm install
 36 | npm run build
 37 | 
 38 | # Run the integration test script
 39 | ./scripts/test-n8n-integration.sh
 40 | ```
 41 | 
 42 | This script will:
 43 | 1. Start a real n8n instance in Docker
 44 | 2. Start n8n-MCP server configured for n8n
 45 | 3. Guide you through API key setup for workflow management
 46 | 4. Test the complete integration between n8n and n8n-MCP
 47 | 
 48 | ### Manual Local Setup
 49 | 
 50 | For development or custom testing:
 51 | 
 52 | 1. **Prerequisites**:
 53 |    - n8n instance running (local or remote)
 54 |    - n8n API key (from n8n Settings → API)
 55 | 
 56 | 2. **Start n8n-MCP**:
 57 | ```bash
 58 | # Set environment variables
 59 | export N8N_MODE=true
 60 | export MCP_MODE=http                       # Required for HTTP mode
 61 | export N8N_API_URL=http://localhost:5678  # Your n8n instance URL
 62 | export N8N_API_KEY=your-api-key-here       # Your n8n API key
 63 | export MCP_AUTH_TOKEN=test-token-minimum-32-chars-long
 64 | export AUTH_TOKEN=test-token-minimum-32-chars-long  # Same value as MCP_AUTH_TOKEN
 65 | export PORT=3001
 66 | 
 67 | # Start the server
 68 | npm start
 69 | ```
 70 | 
 71 | 3. **Verify it's running**:
 72 | ```bash
 73 | # Check health
 74 | curl http://localhost:3001/health
 75 | 
 76 | # Check MCP protocol endpoint (this is the endpoint n8n connects to)
 77 | curl http://localhost:3001/mcp
 78 | # Should return: {"protocolVersion":"2024-11-05"} for n8n compatibility
 79 | ```
 80 | 
 81 | ## Environment Variables Reference
 82 | 
 83 | | Variable | Required | Description | Example Value |
 84 | |----------|----------|-------------|---------------|
 85 | | `N8N_MODE` | Yes | Enables n8n integration mode | `true` |
 86 | | `MCP_MODE` | Yes | Enables HTTP mode for n8n MCP Client | `http` |
 87 | | `N8N_API_URL` | Yes* | URL of your n8n instance | `http://localhost:5678` |
 88 | | `N8N_API_KEY` | Yes* | n8n API key for workflow management | `n8n_api_xxx...` |
 89 | | `MCP_AUTH_TOKEN` | Yes | Authentication token for MCP requests (min 32 chars) | `secure-random-32-char-token` |
 90 | | `AUTH_TOKEN` | Yes | **MUST match MCP_AUTH_TOKEN exactly** | `secure-random-32-char-token` |
 91 | | `PORT` | No | Port for the HTTP server | `3000` (default) |
 92 | | `LOG_LEVEL` | No | Logging verbosity | `info`, `debug`, `error` |
 93 | 
 94 | *Required only for workflow management features. Documentation tools work without these.
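
For convenience, the same settings can be kept in an env file and passed to Docker in one go. A minimal sketch follows; the file name and all values are placeholders, and you should generate your own token with `openssl rand -hex 32`:

```bash
# .env.n8n (example values only)
N8N_MODE=true
MCP_MODE=http
N8N_API_URL=http://localhost:5678
N8N_API_KEY=your-n8n-api-key
# MCP_AUTH_TOKEN and AUTH_TOKEN must be the same value
MCP_AUTH_TOKEN=replace-with-a-random-token-of-at-least-32-chars
AUTH_TOKEN=replace-with-a-random-token-of-at-least-32-chars
PORT=3000
```

The file can then be supplied with `docker run --env-file .env.n8n ...` instead of repeating individual `-e` flags.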
 95 | 
 96 | ## Docker Build Changes (v2.9.2+)
 97 | 
 98 | Starting with version 2.9.2, we use a single optimized Dockerfile for all deployments:
 99 | - The previous `Dockerfile.n8n` has been removed as redundant
100 | - N8N_MODE functionality is enabled via the `N8N_MODE=true` environment variable
101 | - This reduces image size by 500MB+ and improves build times from 8+ minutes to 1-2 minutes
102 | - All examples now use the standard `Dockerfile`
103 | 
104 | ## Production Deployment
105 | 
106 | > **⚠️ Critical**: Docker caches images locally. Always run `docker pull ghcr.io/czlonkowski/n8n-mcp:latest` before deploying to ensure you have the latest version. This simple step prevents most deployment issues.
107 | 
108 | ### Same Server as n8n
109 | 
110 | If you're running n8n-MCP on the same server as your n8n instance:
111 | 
112 | #### Using Pre-built Image (Recommended)
113 | 
114 | The pre-built images are automatically updated with each release and are the easiest way to get started.
115 | 
116 | **IMPORTANT**: Always pull the latest image to avoid using cached versions:
117 | 
118 | ```bash
119 | # ALWAYS pull the latest image first
120 | docker pull ghcr.io/czlonkowski/n8n-mcp:latest
121 | 
122 | # Generate a secure token (save this!)
123 | AUTH_TOKEN=$(openssl rand -hex 32)
124 | echo "Your AUTH_TOKEN: $AUTH_TOKEN"
125 | 
126 | # Create a Docker network if n8n uses one
127 | docker network create n8n-net
128 | 
129 | # Run n8n-MCP container
130 | docker run -d \
131 |   --name n8n-mcp \
132 |   --network n8n-net \
133 |   -p 3000:3000 \
134 |   -e N8N_MODE=true \
135 |   -e MCP_MODE=http \
136 |   -e N8N_API_URL=http://n8n:5678 \
137 |   -e N8N_API_KEY=your-n8n-api-key \
138 |   -e MCP_AUTH_TOKEN=$AUTH_TOKEN \
139 |   -e AUTH_TOKEN=$AUTH_TOKEN \
140 |   -e LOG_LEVEL=info \
141 |   --restart unless-stopped \
142 |   ghcr.io/czlonkowski/n8n-mcp:latest
143 | ```
144 | 
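Once the container is up, it's worth confirming the endpoints respond before pointing n8n at it:

```bash
# Confirm the container started cleanly and the endpoints answer
docker logs n8n-mcp --tail 20
curl http://localhost:3000/health
curl http://localhost:3000/mcp   # n8n connects to this endpoint
```
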
145 | #### Building from Source (Advanced Users)
146 | 
147 | Only build from source if you need custom modifications or are contributing to development:
148 | 
149 | ```bash
150 | # Clone and build
151 | git clone https://github.com/czlonkowski/n8n-mcp.git
152 | cd n8n-mcp
153 | 
154 | # Build Docker image
155 | docker build -t n8n-mcp:latest .
156 | 
157 | # Generate the token once so MCP_AUTH_TOKEN and AUTH_TOKEN match, then run your local image
158 | AUTH_TOKEN=$(openssl rand -hex 32)
159 | docker run -d \
160 |   --name n8n-mcp \
161 |   -p 3000:3000 \
162 |   -e N8N_MODE=true \
163 |   -e MCP_MODE=http \
164 |   -e MCP_AUTH_TOKEN=$AUTH_TOKEN -e AUTH_TOKEN=$AUTH_TOKEN \
165 |   # ... other settings
166 |   n8n-mcp:latest
167 | ```
168 | 
169 | #### Using systemd (for native installation)
170 | 
171 | ```bash
172 | # Create the service file (write via tee so the redirect runs with sudo privileges)
173 | sudo tee /etc/systemd/system/n8n-mcp.service > /dev/null << EOF
174 | [Unit]
175 | Description=n8n-MCP Server
176 | After=network.target
177 | 
178 | [Service]
179 | Type=simple
180 | User=nodejs
181 | WorkingDirectory=/opt/n8n-mcp
182 | Environment="N8N_MODE=true"
183 | Environment="MCP_MODE=http"
184 | Environment="N8N_API_URL=http://localhost:5678"
185 | Environment="N8N_API_KEY=your-n8n-api-key"
186 | Environment="MCP_AUTH_TOKEN=your-secure-token-32-chars-min"
187 | Environment="AUTH_TOKEN=your-secure-token-32-chars-min"
188 | Environment="PORT=3000"
189 | ExecStart=/usr/bin/node /opt/n8n-mcp/dist/mcp/index.js
190 | Restart=on-failure
191 | 
192 | [Install]
193 | WantedBy=multi-user.target
194 | EOF
195 | 
196 | # Enable and start
197 | sudo systemctl daemon-reload
198 | sudo systemctl enable --now n8n-mcp
199 | ```
200 | 
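After starting the unit, the usual systemd tooling confirms it is healthy (the same commands appear in the Troubleshooting section below):

```bash
# Verify the service and the HTTP endpoints
systemctl status n8n-mcp
journalctl -u n8n-mcp -n 20
curl http://localhost:3000/health
```
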
201 | ### Different Server (Cloud Deployment)
202 | 
203 | Deploy n8n-MCP on a separate server from your n8n instance:
204 | 
205 | #### Quick Docker Deployment (Recommended)
206 | 
207 | **Always pull the latest image to ensure you have the current version:**
208 | 
209 | ```bash
210 | # On your cloud server (Hetzner, AWS, DigitalOcean, etc.)
211 | # ALWAYS pull the latest image first
212 | docker pull ghcr.io/czlonkowski/n8n-mcp:latest
213 | 
214 | # Generate auth tokens
215 | AUTH_TOKEN=$(openssl rand -hex 32)
216 | echo "Save this AUTH_TOKEN: $AUTH_TOKEN"
217 | 
218 | # Run the container
219 | docker run -d \
220 |   --name n8n-mcp \
221 |   -p 3000:3000 \
222 |   -e N8N_MODE=true \
223 |   -e MCP_MODE=http \
224 |   -e N8N_API_URL=https://your-n8n-instance.com \
225 |   -e N8N_API_KEY=your-n8n-api-key \
226 |   -e MCP_AUTH_TOKEN=$AUTH_TOKEN \
227 |   -e AUTH_TOKEN=$AUTH_TOKEN \
228 |   -e LOG_LEVEL=info \
229 |   --restart unless-stopped \
230 |   ghcr.io/czlonkowski/n8n-mcp:latest
231 | ```
232 | 
233 | #### Building from Source (Advanced)
234 | 
235 | Only needed if you're modifying the code:
236 | 
237 | ```bash
238 | # Clone and build
239 | git clone https://github.com/czlonkowski/n8n-mcp.git
240 | cd n8n-mcp
241 | docker build -t n8n-mcp:latest .
242 | 
243 | # Run using local image
244 | docker run -d \
245 |   --name n8n-mcp \
246 |   -p 3000:3000 \
247 |   # ... same environment variables as above
248 |   n8n-mcp:latest
249 | ```
250 | 
251 | #### Full Production Setup (Hetzner/AWS/DigitalOcean)
252 | 
253 | 1. **Server Requirements**:
254 |    - **Minimal**: 1 vCPU, 1GB RAM (CX11 on Hetzner)
255 |    - **Recommended**: 2 vCPU, 2GB RAM
256 |    - **OS**: Ubuntu 22.04 LTS
257 | 
258 | 2. **Initial Setup**:
259 | ```bash
260 | # SSH into your server
261 | ssh root@your-server-ip
262 | 
263 | # Update and install Docker
264 | apt update && apt upgrade -y
265 | curl -fsSL https://get.docker.com | sh
266 | ```
267 | 
268 | 3. **Deploy n8n-MCP with SSL** (using Caddy for automatic HTTPS):
269 | 
270 | **Using Docker Compose (Recommended)**
271 | ```bash
272 | # Create docker-compose.yml
273 | cat > docker-compose.yml << 'EOF'
274 | version: '3.8'
275 | 
276 | services:
277 |   n8n-mcp:
278 |     image: ghcr.io/czlonkowski/n8n-mcp:latest
279 |     pull_policy: always  # Always pull latest image
280 |     container_name: n8n-mcp
281 |     restart: unless-stopped
282 |     environment:
283 |       - N8N_MODE=true
284 |       - MCP_MODE=http
285 |       - N8N_API_URL=${N8N_API_URL}
286 |       - N8N_API_KEY=${N8N_API_KEY}
287 |       - MCP_AUTH_TOKEN=${MCP_AUTH_TOKEN}
288 |       - AUTH_TOKEN=${AUTH_TOKEN}
289 |       - PORT=3000
290 |       - LOG_LEVEL=info
291 |     networks:
292 |       - web
293 | 
294 |   caddy:
295 |     image: caddy:2-alpine
296 |     container_name: caddy
297 |     restart: unless-stopped
298 |     ports:
299 |       - "80:80"
300 |       - "443:443"
301 |     volumes:
302 |       - ./Caddyfile:/etc/caddy/Caddyfile
303 |       - caddy_data:/data
304 |       - caddy_config:/config
305 |     networks:
306 |       - web
307 | 
308 | networks:
309 |   web:
310 |     driver: bridge
311 | 
312 | volumes:
313 |   caddy_data:
314 |   caddy_config:
315 | EOF
316 | ```
317 | 
318 | **Note**: The `pull_policy: always` ensures you always get the latest version.
319 | 
320 | **Building from Source (if needed)**
321 | ```bash
322 | # Only if you need custom modifications
323 | git clone https://github.com/czlonkowski/n8n-mcp.git
324 | cd n8n-mcp
325 | docker build -t n8n-mcp:local .
326 | 
327 | # Then update docker-compose.yml to use:
328 | # image: n8n-mcp:local
365 | ```
366 | 
367 | **Complete the Setup**
368 | ```bash
369 | # Create Caddyfile
370 | cat > Caddyfile << 'EOF'
371 | mcp.yourdomain.com {
372 |     reverse_proxy n8n-mcp:3000
373 | }
374 | EOF
375 | 
376 | # Create .env file
377 | AUTH_TOKEN=$(openssl rand -hex 32)
378 | cat > .env << EOF
379 | N8N_API_URL=https://your-n8n-instance.com
380 | N8N_API_KEY=your-n8n-api-key-here
381 | MCP_AUTH_TOKEN=$AUTH_TOKEN
382 | AUTH_TOKEN=$AUTH_TOKEN
383 | EOF
384 | 
385 | # Save the AUTH_TOKEN!
386 | echo "Your AUTH_TOKEN is: $AUTH_TOKEN"
387 | echo "Save this token - you'll need it in n8n MCP Client Tool configuration"
388 | 
389 | # Start services
390 | docker compose up -d
391 | ```
392 | 
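Once DNS for `mcp.yourdomain.com` points at this server, Caddy obtains a certificate automatically. You can then verify the stack end to end (the domain is the placeholder from the Caddyfile above):

```bash
# Check both containers and the public endpoints
docker compose ps
docker compose logs --tail 20 n8n-mcp
curl https://mcp.yourdomain.com/health
curl https://mcp.yourdomain.com/mcp   # this is the URL to use in the n8n MCP Client Tool
```
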
393 | #### Cloud Provider Tips
394 | 
395 | **AWS EC2**:
396 | - Security Group: Open port 3000 (or 443 with HTTPS)
397 | - Instance Type: t3.micro is sufficient
398 | - Use Elastic IP for stable addressing
399 | 
400 | **DigitalOcean**:
401 | - Droplet: Basic ($6/month) is enough
402 | - Enable backups for production use
403 | 
404 | **Google Cloud**:
405 | - Machine Type: e2-micro (free tier eligible)
406 | - Use Cloud Load Balancer for SSL
407 | 
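Whichever provider you choose, only the reverse-proxy port needs to be reachable from outside; port 3000 can stay internal to the Docker network. On Ubuntu with `ufw`, a minimal rule set might look like this (a sketch; adapt to your provider's firewall):

```bash
sudo ufw allow 22/tcp    # keep SSH reachable before enabling the firewall
sudo ufw allow 443/tcp   # HTTPS terminated by Caddy
sudo ufw enable
sudo ufw status
```
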
408 | ## Connecting n8n to n8n-MCP
409 | 
410 | ### Configure n8n MCP Client Tool
411 | 
412 | 1. **In your n8n workflow**, add the **MCP Client Tool** node
413 | 
414 | 2. **Configure the connection**:
415 |    ```
416 |    Server URL (MUST include /mcp endpoint): 
417 |    - Same server: http://localhost:3000/mcp
418 |    - Docker network: http://n8n-mcp:3000/mcp
419 |    - Different server: https://mcp.yourdomain.com/mcp
420 |    
421 |    Auth Token: [Your MCP_AUTH_TOKEN/AUTH_TOKEN value]
422 |    
423 |    Transport: HTTP Streamable (SSE)
424 |    ```
425 |    
426 |    ⚠️ **Critical**: The Server URL must include the `/mcp` endpoint path. Without this, the connection will fail.
427 | 
428 | 3. **Test the connection** by selecting a simple tool like `list_nodes`
429 | 
430 | ### Available Tools
431 | 
432 | Once connected, you can use these MCP tools in n8n:
433 | 
434 | **Documentation Tools** (No API key required):
435 | - `list_nodes` - List all n8n nodes with filtering
436 | - `search_nodes` - Search nodes by keyword
437 | - `get_node_info` - Get detailed node information
438 | - `get_node_essentials` - Get only essential properties
439 | - `validate_workflow` - Validate workflow configurations
440 | - `get_node_documentation` - Get human-readable docs
441 | 
442 | **Management Tools** (Requires n8n API key):
443 | - `n8n_create_workflow` - Create new workflows
444 | - `n8n_update_workflow` - Update existing workflows
445 | - `n8n_get_workflow` - Retrieve workflow details
446 | - `n8n_list_workflows` - List all workflows
447 | - `n8n_trigger_webhook_workflow` - Trigger webhook workflows
448 | 
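You can also exercise these tools directly over HTTP as a smoke test before involving n8n, using the JSON-RPC format shown in the Troubleshooting section below. The `query` argument name here is illustrative; confirm each tool's schema via `tools/list`:

```bash
# Call a documentation tool straight against the /mcp endpoint
curl -X POST http://localhost:3000/mcp \
  -H "Authorization: Bearer YOUR_AUTH_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"jsonrpc":"2.0","method":"tools/call","params":{"name":"search_nodes","arguments":{"query":"webhook"}},"id":1}'
```
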
449 | ### Using with AI Agents
450 | 
451 | Connect n8n-MCP to AI Agent nodes for intelligent automation:
452 | 
453 | 1. **Add an AI Agent node** (e.g., OpenAI, Anthropic)
454 | 2. **Connect MCP Client Tool** to the Agent's tool input
455 | 3. **Configure prompts** for workflow creation:
456 | 
457 | ```
458 | You are an n8n workflow expert. Use the MCP tools to:
459 | 1. Search for appropriate nodes using search_nodes
460 | 2. Get configuration details with get_node_essentials
461 | 3. Validate configurations with validate_workflow
462 | 4. Create the workflow if all validations pass
463 | ```
464 | 
465 | ## Security & Best Practices
466 | 
467 | ### Authentication
468 | - **MCP_AUTH_TOKEN**: Always use a strong, random token (32+ characters)
469 | - **N8N_API_KEY**: Only required for workflow management features
470 | - Store tokens in environment variables or secure vaults
471 | 
472 | ### Network Security
473 | - **Use HTTPS** in production (Caddy/Nginx/Traefik)
474 | - **Firewall**: Only expose necessary ports (3000 or 443)
475 | - **IP Whitelisting**: Consider restricting access to known n8n instances
476 | 
477 | ### Docker Security
478 | - **Always pull latest images**: Docker caches images locally, so run `docker pull` before deployment
479 | - Run containers with `--read-only` flag if possible
480 | - Use specific image versions instead of `:latest` in production
481 | - Regular updates: `docker pull ghcr.io/czlonkowski/n8n-mcp:latest`
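
A minimal pattern that ties these points together, as a sketch (the env file name is just an example; pin a specific image tag in production):

```bash
# Generate the token once, keep it in a root-only env file, and reuse it for both variables
AUTH_TOKEN=$(openssl rand -hex 32)
printf 'MCP_AUTH_TOKEN=%s\nAUTH_TOKEN=%s\n' "$AUTH_TOKEN" "$AUTH_TOKEN" > n8n-mcp.env
chmod 600 n8n-mcp.env

docker pull ghcr.io/czlonkowski/n8n-mcp:latest
docker run -d --name n8n-mcp -p 3000:3000 \
  -e N8N_MODE=true -e MCP_MODE=http \
  --env-file n8n-mcp.env \
  --restart unless-stopped \
  ghcr.io/czlonkowski/n8n-mcp:latest
```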
482 | 
483 | ## Troubleshooting
484 | 
485 | ### Docker Image Issues
486 | 
487 | **Using Outdated Cached Images**
488 | - **Symptom**: Missing features, old bugs reappearing, features not working as documented
489 | - **Cause**: Docker uses locally cached images instead of pulling the latest version
490 | - **Solution**: Always run `docker pull ghcr.io/czlonkowski/n8n-mcp:latest` before deployment
491 | - **Verification**: Check image age with `docker images | grep n8n-mcp`
492 | 
493 | ### Common Configuration Issues
494 | 
495 | **Missing `MCP_MODE=http` Environment Variable**
496 | - **Symptom**: n8n MCP Client Tool cannot connect, server doesn't respond on `/mcp` endpoint
497 | - **Solution**: Add `MCP_MODE=http` to your environment variables
498 | - **Why**: Without this, the server runs in stdio mode which is incompatible with n8n
499 | 
500 | **Server URL Missing `/mcp` Endpoint**
501 | - **Symptom**: "Connection refused" or "Invalid response" in n8n MCP Client Tool
502 | - **Solution**: Ensure your Server URL includes `/mcp` (e.g., `http://localhost:3000/mcp`)
503 | - **Why**: n8n connects to the `/mcp` endpoint specifically, not the root URL
504 | 
505 | **Mismatched Auth Tokens**
506 | - **Symptom**: "Authentication failed" or "Invalid auth token"
507 | - **Solution**: Ensure both `MCP_AUTH_TOKEN` and `AUTH_TOKEN` have the same value
508 | - **Why**: Both variables must match for proper authentication
509 | 
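A quick way to check all three on a Docker deployment:

```bash
# Confirm the mode, the two auth variables, and the /mcp endpoint in one pass
docker exec n8n-mcp env | grep -E "MCP_MODE|AUTH_TOKEN"
curl http://localhost:3000/mcp
```
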
510 | ### Connection Issues
511 | 
512 | **"Connection refused" in n8n MCP Client Tool**
513 | 1. **Check n8n-MCP is running**:
514 |    ```bash
515 |    # Docker
516 |    docker ps | grep n8n-mcp
517 |    docker logs n8n-mcp --tail 20
518 |    
519 |    # Systemd
520 |    systemctl status n8n-mcp
521 |    journalctl -u n8n-mcp -n 20
522 |    ```
523 | 
524 | 2. **Verify endpoints are accessible**:
525 |    ```bash
526 |    # Health check (should return status info)
527 |    curl http://your-server:3000/health
528 |    
529 |    # MCP endpoint (should return protocol version)
530 |    curl http://your-server:3000/mcp
531 |    ```
532 | 
533 | 3. **Check firewall and networking**:
534 |    ```bash
535 |    # Test port accessibility from n8n server
536 |    telnet your-mcp-server 3000
537 |    
538 |    # Check firewall rules (Ubuntu/Debian)
539 |    sudo ufw status
540 |    
541 |    # Check if the port is bound correctly (ss is preinstalled on Ubuntu; netstat needs net-tools)
542 |    ss -tlnp | grep :3000
543 |    ```
544 | 
545 | **"Invalid auth token" or "Authentication failed"**
546 | 1. **Verify token format**:
547 |    ```bash
548 |    # Check token length (openssl rand -hex 32 produces 64 characters)
549 |    echo -n "$MCP_AUTH_TOKEN" | wc -c
550 |    
551 |    # Verify both tokens match
552 |    echo "MCP_AUTH_TOKEN: $MCP_AUTH_TOKEN"
553 |    echo "AUTH_TOKEN: $AUTH_TOKEN"
554 |    ```
555 | 
556 | 2. **Common token issues**:
557 |    - Token too short (minimum 32 characters)
558 |    - Extra whitespace or newlines in token
559 |    - Different values for `MCP_AUTH_TOKEN` and `AUTH_TOKEN`
560 |    - Special characters not properly escaped in environment files
561 | 
562 | **"Cannot connect to n8n API"**
563 | 1. **Verify n8n configuration**:
564 |    ```bash
565 |    # Test n8n API accessibility
566 |    curl -H "X-N8N-API-KEY: your-api-key" \
567 |         https://your-n8n-instance.com/api/v1/workflows
568 |    ```
569 | 
570 | 2. **Common n8n API issues**:
571 |    - `N8N_API_URL` missing protocol (http:// or https://)
572 |    - n8n API key expired or invalid
573 |    - n8n instance not accessible from n8n-MCP server
574 |    - n8n API disabled in settings
575 | 
576 | ### Version Compatibility Issues
577 | 
578 | **"Features Not Working as Expected"**
579 | - **Symptom**: Missing features, old bugs, or compatibility issues
580 | - **Solution**: Pull the latest image: `docker pull ghcr.io/czlonkowski/n8n-mcp:latest`
581 | - **Check**: Verify image date with `docker inspect ghcr.io/czlonkowski/n8n-mcp:latest | grep Created`
582 | 
583 | **"Protocol version mismatch"**
584 | - n8n-MCP automatically uses version 2024-11-05 for n8n compatibility
585 | - Update to latest n8n-MCP version if issues persist
586 | - Verify `/mcp` endpoint returns correct version
587 | 
588 | ### Environment Variable Issues
589 | 
590 | **Complete Environment Variable Checklist**:
591 | ```bash
592 | # Required for all deployments
593 | export N8N_MODE=true                                    # Enables n8n integration
594 | export MCP_MODE=http                                   # Enables HTTP mode for n8n
595 | export MCP_AUTH_TOKEN=your-secure-32-char-token       # Auth token
596 | export AUTH_TOKEN=your-secure-32-char-token           # Same value as MCP_AUTH_TOKEN
597 | 
598 | # Required for workflow management features
599 | export N8N_API_URL=https://your-n8n-instance.com      # Your n8n URL
600 | export N8N_API_KEY=your-n8n-api-key                   # Your n8n API key
601 | 
602 | # Optional
603 | export PORT=3000                                       # HTTP port (default: 3000)
604 | export LOG_LEVEL=info                                  # Logging level
605 | ```
606 | 
607 | ### Docker-Specific Issues
608 | 
609 | **Container Build Failures**
610 | ```bash
611 | # Clear Docker cache and rebuild
612 | docker system prune -f
613 | docker build --no-cache -t n8n-mcp:latest .
614 | ```
615 | 
616 | **Container Runtime Issues**
617 | ```bash
618 | # Check container logs for detailed errors
619 | docker logs n8n-mcp -f --timestamps
620 | 
621 | # Inspect container environment
622 | docker exec n8n-mcp env | grep -E "(N8N|MCP|AUTH)"
623 | 
624 | # Test container connectivity
625 | docker exec n8n-mcp curl -f http://localhost:3000/health
626 | ```
627 | 
628 | ### Network and SSL Issues
629 | 
630 | **HTTPS/SSL Problems**
631 | ```bash
632 | # Test SSL certificate
633 | openssl s_client -connect mcp.yourdomain.com:443
634 | 
635 | # Check Caddy logs
636 | docker logs caddy -f --tail 50
637 | ```
638 | 
639 | **Docker Network Issues**
640 | ```bash
641 | # Check if containers can communicate
642 | docker network ls
643 | docker network inspect bridge
644 | 
645 | # Test inter-container connectivity
646 | docker exec n8n curl http://n8n-mcp:3000/health
647 | ```
648 | 
649 | ### Debugging Steps
650 | 
651 | 1. **Enable comprehensive logging**:
652 | ```bash
653 | # For Docker
654 | docker run -d \
655 |   --name n8n-mcp \
656 |   -e DEBUG_MCP=true \
657 |   -e LOG_LEVEL=debug \
658 |   -e N8N_MODE=true \
659 |   -e MCP_MODE=http \
660 |   # ... other settings
661 | 
662 | # For systemd, add to service file:
663 | Environment="DEBUG_MCP=true"
664 | Environment="LOG_LEVEL=debug"
665 | ```
666 | 
667 | 2. **Test all endpoints systematically**:
668 | ```bash
669 | # 1. Health check (basic server functionality)
670 | curl -v http://localhost:3000/health
671 | 
672 | # 2. MCP protocol endpoint (what n8n connects to)
673 | curl -v http://localhost:3000/mcp
674 | 
675 | # 3. Test authentication (if working, returns tools list)
676 | curl -X POST http://localhost:3000/mcp \
677 |   -H "Authorization: Bearer YOUR_AUTH_TOKEN" \
678 |   -H "Content-Type: application/json" \
679 |   -d '{"jsonrpc":"2.0","method":"tools/list","id":1}'
680 | 
681 | # 4. Test a simple tool (documentation only, no n8n API needed)
682 | curl -X POST http://localhost:3000/mcp \
683 |   -H "Authorization: Bearer YOUR_AUTH_TOKEN" \
684 |   -H "Content-Type: application/json" \
685 |   -d '{"jsonrpc":"2.0","method":"tools/call","params":{"name":"get_database_statistics","arguments":{}},"id":2}'
686 | ```
687 | 
688 | 3. **Common log patterns to look for**:
689 | ```bash
690 | # Success patterns
691 | grep "Server started" /var/log/n8n-mcp.log
692 | grep "Protocol version" /var/log/n8n-mcp.log
693 | 
694 | # Error patterns
695 | grep -i "error\|failed\|invalid" /var/log/n8n-mcp.log
696 | grep -i "auth\|token" /var/log/n8n-mcp.log
697 | grep -i "connection\|network" /var/log/n8n-mcp.log
698 | ```
699 | 
700 | ### Getting Help
701 | 
702 | If you're still experiencing issues:
703 | 
704 | 1. **Gather diagnostic information**:
705 | ```bash
706 | # System info
707 | docker --version
708 | docker-compose --version
709 | uname -a
710 | 
711 | # n8n-MCP version
712 | docker exec n8n-mcp node dist/index.js --version
713 | 
714 | # Environment check
715 | docker exec n8n-mcp env | grep -E "(N8N|MCP|AUTH)" | sort
716 | 
717 | # Container status
718 | docker ps | grep n8n-mcp
719 | docker stats n8n-mcp --no-stream
720 | ```
721 | 
722 | 2. **Create a minimal test setup**:
723 | ```bash
724 | # Test with minimal configuration
725 | docker run -d \
726 |   --name n8n-mcp-test \
727 |   -p 3001:3000 \
728 |   -e N8N_MODE=true \
729 |   -e MCP_MODE=http \
730 |   -e MCP_AUTH_TOKEN=test-token-minimum-32-chars-long \
731 |   -e AUTH_TOKEN=test-token-minimum-32-chars-long \
732 |   -e LOG_LEVEL=debug \
733 |   n8n-mcp:latest
734 | 
735 | # Test basic functionality
736 | curl http://localhost:3001/health
737 | curl http://localhost:3001/mcp
738 | ```
739 | 
740 | 3. **Report issues**: Include the diagnostic information when opening an issue on [GitHub](https://github.com/czlonkowski/n8n-mcp/issues)
741 | 
742 | ## Performance Tips
743 | 
744 | - **Minimal deployment**: 1 vCPU, 1GB RAM is sufficient
745 | - **Database**: Pre-built SQLite database (~15MB) loads quickly
746 | - **Response time**: Average 12ms for queries
747 | - **Caching**: Built-in 15-minute cache for repeated queries
748 | 
749 | ## Next Steps
750 | 
751 | - Test your setup with the [MCP Client Tool in n8n](https://docs.n8n.io/integrations/builtin/app-nodes/n8n-nodes-langchain.mcpclienttool/)
752 | - Explore [available MCP tools](../README.md#-available-mcp-tools)
753 | - Build AI-powered workflows with [AI Agent nodes](https://docs.n8n.io/integrations/builtin/cluster-nodes/sub-nodes/n8n-nodes-langchain.lmagent/)
754 | - Join the [n8n Community](https://community.n8n.io) for ideas and support
755 | 
756 | ---
757 | 
758 | Need help? Open an issue on [GitHub](https://github.com/czlonkowski/n8n-mcp/issues) or check the [n8n forums](https://community.n8n.io)
```

--------------------------------------------------------------------------------
/tests/unit/mappers/docs-mapper.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
  2 | import { DocsMapper } from '@/mappers/docs-mapper';
  3 | import { promises as fs } from 'fs';
  4 | import path from 'path';
  5 | 
  6 | // Mock fs promises
  7 | vi.mock('fs', () => ({
  8 |   promises: {
  9 |     readFile: vi.fn()
 10 |   }
 11 | }));
 12 | 
 13 | // Mock process.cwd()
 14 | const originalCwd = process.cwd;
 15 | beforeEach(() => {
 16 |   process.cwd = vi.fn(() => '/mocked/path');
 17 | });
 18 | 
 19 | afterEach(() => {
 20 |   process.cwd = originalCwd;
 21 |   vi.clearAllMocks();
 22 | });
 23 | 
 24 | describe('DocsMapper', () => {
 25 |   let docsMapper: DocsMapper;
 26 |   let consoleLogSpy: any;
 27 | 
 28 |   beforeEach(() => {
 29 |     docsMapper = new DocsMapper();
 30 |     consoleLogSpy = vi.spyOn(console, 'log').mockImplementation(() => {});
 31 |   });
 32 | 
 33 |   afterEach(() => {
 34 |     consoleLogSpy.mockRestore();
 35 |   });
 36 | 
 37 |   describe('fetchDocumentation', () => {
 38 |     describe('successful documentation fetch', () => {
 39 |       it('should fetch documentation for httpRequest node', async () => {
 40 |         const mockContent = '# HTTP Request Node\n\nDocumentation content';
 41 |         vi.mocked(fs.readFile).mockResolvedValueOnce(mockContent);
 42 | 
 43 |         const result = await docsMapper.fetchDocumentation('httpRequest');
 44 | 
 45 |         expect(result).toBe(mockContent);
 46 |         expect(fs.readFile).toHaveBeenCalledWith(
 47 |           expect.stringContaining('httprequest.md'),
 48 |           'utf-8'
 49 |         );
 50 |         expect(consoleLogSpy).toHaveBeenCalledWith('📄 Looking for docs for: httpRequest -> httprequest');
 51 |         expect(consoleLogSpy).toHaveBeenCalledWith(expect.stringContaining('✓ Found docs at:'));
 52 |       });
 53 | 
 54 |       it('should apply known fixes for node types', async () => {
 55 |         const mockContent = '# Webhook Node\n\nDocumentation';
 56 |         vi.mocked(fs.readFile).mockResolvedValueOnce(mockContent);
 57 | 
 58 |         const result = await docsMapper.fetchDocumentation('webhook');
 59 | 
 60 |         expect(result).toBe(mockContent);
 61 |         expect(fs.readFile).toHaveBeenCalledWith(
 62 |           expect.stringContaining('webhook.md'),
 63 |           'utf-8'
 64 |         );
 65 |       });
 66 | 
 67 |       it('should handle node types with package prefix', async () => {
 68 |         const mockContent = '# Code Node\n\nDocumentation';
 69 |         vi.mocked(fs.readFile).mockResolvedValueOnce(mockContent);
 70 | 
 71 |         const result = await docsMapper.fetchDocumentation('n8n-nodes-base.code');
 72 | 
 73 |         expect(result).toBe(mockContent);
 74 |         expect(consoleLogSpy).toHaveBeenCalledWith('📄 Looking for docs for: n8n-nodes-base.code -> code');
 75 |       });
 76 | 
 77 |       it('should try multiple paths until finding documentation', async () => {
 78 |         const mockContent = '# Slack Node\n\nDocumentation';
 79 |         // First few attempts fail
 80 |         vi.mocked(fs.readFile)
 81 |           .mockRejectedValueOnce(new Error('Not found'))
 82 |           .mockRejectedValueOnce(new Error('Not found'))
 83 |           .mockResolvedValueOnce(mockContent);
 84 | 
 85 |         const result = await docsMapper.fetchDocumentation('slack');
 86 | 
 87 |         expect(result).toBe(mockContent);
 88 |         expect(fs.readFile).toHaveBeenCalledTimes(3);
 89 |       });
 90 | 
 91 |       it('should check directory paths with index.md', async () => {
 92 |         const mockContent = '# Complex Node\n\nDocumentation';
 93 |         // Simulate finding in a directory structure - reject enough times to reach index.md paths
 94 |         vi.mocked(fs.readFile)
 95 |           .mockRejectedValueOnce(new Error('Not found')) // core-nodes direct
 96 |           .mockRejectedValueOnce(new Error('Not found')) // app-nodes direct
 97 |           .mockRejectedValueOnce(new Error('Not found')) // trigger-nodes direct
 98 |           .mockRejectedValueOnce(new Error('Not found')) // langchain root direct
 99 |           .mockRejectedValueOnce(new Error('Not found')) // langchain sub direct
100 |           .mockResolvedValueOnce(mockContent); // Found in directory/index.md
101 | 
102 |         const result = await docsMapper.fetchDocumentation('complexNode');
103 | 
104 |         expect(result).toBe(mockContent);
105 |         // Check that it eventually tried an index.md path
106 |         expect(fs.readFile).toHaveBeenCalledTimes(6);
107 |         const calls = vi.mocked(fs.readFile).mock.calls;
108 |         const indexCalls = calls.filter(call => (call[0] as string).includes('index.md'));
109 |         expect(indexCalls.length).toBeGreaterThan(0);
110 |       });
111 |     });
112 | 
113 |     describe('documentation not found', () => {
114 |       it('should return null when documentation is not found', async () => {
115 |         vi.mocked(fs.readFile).mockRejectedValue(new Error('ENOENT: no such file'));
116 | 
117 |         const result = await docsMapper.fetchDocumentation('nonExistentNode');
118 | 
119 |         expect(result).toBeNull();
120 |         expect(consoleLogSpy).toHaveBeenCalledWith('  ✗ No docs found for nonexistentnode');
121 |       });
122 | 
123 |       it('should return null for empty node type', async () => {
124 |         const result = await docsMapper.fetchDocumentation('');
125 | 
126 |         expect(result).toBeNull();
127 |         expect(consoleLogSpy).toHaveBeenCalledWith('⚠️  Could not extract node name from: ');
128 |       });
129 | 
130 |       it('should handle invalid node type format', async () => {
131 |         const result = await docsMapper.fetchDocumentation('.');
132 | 
133 |         expect(result).toBeNull();
134 |         expect(consoleLogSpy).toHaveBeenCalledWith('⚠️  Could not extract node name from: .');
135 |       });
136 |     });
137 | 
138 |     describe('path construction', () => {
139 |       it('should construct correct paths for core nodes', async () => {
140 |         vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
141 | 
142 |         await docsMapper.fetchDocumentation('testNode');
143 | 
144 |         // Check that it tried core-nodes path
145 |         expect(fs.readFile).toHaveBeenCalledWith(
146 |           path.join('/mocked/path', 'n8n-docs', 'docs/integrations/builtin/core-nodes/n8n-nodes-base.testnode.md'),
147 |           'utf-8'
148 |         );
149 |       });
150 | 
151 |       it('should construct correct paths for app nodes', async () => {
152 |         vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
153 | 
154 |         await docsMapper.fetchDocumentation('appNode');
155 | 
156 |         // Check that it tried app-nodes path
157 |         expect(fs.readFile).toHaveBeenCalledWith(
158 |           path.join('/mocked/path', 'n8n-docs', 'docs/integrations/builtin/app-nodes/n8n-nodes-base.appnode.md'),
159 |           'utf-8'
160 |         );
161 |       });
162 | 
163 |       it('should construct correct paths for trigger nodes', async () => {
164 |         vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
165 | 
166 |         await docsMapper.fetchDocumentation('triggerNode');
167 | 
168 |         // Check that it tried trigger-nodes path
169 |         expect(fs.readFile).toHaveBeenCalledWith(
170 |           path.join('/mocked/path', 'n8n-docs', 'docs/integrations/builtin/trigger-nodes/n8n-nodes-base.triggernode.md'),
171 |           'utf-8'
172 |         );
173 |       });
174 | 
175 |       it('should construct correct paths for langchain nodes', async () => {
176 |         vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
177 | 
178 |         await docsMapper.fetchDocumentation('aiNode');
179 | 
180 |         // Check that it tried langchain paths
181 |         expect(fs.readFile).toHaveBeenCalledWith(
182 |           expect.stringContaining('cluster-nodes/root-nodes/n8n-nodes-langchain.ainode'),
183 |           'utf-8'
184 |         );
185 |         expect(fs.readFile).toHaveBeenCalledWith(
186 |           expect.stringContaining('cluster-nodes/sub-nodes/n8n-nodes-langchain.ainode'),
187 |           'utf-8'
188 |         );
189 |       });
190 |     });
191 | 
192 |     describe('error handling', () => {
193 |       it('should handle file system errors gracefully', async () => {
194 |         const customError = new Error('Permission denied');
195 |         vi.mocked(fs.readFile).mockRejectedValue(customError);
196 | 
197 |         const result = await docsMapper.fetchDocumentation('testNode');
198 | 
199 |         expect(result).toBeNull();
200 |         // Should have tried all possible paths
201 |         expect(fs.readFile).toHaveBeenCalledTimes(10); // 5 direct paths + 5 directory paths
202 |       });
203 | 
204 |       it('should handle non-Error exceptions', async () => {
205 |         vi.mocked(fs.readFile).mockRejectedValue('String error');
206 | 
207 |         const result = await docsMapper.fetchDocumentation('testNode');
208 | 
209 |         expect(result).toBeNull();
210 |       });
211 |     });
212 | 
213 |     describe('KNOWN_FIXES mapping', () => {
214 |       it('should apply fix for httpRequest', async () => {
215 |         vi.mocked(fs.readFile).mockResolvedValueOnce('content');
216 | 
217 |         await docsMapper.fetchDocumentation('httpRequest');
218 | 
219 |         expect(fs.readFile).toHaveBeenCalledWith(
220 |           expect.stringContaining('httprequest.md'),
221 |           'utf-8'
222 |         );
223 |       });
224 | 
225 |       it('should apply fix for respondToWebhook', async () => {
226 |         vi.mocked(fs.readFile).mockResolvedValueOnce('content');
227 | 
228 |         await docsMapper.fetchDocumentation('respondToWebhook');
229 | 
230 |         expect(fs.readFile).toHaveBeenCalledWith(
231 |           expect.stringContaining('respondtowebhook.md'),
232 |           'utf-8'
233 |         );
234 |       });
235 | 
236 |       it('should preserve casing for unknown nodes', async () => {
237 |         vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
238 | 
239 |         await docsMapper.fetchDocumentation('CustomNode');
240 | 
241 |         expect(fs.readFile).toHaveBeenCalledWith(
242 |           expect.stringContaining('customnode.md'), // toLowerCase applied
243 |           'utf-8'
244 |         );
245 |       });
246 |     });
247 | 
248 |     describe('logging', () => {
249 |       it('should log search progress', async () => {
250 |         vi.mocked(fs.readFile).mockResolvedValueOnce('content');
251 | 
252 |         await docsMapper.fetchDocumentation('testNode');
253 | 
254 |         expect(consoleLogSpy).toHaveBeenCalledWith('📄 Looking for docs for: testNode -> testnode');
255 |         expect(consoleLogSpy).toHaveBeenCalledWith(expect.stringContaining('✓ Found docs at:'));
256 |       });
257 | 
258 |       it('should log when documentation is not found', async () => {
259 |         vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
260 | 
261 |         await docsMapper.fetchDocumentation('missingNode');
262 | 
263 |         expect(consoleLogSpy).toHaveBeenCalledWith('📄 Looking for docs for: missingNode -> missingnode');
264 |         expect(consoleLogSpy).toHaveBeenCalledWith('  ✗ No docs found for missingnode');
265 |       });
266 |     });
267 | 
268 |     describe('edge cases', () => {
269 |       it('should handle very long node names', async () => {
270 |         const longNodeName = 'a'.repeat(100);
271 |         vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
272 | 
273 |         const result = await docsMapper.fetchDocumentation(longNodeName);
274 | 
275 |         expect(result).toBeNull();
276 |         expect(fs.readFile).toHaveBeenCalled();
277 |       });
278 | 
279 |       it('should handle node names with special characters', async () => {
280 |         vi.mocked(fs.readFile).mockRejectedValue(new Error('Not found'));
281 | 
282 |         const result = await docsMapper.fetchDocumentation('node-with-dashes_and_underscores');
283 | 
284 |         expect(result).toBeNull();
285 |         expect(fs.readFile).toHaveBeenCalledWith(
286 |           expect.stringContaining('node-with-dashes_and_underscores.md'),
287 |           'utf-8'
288 |         );
289 |       });
290 | 
291 |       it('should handle multiple dots in node type', async () => {
292 |         vi.mocked(fs.readFile).mockResolvedValueOnce('content');
293 | 
294 |         const result = await docsMapper.fetchDocumentation('com.example.nodes.custom');
295 | 
296 |         expect(result).toBe('content');
297 |         expect(consoleLogSpy).toHaveBeenCalledWith('📄 Looking for docs for: com.example.nodes.custom -> custom');
298 |       });
299 |     });
300 |   });
301 | 
302 |   describe('enhanceLoopNodeDocumentation - SplitInBatches', () => {
303 |     it('should enhance SplitInBatches documentation with output guidance', async () => {
304 |       const originalContent = `# Split In Batches Node
305 | 
306 | This node splits data into batches.
307 | 
308 | ## When to use
309 | 
310 | Use this node when you need to process large datasets in smaller chunks.
311 | 
312 | ## Parameters
313 | 
314 | - batchSize: Number of items per batch
315 | `;
316 | 
317 |       vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
318 | 
319 |       const result = await docsMapper.fetchDocumentation('splitInBatches');
320 | 
321 |       expect(result).not.toBeNull();
322 |       expect(result!).toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
323 |       expect(result!).toContain('⚠️ OUTPUT INDICES ARE COUNTERINTUITIVE ⚠️');
324 |       expect(result!).toContain('Output 0 (index 0) = "done"');
325 |       expect(result!).toContain('Output 1 (index 1) = "loop"');
326 |       expect(result!).toContain('Correct Connection Pattern:');
327 |       expect(result!).toContain('Common Mistake:');
328 |       expect(result!).toContain('AI assistants often connect these backwards');
329 |       
330 |       // Should insert before "When to use" section
331 |       const insertionIndex = result!.indexOf('## When to use');
332 |       const guidanceIndex = result!.indexOf('CRITICAL OUTPUT CONNECTION INFORMATION');
333 |       expect(guidanceIndex).toBeLessThan(insertionIndex);
334 |       expect(guidanceIndex).toBeGreaterThan(0);
335 |     });
336 | 
337 |     it('should enhance SplitInBatches documentation when no "When to use" section exists', async () => {
338 |       const originalContent = `# Split In Batches Node
339 | 
340 | This node splits data into batches.
341 | 
342 | ## Parameters
343 | 
344 | - batchSize: Number of items per batch
345 | `;
346 | 
347 |       vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
348 | 
349 |       const result = await docsMapper.fetchDocumentation('splitInBatches');
350 | 
351 |       expect(result).not.toBeNull();
352 |       expect(result!).toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
353 |       // Should be inserted at the beginning since no "When to use" section
354 |       expect(result!.indexOf('CRITICAL OUTPUT CONNECTION INFORMATION')).toBeLessThan(
355 |         result!.indexOf('# Split In Batches Node')
356 |       );
357 |     });
358 | 
359 |     it('should handle splitInBatches in various node type formats', async () => {
360 |       const testCases = [
361 |         'splitInBatches',
362 |         'n8n-nodes-base.splitInBatches',
363 |         'nodes-base.splitInBatches'
364 |       ];
365 | 
366 |       for (const nodeType of testCases) {
367 |         const originalContent = '# Split In Batches\nOriginal content';
368 |         vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
369 | 
370 |         const result = await docsMapper.fetchDocumentation(nodeType);
371 | 
372 |         expect(result).toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
373 |         expect(result).toContain('Output 0 (index 0) = "done"');
374 |       }
375 |     });
376 | 
377 |     it('should provide specific guidance for correct connection patterns', async () => {
378 |       const originalContent = '# Split In Batches\n## When to use\nContent';
379 |       vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
380 | 
381 |       const result = await docsMapper.fetchDocumentation('splitInBatches');
382 | 
383 |       expect(result).toContain('Connect nodes that PROCESS items inside the loop to **Output 1 ("loop")**');
384 |       expect(result).toContain('Connect nodes that run AFTER the loop completes to **Output 0 ("done")**');
385 |       expect(result).toContain('The last processing node in the loop must connect back to the SplitInBatches node');
386 |     });
387 | 
388 |     it('should explain the common AI assistant mistake', async () => {
389 |       const originalContent = '# Split In Batches\n## When to use\nContent';
390 |       vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
391 | 
392 |       const result = await docsMapper.fetchDocumentation('splitInBatches');
393 | 
394 |       expect(result).toContain('AI assistants often connect these backwards');
395 |       expect(result).toContain('logical flow (loop first, then done) doesn\'t match the technical indices (done=0, loop=1)');
396 |     });
397 | 
398 |     it('should not enhance non-splitInBatches nodes with loop guidance', async () => {
399 |       const originalContent = '# HTTP Request Node\nContent';
400 |       vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
401 | 
402 |       const result = await docsMapper.fetchDocumentation('httpRequest');
403 | 
404 |       expect(result).not.toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
405 |       expect(result).not.toContain('counterintuitive');
406 |       expect(result).toBe(originalContent); // Should be unchanged
407 |     });
408 |   });
409 | 
410 |   describe('enhanceLoopNodeDocumentation - IF node', () => {
411 |     it('should enhance IF node documentation with output guidance', async () => {
412 |       const originalContent = `# IF Node
413 | 
414 | Route items based on conditions.
415 | 
416 | ## Node parameters
417 | 
418 | Configure your conditions here.
419 | `;
420 | 
421 |       vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
422 | 
423 |       const result = await docsMapper.fetchDocumentation('n8n-nodes-base.if');
424 | 
425 |       expect(result).not.toBeNull();
426 |       expect(result!).toContain('Output Connection Information');
427 |       expect(result!).toContain('Output 0 (index 0) = "true"');
428 |       expect(result!).toContain('Output 1 (index 1) = "false"');
429 |       expect(result!).toContain('Items that match the condition');
430 |       expect(result!).toContain('Items that do not match the condition');
431 | 
432 |       // Should insert before "Node parameters" section
433 |       const parametersIndex = result!.indexOf('## Node parameters');
434 |       const outputInfoIndex = result!.indexOf('Output Connection Information');
435 |       expect(outputInfoIndex).toBeLessThan(parametersIndex);
436 |       expect(outputInfoIndex).toBeGreaterThan(0);
437 |     });
438 | 
439 |     it('should handle IF node when no "Node parameters" section exists', async () => {
440 |       const originalContent = `# IF Node
441 | 
442 | Route items based on conditions.
443 | 
444 | ## Usage
445 | 
446 | Use this node to route data.
447 | `;
448 | 
449 |       vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
450 | 
451 |       const result = await docsMapper.fetchDocumentation('n8n-nodes-base.if');
452 | 
453 |       // When no "Node parameters" section exists, no enhancement is applied
454 |       expect(result).toBe(originalContent);
455 |     });
456 | 
457 |     it('should handle various IF node type formats', async () => {
458 |       const testCases = [
459 |         'if',
460 |         'n8n-nodes-base.if',
461 |         'nodes-base.if'
462 |       ];
463 | 
464 |       for (const nodeType of testCases) {
465 |         const originalContent = '# IF Node\n## Node parameters\nContent';
466 |         vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
467 | 
468 |         const result = await docsMapper.fetchDocumentation(nodeType);
469 | 
470 |         if (nodeType.includes('.if')) {
471 |           expect(result).toContain('Output Connection Information');
472 |           expect(result).toContain('Output 0 (index 0) = "true"');
473 |           expect(result).toContain('Output 1 (index 1) = "false"');
474 |         } else {
475 |           // For 'if' without dot, no enhancement is applied
476 |           expect(result).toBe(originalContent);
477 |         }
478 |       }
479 |     });
480 |   });
481 | 
482 |   describe('enhanceLoopNodeDocumentation - edge cases', () => {
483 |     it('should handle content without clear insertion points', async () => {
484 |       const originalContent = 'Simple content without markdown sections';
485 |       vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
486 | 
487 |       const result = await docsMapper.fetchDocumentation('splitInBatches');
488 | 
489 |       expect(result).not.toBeNull();
490 |       expect(result!).toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
491 |       // Should be prepended when no insertion point found (but there's a newline before original content)
492 |       const guidanceIndex = result!.indexOf('CRITICAL OUTPUT CONNECTION INFORMATION');
493 |       expect(guidanceIndex).toBeLessThan(result!.indexOf('Simple content'));
494 |       expect(guidanceIndex).toBeLessThanOrEqual(5); // Allow for some whitespace
495 |     });
496 | 
497 |     it('should handle empty content', async () => {
498 |       const originalContent = '';
499 |       vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
500 | 
501 |       const result = await docsMapper.fetchDocumentation('splitInBatches');
502 | 
503 |       expect(result).not.toBeNull();
504 |       expect(result!).toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
505 |       expect(result!.length).toBeGreaterThan(0);
506 |     });
507 | 
508 |     it('should handle content with multiple "When to use" sections', async () => {
509 |       const originalContent = `# Split In Batches
510 | 
511 | ## When to use (overview)
512 | 
513 | General usage.
514 | 
515 | ## When to use (detailed)
516 | 
517 | Detailed usage.
518 | `;
519 |       vi.mocked(fs.readFile).mockResolvedValueOnce(originalContent);
520 | 
521 |       const result = await docsMapper.fetchDocumentation('splitInBatches');
522 | 
523 |       expect(result).not.toBeNull();
524 |       expect(result!).toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
525 |       // Should insert before first occurrence
526 |       const firstWhenToUse = result!.indexOf('## When to use (overview)');
527 |       const guidanceIndex = result!.indexOf('CRITICAL OUTPUT CONNECTION INFORMATION');
528 |       expect(guidanceIndex).toBeLessThan(firstWhenToUse);
529 |     });
530 | 
531 |     it('should not double-enhance already enhanced content', async () => {
532 |       const alreadyEnhancedContent = `# Split In Batches
533 | 
534 | ## CRITICAL OUTPUT CONNECTION INFORMATION
535 | 
536 | Already enhanced.
537 | 
538 | ## When to use
539 | 
540 | Content here.
541 | `;
542 |       vi.mocked(fs.readFile).mockResolvedValueOnce(alreadyEnhancedContent);
543 | 
544 |       const result = await docsMapper.fetchDocumentation('splitInBatches');
545 | 
546 |       // Should still add enhancement (method doesn't check for existing enhancements)
547 |       expect(result).not.toBeNull();
548 |       const criticalSections = (result!.match(/CRITICAL OUTPUT CONNECTION INFORMATION/g) || []).length;
549 |       expect(criticalSections).toBe(2); // Original + new enhancement
550 |     });
551 | 
552 |     it('should handle very large content efficiently', async () => {
553 |       const largeContent = 'a'.repeat(100000) + '\n## When to use\n' + 'b'.repeat(100000);
554 |       vi.mocked(fs.readFile).mockResolvedValueOnce(largeContent);
555 | 
556 |       const result = await docsMapper.fetchDocumentation('splitInBatches');
557 | 
558 |       expect(result).not.toBeNull();
559 |       expect(result!).toContain('CRITICAL OUTPUT CONNECTION INFORMATION');
560 |       expect(result!.length).toBeGreaterThan(largeContent.length);
561 |     });
562 |   });
563 | 
564 |   describe('DocsMapper instance', () => {
565 |     it('should use consistent docsPath across instances', () => {
566 |       const mapper1 = new DocsMapper();
567 |       const mapper2 = new DocsMapper();
568 | 
569 |       // Both should construct the same base path
570 |       expect(mapper1['docsPath']).toBe(mapper2['docsPath']);
571 |       expect(mapper1['docsPath']).toBe(path.join('/mocked/path', 'n8n-docs'));
572 |     });
573 | 
574 |     it('should maintain KNOWN_FIXES as readonly', () => {
575 |       const mapper = new DocsMapper();
576 |       
577 |       // KNOWN_FIXES should be accessible but not modifiable
578 |       expect(mapper['KNOWN_FIXES']).toBeDefined();
579 |       expect(mapper['KNOWN_FIXES']['httpRequest']).toBe('httprequest');
580 |     });
581 |   });
582 | });
```

--------------------------------------------------------------------------------
/tests/unit/telemetry/telemetry-manager.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, beforeEach, vi, afterEach } from 'vitest';
  2 | import { TelemetryManager, telemetry } from '../../../src/telemetry/telemetry-manager';
  3 | import { TelemetryConfigManager } from '../../../src/telemetry/config-manager';
  4 | import { TelemetryEventTracker } from '../../../src/telemetry/event-tracker';
  5 | import { TelemetryBatchProcessor } from '../../../src/telemetry/batch-processor';
  6 | import { createClient } from '@supabase/supabase-js';
  7 | import { TELEMETRY_BACKEND } from '../../../src/telemetry/telemetry-types';
  8 | import { TelemetryError, TelemetryErrorType } from '../../../src/telemetry/telemetry-error';
  9 | 
 10 | // Mock all dependencies
 11 | vi.mock('../../../src/utils/logger', () => ({
 12 |   logger: {
 13 |     debug: vi.fn(),
 14 |     info: vi.fn(),
 15 |     warn: vi.fn(),
 16 |     error: vi.fn(),
 17 |   }
 18 | }));
 19 | 
 20 | vi.mock('@supabase/supabase-js', () => ({
 21 |   createClient: vi.fn()
 22 | }));
 23 | 
 24 | vi.mock('../../../src/telemetry/config-manager');
 25 | vi.mock('../../../src/telemetry/event-tracker');
 26 | vi.mock('../../../src/telemetry/batch-processor');
 27 | vi.mock('../../../src/telemetry/workflow-sanitizer');
 28 | 
 29 | describe('TelemetryManager', () => {
 30 |   let mockConfigManager: any;
 31 |   let mockSupabaseClient: any;
 32 |   let mockEventTracker: any;
 33 |   let mockBatchProcessor: any;
 34 |   let manager: TelemetryManager;
 35 | 
 36 |   beforeEach(() => {
 37 |     // Reset singleton using the new method
 38 |     TelemetryManager.resetInstance();
 39 | 
 40 |     // Mock TelemetryConfigManager
 41 |     mockConfigManager = {
 42 |       isEnabled: vi.fn().mockReturnValue(true),
 43 |       getUserId: vi.fn().mockReturnValue('test-user-123'),
 44 |       disable: vi.fn(),
 45 |       enable: vi.fn(),
 46 |       getStatus: vi.fn().mockReturnValue('enabled')
 47 |     };
 48 |     vi.mocked(TelemetryConfigManager.getInstance).mockReturnValue(mockConfigManager);
 49 | 
 50 |     // Mock Supabase client
 51 |     mockSupabaseClient = {
 52 |       from: vi.fn().mockReturnValue({
 53 |         insert: vi.fn().mockResolvedValue({ data: null, error: null })
 54 |       })
 55 |     };
 56 |     vi.mocked(createClient).mockReturnValue(mockSupabaseClient);
 57 | 
 58 |     // Mock EventTracker
 59 |     mockEventTracker = {
 60 |       trackToolUsage: vi.fn(),
 61 |       trackWorkflowCreation: vi.fn().mockResolvedValue(undefined),
 62 |       trackError: vi.fn(),
 63 |       trackEvent: vi.fn(),
 64 |       trackSessionStart: vi.fn(),
 65 |       trackSearchQuery: vi.fn(),
 66 |       trackValidationDetails: vi.fn(),
 67 |       trackToolSequence: vi.fn(),
 68 |       trackNodeConfiguration: vi.fn(),
 69 |       trackPerformanceMetric: vi.fn(),
 70 |       updateToolSequence: vi.fn(),
 71 |       getEventQueue: vi.fn().mockReturnValue([]),
 72 |       getWorkflowQueue: vi.fn().mockReturnValue([]),
 73 |       clearEventQueue: vi.fn(),
 74 |       clearWorkflowQueue: vi.fn(),
 75 |       getStats: vi.fn().mockReturnValue({
 76 |         rateLimiter: { currentEvents: 0, droppedEvents: 0 },
 77 |         validator: { successes: 0, errors: 0 },
 78 |         eventQueueSize: 0,
 79 |         workflowQueueSize: 0,
 80 |         performanceMetrics: {}
 81 |       })
 82 |     };
 83 |     vi.mocked(TelemetryEventTracker).mockImplementation(() => mockEventTracker);
 84 | 
 85 |     // Mock BatchProcessor
 86 |     mockBatchProcessor = {
 87 |       start: vi.fn(),
 88 |       stop: vi.fn(),
 89 |       flush: vi.fn().mockResolvedValue(undefined),
 90 |       getMetrics: vi.fn().mockReturnValue({
 91 |         eventsTracked: 0,
 92 |         eventsDropped: 0,
 93 |         eventsFailed: 0,
 94 |         batchesSent: 0,
 95 |         batchesFailed: 0,
 96 |         averageFlushTime: 0,
 97 |         rateLimitHits: 0,
 98 |         circuitBreakerState: { state: 'closed', failureCount: 0, canRetry: true },
 99 |         deadLetterQueueSize: 0
100 |       }),
101 |       resetMetrics: vi.fn()
102 |     };
103 |     vi.mocked(TelemetryBatchProcessor).mockImplementation(() => mockBatchProcessor);
104 | 
105 |     vi.clearAllMocks();
106 |   });
107 | 
108 |   afterEach(() => {
109 |     // Clean up global state
110 |     TelemetryManager.resetInstance();
111 |   });
112 | 
113 |   describe('singleton behavior', () => {
114 |     it('should create only one instance', () => {
115 |       const instance1 = TelemetryManager.getInstance();
116 |       const instance2 = TelemetryManager.getInstance();
117 | 
118 |       expect(instance1).toBe(instance2);
119 |     });
120 | 
121 |     it.skip('should use global singleton for telemetry export', async () => {
122 |       // Skip: Testing module import behavior with mocks is complex
123 |       // The core singleton behavior is tested in other tests
124 |       const instance = TelemetryManager.getInstance();
125 | 
126 |       // Import the telemetry export
127 |       const { telemetry: telemetry1 } = await import('../../../src/telemetry/telemetry-manager');
128 | 
129 |       // Both should reference the same global singleton
130 |       expect(telemetry1).toBe(instance);
131 |     });
132 |   });
133 | 
134 |   describe('initialization', () => {
135 |     beforeEach(() => {
136 |       manager = TelemetryManager.getInstance();
137 |     });
138 | 
139 |     it('should initialize successfully when enabled', () => {
140 |       // Trigger initialization by calling a tracking method
141 |       manager.trackEvent('test', {});
142 | 
143 |       expect(mockConfigManager.isEnabled).toHaveBeenCalled();
144 |       expect(createClient).toHaveBeenCalledWith(
145 |         TELEMETRY_BACKEND.URL,
146 |         TELEMETRY_BACKEND.ANON_KEY,
147 |         expect.objectContaining({
148 |           auth: {
149 |             persistSession: false,
150 |             autoRefreshToken: false
151 |           }
152 |         })
153 |       );
154 |       expect(mockBatchProcessor.start).toHaveBeenCalled();
155 |     });
156 | 
157 |     it('should use environment variables if provided', () => {
158 |       process.env.SUPABASE_URL = 'https://custom.supabase.co';
159 |       process.env.SUPABASE_ANON_KEY = 'custom-anon-key';
160 | 
161 |       // Reset instance to trigger re-initialization
162 |       TelemetryManager.resetInstance();
163 |       manager = TelemetryManager.getInstance();
164 | 
165 |       // Trigger initialization
166 |       manager.trackEvent('test', {});
167 | 
168 |       expect(createClient).toHaveBeenCalledWith(
169 |         'https://custom.supabase.co',
170 |         'custom-anon-key',
171 |         expect.any(Object)
172 |       );
173 | 
174 |       // Clean up
175 |       delete process.env.SUPABASE_URL;
176 |       delete process.env.SUPABASE_ANON_KEY;
177 |     });
178 | 
179 |     it('should not initialize when disabled', () => {
180 |       mockConfigManager.isEnabled.mockReturnValue(false);
181 | 
182 |       // Reset instance to trigger re-initialization
183 |       TelemetryManager.resetInstance();
184 |       manager = TelemetryManager.getInstance();
185 | 
186 |       expect(createClient).not.toHaveBeenCalled();
187 |       expect(mockBatchProcessor.start).not.toHaveBeenCalled();
188 |     });
189 | 
190 |     it('should handle initialization errors', () => {
191 |       vi.mocked(createClient).mockImplementation(() => {
192 |         throw new Error('Supabase initialization failed');
193 |       });
194 | 
195 |       // Reset instance to trigger re-initialization
196 |       TelemetryManager.resetInstance();
197 |       manager = TelemetryManager.getInstance();
198 | 
199 |       expect(mockBatchProcessor.start).not.toHaveBeenCalled();
200 |     });
201 |   });
202 | 
203 |   describe('event tracking methods', () => {
204 |     beforeEach(() => {
205 |       manager = TelemetryManager.getInstance();
206 |     });
207 | 
208 |     it('should track tool usage with sequence update', () => {
209 |       manager.trackToolUsage('httpRequest', true, 500);
210 | 
211 |       expect(mockEventTracker.trackToolUsage).toHaveBeenCalledWith('httpRequest', true, 500);
212 |       expect(mockEventTracker.updateToolSequence).toHaveBeenCalledWith('httpRequest');
213 |     });
214 | 
215 |     it('should track workflow creation and auto-flush', async () => {
216 |       const workflow = { nodes: [], connections: {} };
217 | 
218 |       await manager.trackWorkflowCreation(workflow, true);
219 | 
220 |       expect(mockEventTracker.trackWorkflowCreation).toHaveBeenCalledWith(workflow, true);
221 |       expect(mockBatchProcessor.flush).toHaveBeenCalled();
222 |     });
223 | 
224 |     it('should handle workflow creation errors', async () => {
225 |       const workflow = { nodes: [], connections: {} };
226 |       const error = new Error('Workflow tracking failed');
227 |       mockEventTracker.trackWorkflowCreation.mockRejectedValue(error);
228 | 
229 |       await manager.trackWorkflowCreation(workflow, true);
230 | 
231 |       // Should not throw, but should handle error internally
232 |       expect(mockEventTracker.trackWorkflowCreation).toHaveBeenCalledWith(workflow, true);
233 |     });
234 | 
235 |     it('should track errors', () => {
236 |       manager.trackError('ValidationError', 'Node configuration invalid', 'httpRequest', 'Required field "url" is missing');
237 | 
238 |       expect(mockEventTracker.trackError).toHaveBeenCalledWith(
239 |         'ValidationError',
240 |         'Node configuration invalid',
241 |         'httpRequest',
242 |         'Required field "url" is missing'
243 |       );
244 |     });
245 | 
246 |     it('should track generic events', () => {
247 |       const properties = { key: 'value', count: 42 };
248 |       manager.trackEvent('custom_event', properties);
249 | 
250 |       expect(mockEventTracker.trackEvent).toHaveBeenCalledWith('custom_event', properties);
251 |     });
252 | 
253 |     it('should track session start', () => {
254 |       manager.trackSessionStart();
255 | 
256 |       expect(mockEventTracker.trackSessionStart).toHaveBeenCalled();
257 |     });
258 | 
259 |     it('should track search queries', () => {
260 |       manager.trackSearchQuery('httpRequest nodes', 5, 'nodes');
261 | 
262 |       expect(mockEventTracker.trackSearchQuery).toHaveBeenCalledWith(
263 |         'httpRequest nodes',
264 |         5,
265 |         'nodes'
266 |       );
267 |     });
268 | 
269 |     it('should track validation details', () => {
270 |       const details = { field: 'url', value: 'invalid' };
271 |       manager.trackValidationDetails('nodes-base.httpRequest', 'required_field_missing', details);
272 | 
273 |       expect(mockEventTracker.trackValidationDetails).toHaveBeenCalledWith(
274 |         'nodes-base.httpRequest',
275 |         'required_field_missing',
276 |         details
277 |       );
278 |     });
279 | 
280 |     it('should track tool sequences', () => {
281 |       manager.trackToolSequence('httpRequest', 'webhook', 5000);
282 | 
283 |       expect(mockEventTracker.trackToolSequence).toHaveBeenCalledWith(
284 |         'httpRequest',
285 |         'webhook',
286 |         5000
287 |       );
288 |     });
289 | 
290 |     it('should track node configuration', () => {
291 |       manager.trackNodeConfiguration('nodes-base.httpRequest', 5, false);
292 | 
293 |       expect(mockEventTracker.trackNodeConfiguration).toHaveBeenCalledWith(
294 |         'nodes-base.httpRequest',
295 |         5,
296 |         false
297 |       );
298 |     });
299 | 
300 |     it('should track performance metrics', () => {
301 |       const metadata = { operation: 'database_query' };
302 |       manager.trackPerformanceMetric('search_nodes', 1500, metadata);
303 | 
304 |       expect(mockEventTracker.trackPerformanceMetric).toHaveBeenCalledWith(
305 |         'search_nodes',
306 |         1500,
307 |         metadata
308 |       );
309 |     });
310 |   });
311 | 
312 |   describe('flush()', () => {
313 |     beforeEach(() => {
314 |       manager = TelemetryManager.getInstance();
315 |     });
316 | 
317 |     it('should flush events and workflows', async () => {
318 |       const mockEvents = [{ user_id: 'user1', event: 'test', properties: {} }];
319 |       const mockWorkflows = [{ user_id: 'user1', workflow_hash: 'hash1' }];
320 | 
321 |       mockEventTracker.getEventQueue.mockReturnValue(mockEvents);
322 |       mockEventTracker.getWorkflowQueue.mockReturnValue(mockWorkflows);
323 | 
324 |       await manager.flush();
325 | 
326 |       expect(mockEventTracker.getEventQueue).toHaveBeenCalled();
327 |       expect(mockEventTracker.getWorkflowQueue).toHaveBeenCalled();
328 |       expect(mockEventTracker.clearEventQueue).toHaveBeenCalled();
329 |       expect(mockEventTracker.clearWorkflowQueue).toHaveBeenCalled();
330 |       expect(mockBatchProcessor.flush).toHaveBeenCalledWith(mockEvents, mockWorkflows);
331 |     });
332 | 
333 |     it('should not flush when disabled', async () => {
334 |       mockConfigManager.isEnabled.mockReturnValue(false);
335 | 
336 |       await manager.flush();
337 | 
338 |       expect(mockBatchProcessor.flush).not.toHaveBeenCalled();
339 |     });
340 | 
341 |     it('should not flush without Supabase client', async () => {
342 |       // Simulate initialization failure
343 |       vi.mocked(createClient).mockImplementation(() => {
344 |         throw new Error('Init failed');
345 |       });
346 | 
347 |       // Reset instance to trigger re-initialization with failure
348 |       (TelemetryManager as any).instance = undefined;
349 |       manager = TelemetryManager.getInstance();
350 | 
351 |       await manager.flush();
352 | 
353 |       expect(mockBatchProcessor.flush).not.toHaveBeenCalled();
354 |     });
355 | 
356 |     it('should handle flush errors gracefully', async () => {
357 |       const error = new Error('Flush failed');
358 |       mockBatchProcessor.flush.mockRejectedValue(error);
359 | 
360 |       await manager.flush();
361 | 
362 |       // Should not throw; the error should be handled internally
363 |       expect(mockBatchProcessor.flush).toHaveBeenCalled();
364 |     });
365 | 
366 |     it('should handle TelemetryError specifically', async () => {
367 |       const telemetryError = new TelemetryError(
368 |         TelemetryErrorType.NETWORK_ERROR,
369 |         'Network failed',
370 |         { attempt: 1 },
371 |         true
372 |       );
373 |       mockBatchProcessor.flush.mockRejectedValue(telemetryError);
374 | 
375 |       await manager.flush();
376 | 
377 |       expect(mockBatchProcessor.flush).toHaveBeenCalled();
378 |     });
379 |   });
380 | 
381 |   describe('enable/disable functionality', () => {
382 |     beforeEach(() => {
383 |       manager = TelemetryManager.getInstance();
384 |     });
385 | 
386 |     it('should disable telemetry', () => {
387 |       manager.disable();
388 | 
389 |       expect(mockConfigManager.disable).toHaveBeenCalled();
390 |       expect(mockBatchProcessor.stop).toHaveBeenCalled();
391 |     });
392 | 
393 |     it('should enable telemetry', () => {
394 |       // Disable first to clear state
395 |       manager.disable();
396 |       vi.clearAllMocks();
397 | 
398 |       // Now enable
399 |       manager.enable();
400 | 
401 |       expect(mockConfigManager.enable).toHaveBeenCalled();
402 |       // Should initialize (createClient called once)
403 |       expect(createClient).toHaveBeenCalledTimes(1);
404 |     });
405 | 
406 |     it('should get status from config manager', () => {
407 |       const status = manager.getStatus();
408 | 
409 |       expect(mockConfigManager.getStatus).toHaveBeenCalled();
410 |       expect(status).toBe('enabled');
411 |     });
412 |   });
413 | 
414 |   describe('getMetrics()', () => {
415 |     beforeEach(() => {
416 |       manager = TelemetryManager.getInstance();
417 |       // Trigger initialization for enabled tests
418 |       manager.trackEvent('test', {});
419 |     });
420 | 
421 |     it('should return comprehensive metrics when enabled', () => {
422 |       const metrics = manager.getMetrics();
423 | 
424 |       expect(metrics).toEqual({
425 |         status: 'enabled',
426 |         initialized: true,
427 |         tracking: expect.any(Object),
428 |         processing: expect.any(Object),
429 |         errors: expect.any(Object),
430 |         performance: expect.any(Object),
431 |         overhead: expect.any(Object)
432 |       });
433 | 
434 |       expect(mockEventTracker.getStats).toHaveBeenCalled();
435 |       expect(mockBatchProcessor.getMetrics).toHaveBeenCalled();
436 |     });
437 | 
438 |     it('should return disabled status when disabled', () => {
439 |       mockConfigManager.isEnabled.mockReturnValue(false);
440 |       // Reset to get a fresh instance without initialization
441 |       TelemetryManager.resetInstance();
442 |       manager = TelemetryManager.getInstance();
443 | 
444 |       const metrics = manager.getMetrics();
445 | 
446 |       expect(metrics.status).toBe('disabled');
447 |       expect(metrics.initialized).toBe(false); // Not initialized when disabled
448 |     });
449 | 
450 |     it('should reflect initialization failure', () => {
451 |       // Simulate initialization failure
452 |       vi.mocked(createClient).mockImplementation(() => {
453 |         throw new Error('Init failed');
454 |       });
455 | 
456 |       // Reset instance to trigger re-initialization with failure
457 |       (TelemetryManager as any).instance = undefined;
458 |       manager = TelemetryManager.getInstance();
459 | 
460 |       const metrics = manager.getMetrics();
461 | 
462 |       expect(metrics.initialized).toBe(false);
463 |     });
464 |   });
465 | 
466 |   describe('error handling and aggregation', () => {
467 |     beforeEach(() => {
468 |       manager = TelemetryManager.getInstance();
469 |     });
470 | 
471 |     it('should aggregate initialization errors', () => {
472 |       vi.mocked(createClient).mockImplementation(() => {
473 |         throw new Error('Supabase connection failed');
474 |       });
475 | 
476 |       // Reset instance to trigger re-initialization with error
477 |       TelemetryManager.resetInstance();
478 |       manager = TelemetryManager.getInstance();
479 | 
480 |       // Trigger initialization which will fail
481 |       manager.trackEvent('test', {});
482 | 
483 |       const metrics = manager.getMetrics();
484 |       expect(metrics.errors.totalErrors).toBeGreaterThan(0);
485 |     });
486 | 
487 |     it('should aggregate workflow tracking errors', async () => {
488 |       const error = new TelemetryError(
489 |         TelemetryErrorType.VALIDATION_ERROR,
490 |         'Workflow validation failed'
491 |       );
492 |       mockEventTracker.trackWorkflowCreation.mockRejectedValue(error);
493 | 
494 |       const workflow = { nodes: [], connections: {} };
495 |       await manager.trackWorkflowCreation(workflow, true);
496 | 
497 |       const metrics = manager.getMetrics();
498 |       expect(metrics.errors.totalErrors).toBeGreaterThan(0);
499 |     });
500 | 
501 |     it('should aggregate flush errors', async () => {
502 |       const error = new Error('Network timeout');
503 |       mockBatchProcessor.flush.mockRejectedValue(error);
504 | 
505 |       await manager.flush();
506 | 
507 |       const metrics = manager.getMetrics();
508 |       expect(metrics.errors.totalErrors).toBeGreaterThan(0);
509 |     });
510 |   });
511 | 
512 |   describe('constructor privacy', () => {
513 |     it('should have private constructor', () => {
514 |       // Ensure there's already an instance
515 |       TelemetryManager.getInstance();
516 | 
517 |       // Now trying to instantiate directly should throw
518 |       expect(() => new (TelemetryManager as any)()).toThrow('Use TelemetryManager.getInstance() instead of new TelemetryManager()');
519 |     });
520 |   });
521 | 
522 |   describe('isEnabled() privacy', () => {
523 |     beforeEach(() => {
524 |       manager = TelemetryManager.getInstance();
525 |     });
526 | 
527 |     it('should correctly check enabled state', async () => {
528 |       mockConfigManager.isEnabled.mockReturnValue(true);
529 | 
530 |       await manager.flush();
531 | 
532 |       expect(mockBatchProcessor.flush).toHaveBeenCalled();
533 |     });
534 | 
535 |     it('should prevent operations when not initialized', async () => {
536 |       // Simulate initialization failure
537 |       vi.mocked(createClient).mockImplementation(() => {
538 |         throw new Error('Init failed');
539 |       });
540 | 
541 |       // Reset instance to trigger re-initialization with failure
542 |       (TelemetryManager as any).instance = undefined;
543 |       manager = TelemetryManager.getInstance();
544 | 
545 |       await manager.flush();
546 | 
547 |       expect(mockBatchProcessor.flush).not.toHaveBeenCalled();
548 |     });
549 |   });
550 | 
551 |   describe('dependency injection and callbacks', () => {
552 |     it('should provide correct callbacks to EventTracker', () => {
553 |       const TelemetryEventTrackerMock = vi.mocked(TelemetryEventTracker);
554 | 
555 |       const manager = TelemetryManager.getInstance();
556 |       // Trigger initialization
557 |       manager.trackEvent('test', {});
558 | 
559 |       expect(TelemetryEventTrackerMock).toHaveBeenCalledWith(
560 |         expect.any(Function), // getUserId callback
561 |         expect.any(Function)  // isEnabled callback
562 |       );
563 | 
564 |       // Test the callbacks
565 |       const [getUserIdCallback, isEnabledCallback] = TelemetryEventTrackerMock.mock.calls[0];
566 | 
567 |       expect(getUserIdCallback()).toBe('test-user-123');
568 |       expect(isEnabledCallback()).toBe(true);
569 |     });
570 | 
571 |     it('should provide correct callbacks to BatchProcessor', () => {
572 |       const TelemetryBatchProcessorMock = vi.mocked(TelemetryBatchProcessor);
573 | 
574 |       const manager = TelemetryManager.getInstance();
575 |       // Trigger initialization
576 |       manager.trackEvent('test', {});
577 | 
578 |       expect(TelemetryBatchProcessorMock).toHaveBeenCalledTimes(2); // Once with null, once with Supabase client
579 | 
580 |       const lastCall = TelemetryBatchProcessorMock.mock.calls[TelemetryBatchProcessorMock.mock.calls.length - 1];
581 |       const [supabaseClient, isEnabledCallback] = lastCall;
582 | 
583 |       expect(supabaseClient).toBe(mockSupabaseClient);
584 |       expect(isEnabledCallback()).toBe(true);
585 |     });
586 |   });
587 | 
588 |   describe('Supabase client configuration', () => {
589 |     beforeEach(() => {
590 |       manager = TelemetryManager.getInstance();
591 |       // Trigger initialization
592 |       manager.trackEvent('test', {});
593 |     });
594 | 
595 |     it('should configure Supabase client with correct options', () => {
596 |       expect(createClient).toHaveBeenCalledWith(
597 |         TELEMETRY_BACKEND.URL,
598 |         TELEMETRY_BACKEND.ANON_KEY,
599 |         {
600 |           auth: {
601 |             persistSession: false,
602 |             autoRefreshToken: false
603 |           },
604 |           realtime: {
605 |             params: {
606 |               eventsPerSecond: 1
607 |             }
608 |           }
609 |         }
610 |       );
611 |     });
612 |   });
613 | 
614 |   describe('workflow creation auto-flush behavior', () => {
615 |     beforeEach(() => {
616 |       manager = TelemetryManager.getInstance();
617 |     });
618 | 
619 |     it('should auto-flush after successful workflow tracking', async () => {
620 |       const workflow = { nodes: [], connections: {} };
621 | 
622 |       await manager.trackWorkflowCreation(workflow, true);
623 | 
624 |       expect(mockEventTracker.trackWorkflowCreation).toHaveBeenCalledWith(workflow, true);
625 |       expect(mockBatchProcessor.flush).toHaveBeenCalled();
626 |     });
627 | 
628 |     it('should not auto-flush if workflow tracking fails', async () => {
629 |       const workflow = { nodes: [], connections: {} };
630 |       mockEventTracker.trackWorkflowCreation.mockRejectedValue(new Error('Tracking failed'));
631 | 
632 |       await manager.trackWorkflowCreation(workflow, true);
633 | 
634 |       expect(mockEventTracker.trackWorkflowCreation).toHaveBeenCalledWith(workflow, true);
635 |       // Flush should NOT be called if tracking fails
636 |       expect(mockBatchProcessor.flush).not.toHaveBeenCalled();
637 |     });
638 |   });
639 | 
640 |   describe('global singleton behavior', () => {
641 |     it('should preserve singleton across require() calls', async () => {
642 |       // Get the first instance
643 |       const manager1 = TelemetryManager.getInstance();
644 | 
645 |       // Reset and re-get the instance - resetInstance() should yield a fresh instance
646 |       TelemetryManager.resetInstance();
647 |       const manager2 = TelemetryManager.getInstance();
648 | 
649 |       // They should be different instances after reset
650 |       expect(manager2).not.toBe(manager1);
651 | 
652 |       // But subsequent calls should return the same instance
653 |       const manager3 = TelemetryManager.getInstance();
654 |       expect(manager3).toBe(manager2);
655 |     });
656 | 
657 |     it.skip('should handle undefined global state gracefully', async () => {
658 |       // Skip: Testing module import behavior with mocks is complex
659 |       // The core singleton behavior is tested in other tests
660 |       // Ensure clean state
661 |       TelemetryManager.resetInstance();
662 | 
663 |       const manager1 = TelemetryManager.getInstance();
664 |       expect(manager1).toBeDefined();
665 | 
666 |       // Import telemetry - it should use the same global instance
667 |       const { telemetry } = await import('../../../src/telemetry/telemetry-manager');
668 |       expect(telemetry).toBeDefined();
669 |       expect(telemetry).toBe(manager1);
670 |     });
671 |   });
672 | });
```
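
A minimal sketch of the singleton contract these tests exercise: a private constructor that rejects direct instantiation, lazy initialization on the first tracked event, and a `resetInstance()` hook used throughout the suite. The class below is illustrative only; the real implementation lives in `src/telemetry/telemetry-manager.ts`, and its internals (Supabase client creation, event tracker, batch processor) are simplified away here.

```typescript
// Illustrative sketch: mirrors the API the tests above call, not the shipped class.
class TelemetryManagerSketch {
  private static instance: TelemetryManagerSketch | undefined;
  private initialized = false;

  private constructor() {
    // Direct construction is rejected once an instance exists,
    // matching the "constructor privacy" test.
    if (TelemetryManagerSketch.instance) {
      throw new Error('Use TelemetryManager.getInstance() instead of new TelemetryManager()');
    }
  }

  static getInstance(): TelemetryManagerSketch {
    if (!this.instance) this.instance = new TelemetryManagerSketch();
    return this.instance;
  }

  /** Test-only hook: drops the cached instance so the next getInstance() re-creates it. */
  static resetInstance(): void {
    this.instance = undefined;
  }

  /** Initialization is deferred until the first tracked event (lazy init). */
  trackEvent(event: string, properties: Record<string, unknown>): void {
    if (!this.initialized) {
      this.initialized = true; // the real class would create its Supabase client here
    }
    // ...would then delegate to an event tracker
  }
}
```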

--------------------------------------------------------------------------------
/tests/unit/telemetry/workflow-sanitizer.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect } from 'vitest';
  2 | import { WorkflowSanitizer } from '../../../src/telemetry/workflow-sanitizer';
  3 | 
  4 | describe('WorkflowSanitizer', () => {
  5 |   describe('sanitizeWorkflow', () => {
  6 |     it('should remove API keys from parameters', () => {
  7 |       const workflow = {
  8 |         nodes: [
  9 |           {
 10 |             id: '1',
 11 |             name: 'HTTP Request',
 12 |             type: 'n8n-nodes-base.httpRequest',
 13 |             position: [100, 100],
 14 |             parameters: {
 15 |               url: 'https://api.example.com',
 16 |               apiKey: 'sk-1234567890abcdef1234567890abcdef',
 17 |               headers: {
 18 |                 'Authorization': 'Bearer sk-1234567890abcdef1234567890abcdef'
 19 |               }
 20 |             }
 21 |           }
 22 |         ],
 23 |         connections: {}
 24 |       };
 25 | 
 26 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
 27 | 
 28 |       expect(sanitized.nodes[0].parameters.apiKey).toBe('[REDACTED]');
 29 |       expect(sanitized.nodes[0].parameters.headers.Authorization).toBe('[REDACTED]');
 30 |     });
 31 | 
 32 |     it('should sanitize webhook URLs but keep structure', () => {
 33 |       const workflow = {
 34 |         nodes: [
 35 |           {
 36 |             id: '1',
 37 |             name: 'Webhook',
 38 |             type: 'n8n-nodes-base.webhook',
 39 |             position: [100, 100],
 40 |             parameters: {
 41 |               path: 'my-webhook',
 42 |               webhookUrl: 'https://n8n.example.com/webhook/abc-def-ghi',
 43 |               method: 'POST'
 44 |             }
 45 |           }
 46 |         ],
 47 |         connections: {}
 48 |       };
 49 | 
 50 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
 51 | 
 52 |       expect(sanitized.nodes[0].parameters.webhookUrl).toBe('[REDACTED]');
 53 |       expect(sanitized.nodes[0].parameters.method).toBe('POST'); // Method should remain
 54 |       expect(sanitized.nodes[0].parameters.path).toBe('my-webhook'); // Path should remain
 55 |     });
 56 | 
 57 |     it('should remove credentials entirely', () => {
 58 |       const workflow = {
 59 |         nodes: [
 60 |           {
 61 |             id: '1',
 62 |             name: 'Slack',
 63 |             type: 'n8n-nodes-base.slack',
 64 |             position: [100, 100],
 65 |             parameters: {
 66 |               channel: 'general',
 67 |               text: 'Hello World'
 68 |             },
 69 |             credentials: {
 70 |               slackApi: {
 71 |                 id: 'cred-123',
 72 |                 name: 'My Slack'
 73 |               }
 74 |             }
 75 |           }
 76 |         ],
 77 |         connections: {}
 78 |       };
 79 | 
 80 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
 81 | 
 82 |       expect(sanitized.nodes[0].credentials).toBeUndefined();
 83 |       expect(sanitized.nodes[0].parameters.channel).toBe('general'); // Channel should remain
 84 |       expect(sanitized.nodes[0].parameters.text).toBe('Hello World'); // Text should remain
 85 |     });
 86 | 
 87 |     it('should sanitize URLs in parameters', () => {
 88 |       const workflow = {
 89 |         nodes: [
 90 |           {
 91 |             id: '1',
 92 |             name: 'HTTP Request',
 93 |             type: 'n8n-nodes-base.httpRequest',
 94 |             position: [100, 100],
 95 |             parameters: {
 96 |               url: 'https://api.example.com/endpoint',
 97 |               endpoint: 'https://another.example.com/api',
 98 |               baseUrl: 'https://base.example.com'
 99 |             }
100 |           }
101 |         ],
102 |         connections: {}
103 |       };
104 | 
105 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
106 | 
107 |       expect(sanitized.nodes[0].parameters.url).toBe('[REDACTED]');
108 |       expect(sanitized.nodes[0].parameters.endpoint).toBe('[REDACTED]');
109 |       expect(sanitized.nodes[0].parameters.baseUrl).toBe('[REDACTED]');
110 |     });
111 | 
112 |     it('should calculate workflow metrics correctly', () => {
113 |       const workflow = {
114 |         nodes: [
115 |           {
116 |             id: '1',
117 |             name: 'Webhook',
118 |             type: 'n8n-nodes-base.webhook',
119 |             position: [100, 100],
120 |             parameters: {}
121 |           },
122 |           {
123 |             id: '2',
124 |             name: 'HTTP Request',
125 |             type: 'n8n-nodes-base.httpRequest',
126 |             position: [200, 100],
127 |             parameters: {}
128 |           },
129 |           {
130 |             id: '3',
131 |             name: 'Slack',
132 |             type: 'n8n-nodes-base.slack',
133 |             position: [300, 100],
134 |             parameters: {}
135 |           }
136 |         ],
137 |         connections: {
138 |           '1': {
139 |             main: [[{ node: '2', type: 'main', index: 0 }]]
140 |           },
141 |           '2': {
142 |             main: [[{ node: '3', type: 'main', index: 0 }]]
143 |           }
144 |         }
145 |       };
146 | 
147 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
148 | 
149 |       expect(sanitized.nodeCount).toBe(3);
150 |       expect(sanitized.nodeTypes).toContain('n8n-nodes-base.webhook');
151 |       expect(sanitized.nodeTypes).toContain('n8n-nodes-base.httpRequest');
152 |       expect(sanitized.nodeTypes).toContain('n8n-nodes-base.slack');
153 |       expect(sanitized.hasTrigger).toBe(true);
154 |       expect(sanitized.hasWebhook).toBe(true);
155 |       expect(sanitized.complexity).toBe('simple');
156 |     });
157 | 
158 |     it('should calculate complexity based on node count', () => {
159 |       const createWorkflow = (nodeCount: number) => ({
160 |         nodes: Array.from({ length: nodeCount }, (_, i) => ({
161 |           id: String(i),
162 |           name: `Node ${i}`,
163 |           type: 'n8n-nodes-base.function',
164 |           position: [i * 100, 100],
165 |           parameters: {}
166 |         })),
167 |         connections: {}
168 |       });
169 | 
170 |       const simple = WorkflowSanitizer.sanitizeWorkflow(createWorkflow(5));
171 |       expect(simple.complexity).toBe('simple');
172 | 
173 |       const medium = WorkflowSanitizer.sanitizeWorkflow(createWorkflow(15));
174 |       expect(medium.complexity).toBe('medium');
175 | 
176 |       const complex = WorkflowSanitizer.sanitizeWorkflow(createWorkflow(25));
177 |       expect(complex.complexity).toBe('complex');
178 |     });
179 | 
180 |     it('should generate consistent workflow hash', () => {
181 |       const workflow = {
182 |         nodes: [
183 |           {
184 |             id: '1',
185 |             name: 'Webhook',
186 |             type: 'n8n-nodes-base.webhook',
187 |             position: [100, 100],
188 |             parameters: { path: 'test' }
189 |           }
190 |         ],
191 |         connections: {}
192 |       };
193 | 
194 |       const hash1 = WorkflowSanitizer.generateWorkflowHash(workflow);
195 |       const hash2 = WorkflowSanitizer.generateWorkflowHash(workflow);
196 | 
197 |       expect(hash1).toBe(hash2);
198 |       expect(hash1).toMatch(/^[a-f0-9]{16}$/);
199 |     });
200 | 
201 |     it('should sanitize nested objects in parameters', () => {
202 |       const workflow = {
203 |         nodes: [
204 |           {
205 |             id: '1',
206 |             name: 'Complex Node',
207 |             type: 'n8n-nodes-base.httpRequest',
208 |             position: [100, 100],
209 |             parameters: {
210 |               options: {
211 |                 headers: {
212 |                   'X-API-Key': 'secret-key-1234567890abcdef',
213 |                   'Content-Type': 'application/json'
214 |                 },
215 |                 body: {
216 |                   data: 'some data',
217 |                   token: 'another-secret-token-xyz123'
218 |                 }
219 |               }
220 |             }
221 |           }
222 |         ],
223 |         connections: {}
224 |       };
225 | 
226 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
227 | 
228 |       expect(sanitized.nodes[0].parameters.options.headers['X-API-Key']).toBe('[REDACTED]');
229 |       expect(sanitized.nodes[0].parameters.options.headers['Content-Type']).toBe('application/json');
230 |       expect(sanitized.nodes[0].parameters.options.body.data).toBe('some data');
231 |       expect(sanitized.nodes[0].parameters.options.body.token).toBe('[REDACTED]');
232 |     });
233 | 
234 |     it('should preserve connections structure', () => {
235 |       const workflow = {
236 |         nodes: [
237 |           {
238 |             id: '1',
239 |             name: 'Node 1',
240 |             type: 'n8n-nodes-base.start',
241 |             position: [100, 100],
242 |             parameters: {}
243 |           },
244 |           {
245 |             id: '2',
246 |             name: 'Node 2',
247 |             type: 'n8n-nodes-base.function',
248 |             position: [200, 100],
249 |             parameters: {}
250 |           }
251 |         ],
252 |         connections: {
253 |           '1': {
254 |             main: [[{ node: '2', type: 'main', index: 0 }]],
255 |             error: [[{ node: '2', type: 'error', index: 0 }]]
256 |           }
257 |         }
258 |       };
259 | 
260 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
261 | 
262 |       expect(sanitized.connections).toEqual({
263 |         '1': {
264 |           main: [[{ node: '2', type: 'main', index: 0 }]],
265 |           error: [[{ node: '2', type: 'error', index: 0 }]]
266 |         }
267 |       });
268 |     });
269 | 
270 |     it('should remove sensitive workflow metadata', () => {
271 |       const workflow = {
272 |         id: 'workflow-123',
273 |         name: 'My Workflow',
274 |         nodes: [],
275 |         connections: {},
276 |         settings: {
277 |           errorWorkflow: 'error-workflow-id',
278 |           timezone: 'America/New_York'
279 |         },
280 |         staticData: { some: 'data' },
281 |         pinData: { node1: 'pinned' },
282 |         credentials: { slack: 'cred-123' },
283 |         sharedWorkflows: ['user-456'],
284 |         ownedBy: 'user-123',
285 |         createdBy: 'user-123',
286 |         updatedBy: 'user-456'
287 |       };
288 | 
289 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
290 | 
291 |       // Verify that sensitive workflow-level properties are not in the sanitized output
292 |       // The sanitized workflow should only have specific fields as defined in SanitizedWorkflow interface
293 |       expect(sanitized.nodes).toEqual([]);
294 |       expect(sanitized.connections).toEqual({});
295 |       expect(sanitized.nodeCount).toBe(0);
296 |       expect(sanitized.nodeTypes).toEqual([]);
297 | 
298 |       // Verify these fields don't exist in the sanitized output
299 |       const sanitizedAsAny = sanitized as any;
300 |       expect(sanitizedAsAny.settings).toBeUndefined();
301 |       expect(sanitizedAsAny.staticData).toBeUndefined();
302 |       expect(sanitizedAsAny.pinData).toBeUndefined();
303 |       expect(sanitizedAsAny.credentials).toBeUndefined();
304 |       expect(sanitizedAsAny.sharedWorkflows).toBeUndefined();
305 |       expect(sanitizedAsAny.ownedBy).toBeUndefined();
306 |       expect(sanitizedAsAny.createdBy).toBeUndefined();
307 |       expect(sanitizedAsAny.updatedBy).toBeUndefined();
308 |     });
309 |   });
310 | 
311 |   describe('edge cases and error handling', () => {
312 |     it('should handle null or undefined workflow', () => {
313 |       // JSON.parse(JSON.stringify(null)) returns null, so accessing workflow properties on it throws
314 |       expect(() => WorkflowSanitizer.sanitizeWorkflow(null as any)).toThrow();
315 |       expect(() => WorkflowSanitizer.sanitizeWorkflow(undefined as any)).toThrow();
316 |     });
317 | 
318 |     it('should handle workflow without nodes', () => {
319 |       const workflow = {
320 |         connections: {}
321 |       };
322 | 
323 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
324 | 
325 |       expect(sanitized.nodeCount).toBe(0);
326 |       expect(sanitized.nodeTypes).toEqual([]);
327 |       expect(sanitized.nodes).toEqual([]);
328 |       expect(sanitized.hasTrigger).toBe(false);
329 |       expect(sanitized.hasWebhook).toBe(false);
330 |     });
331 | 
332 |     it('should handle workflow without connections', () => {
333 |       const workflow = {
334 |         nodes: [
335 |           {
336 |             id: '1',
337 |             name: 'Test Node',
338 |             type: 'n8n-nodes-base.function',
339 |             position: [100, 100],
340 |             parameters: {}
341 |           }
342 |         ]
343 |       };
344 | 
345 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
346 | 
347 |       expect(sanitized.connections).toEqual({});
348 |       expect(sanitized.nodeCount).toBe(1);
349 |     });
350 | 
351 |     it('should handle malformed nodes array', () => {
352 |       const workflow = {
353 |         nodes: [
354 |           {
355 |             id: '2',
356 |             name: 'Valid Node',
357 |             type: 'n8n-nodes-base.function',
358 |             position: [100, 100],
359 |             parameters: {}
360 |           }
361 |         ],
362 |         connections: {}
363 |       };
364 | 
365 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
366 | 
367 |       // Should handle workflow gracefully
368 |       expect(sanitized.nodeCount).toBe(1);
369 |       expect(sanitized.nodes.length).toBe(1);
370 |     });
371 | 
372 |     it('should handle deeply nested objects in parameters', () => {
373 |       const workflow = {
374 |         nodes: [
375 |           {
376 |             id: '1',
377 |             name: 'Deep Node',
378 |             type: 'n8n-nodes-base.httpRequest',
379 |             position: [100, 100],
380 |             parameters: {
381 |               level1: {
382 |                 level2: {
383 |                   level3: {
384 |                     level4: {
385 |                       level5: {
386 |                         secret: 'deep-secret-key-1234567890abcdef',
387 |                         safe: 'safe-value'
388 |                       }
389 |                     }
390 |                   }
391 |                 }
392 |               }
393 |             }
394 |           }
395 |         ],
396 |         connections: {}
397 |       };
398 | 
399 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
400 | 
401 |       expect(sanitized.nodes[0].parameters.level1.level2.level3.level4.level5.secret).toBe('[REDACTED]');
402 |       expect(sanitized.nodes[0].parameters.level1.level2.level3.level4.level5.safe).toBe('safe-value');
403 |     });
404 | 
405 |     it('should handle circular references gracefully', () => {
406 |       const workflow: any = {
407 |         nodes: [
408 |           {
409 |             id: '1',
410 |             name: 'Circular Node',
411 |             type: 'n8n-nodes-base.function',
412 |             position: [100, 100],
413 |             parameters: {}
414 |           }
415 |         ],
416 |         connections: {}
417 |       };
418 | 
419 |       // Create circular reference
420 |       workflow.nodes[0].parameters.selfRef = workflow.nodes[0];
421 | 
422 |       // JSON.stringify throws on circular references, so this should throw
423 |       expect(() => WorkflowSanitizer.sanitizeWorkflow(workflow)).toThrow();
424 |     });
425 | 
426 |     it('should handle extremely large workflows', () => {
427 |       const largeWorkflow = {
428 |         nodes: Array.from({ length: 1000 }, (_, i) => ({
429 |           id: String(i),
430 |           name: `Node ${i}`,
431 |           type: 'n8n-nodes-base.function',
432 |           position: [i * 10, 100],
433 |           parameters: {
434 |             code: `// Node ${i} code here`.repeat(100) // Large parameter
435 |           }
436 |         })),
437 |         connections: {}
438 |       };
439 | 
440 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(largeWorkflow);
441 | 
442 |       expect(sanitized.nodeCount).toBe(1000);
443 |       expect(sanitized.complexity).toBe('complex');
444 |     });
445 | 
446 |     it('should handle various sensitive data patterns', () => {
447 |       const workflow = {
448 |         nodes: [
449 |           {
450 |             id: '1',
451 |             name: 'Sensitive Node',
452 |             type: 'n8n-nodes-base.httpRequest',
453 |             position: [100, 100],
454 |             parameters: {
455 |               // Different patterns of sensitive data
456 |               api_key: 'sk-1234567890abcdef1234567890abcdef',
457 |               accessToken: 'ghp_abcdefghijklmnopqrstuvwxyz123456',
458 |               secret_token: 'secret-123-abc-def',
459 |               authKey: 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9',
460 |               clientSecret: 'abc123def456ghi789',
461 |               webhookUrl: 'https://hooks.example.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXXXXXX',
462 |               databaseUrl: 'postgres://user:password@localhost:5432/db',
463 |               connectionString: 'Server=myServerAddress;Database=myDataBase;Uid=myUsername;Pwd=myPassword;',
464 |               // Safe values that should remain
465 |               timeout: 5000,
466 |               method: 'POST',
467 |               retries: 3,
468 |               name: 'My API Call'
469 |             }
470 |           }
471 |         ],
472 |         connections: {}
473 |       };
474 | 
475 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
476 | 
477 |       const params = sanitized.nodes[0].parameters;
478 |       expect(params.api_key).toBe('[REDACTED]');
479 |       expect(params.accessToken).toBe('[REDACTED]');
480 |       expect(params.secret_token).toBe('[REDACTED]');
481 |       expect(params.authKey).toBe('[REDACTED]');
482 |       expect(params.clientSecret).toBe('[REDACTED]');
483 |       expect(params.webhookUrl).toBe('[REDACTED]');
484 |       expect(params.databaseUrl).toBe('[REDACTED]');
485 |       expect(params.connectionString).toBe('[REDACTED]');
486 | 
487 |       // Safe values should remain
488 |       expect(params.timeout).toBe(5000);
489 |       expect(params.method).toBe('POST');
490 |       expect(params.retries).toBe(3);
491 |       expect(params.name).toBe('My API Call');
492 |     });
493 | 
494 |     it('should handle arrays in parameters', () => {
495 |       const workflow = {
496 |         nodes: [
497 |           {
498 |             id: '1',
499 |             name: 'Array Node',
500 |             type: 'n8n-nodes-base.httpRequest',
501 |             position: [100, 100],
502 |             parameters: {
503 |               headers: [
504 |                 { name: 'Authorization', value: 'Bearer secret-token-123456789' },
505 |                 { name: 'Content-Type', value: 'application/json' },
506 |                 { name: 'X-API-Key', value: 'api-key-abcdefghijklmnopqrstuvwxyz' }
507 |               ],
508 |               methods: ['GET', 'POST']
509 |             }
510 |           }
511 |         ],
512 |         connections: {}
513 |       };
514 | 
515 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
516 | 
517 |       const headers = sanitized.nodes[0].parameters.headers;
518 |       expect(headers[0].value).toBe('[REDACTED]'); // Authorization
519 |       expect(headers[1].value).toBe('application/json'); // Content-Type (safe)
520 |       expect(headers[2].value).toBe('[REDACTED]'); // X-API-Key
521 |       expect(sanitized.nodes[0].parameters.methods).toEqual(['GET', 'POST']); // Array should remain
522 |     });
523 | 
524 |     it('should handle mixed data types in parameters', () => {
525 |       const workflow = {
526 |         nodes: [
527 |           {
528 |             id: '1',
529 |             name: 'Mixed Node',
530 |             type: 'n8n-nodes-base.function',
531 |             position: [100, 100],
532 |             parameters: {
533 |               numberValue: 42,
534 |               booleanValue: true,
535 |               stringValue: 'safe string',
536 |               nullValue: null,
537 |               undefinedValue: undefined,
538 |               dateValue: new Date('2024-01-01'),
539 |               arrayValue: [1, 2, 3],
540 |               nestedObject: {
541 |                 secret: 'secret-key-12345678',
542 |                 safe: 'safe-value'
543 |               }
544 |             }
545 |           }
546 |         ],
547 |         connections: {}
548 |       };
549 | 
550 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
551 | 
552 |       const params = sanitized.nodes[0].parameters;
553 |       expect(params.numberValue).toBe(42);
554 |       expect(params.booleanValue).toBe(true);
555 |       expect(params.stringValue).toBe('safe string');
556 |       expect(params.nullValue).toBeNull();
557 |       expect(params.undefinedValue).toBeUndefined();
558 |       expect(params.arrayValue).toEqual([1, 2, 3]);
559 |       expect(params.nestedObject.secret).toBe('[REDACTED]');
560 |       expect(params.nestedObject.safe).toBe('safe-value');
561 |     });
562 | 
563 |     it('should handle missing node properties gracefully', () => {
564 |       const workflow = {
565 |         nodes: [
566 |           { id: '3', name: 'Complete', type: 'n8n-nodes-base.function' } // Missing position but has required fields
567 |         ],
568 |         connections: {}
569 |       };
570 | 
571 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
572 | 
573 |       expect(sanitized.nodes).toBeDefined();
574 |       expect(sanitized.nodeCount).toBe(1);
575 |     });
576 | 
577 |     it('should handle complex connection structures', () => {
578 |       const workflow = {
579 |         nodes: [
580 |           { id: '1', name: 'Start', type: 'n8n-nodes-base.start', position: [0, 0], parameters: {} },
581 |           { id: '2', name: 'Branch', type: 'n8n-nodes-base.if', position: [100, 0], parameters: {} },
582 |           { id: '3', name: 'Path A', type: 'n8n-nodes-base.function', position: [200, 0], parameters: {} },
583 |           { id: '4', name: 'Path B', type: 'n8n-nodes-base.function', position: [200, 100], parameters: {} },
584 |           { id: '5', name: 'Merge', type: 'n8n-nodes-base.merge', position: [300, 50], parameters: {} }
585 |         ],
586 |         connections: {
587 |           '1': {
588 |             main: [[{ node: '2', type: 'main', index: 0 }]]
589 |           },
590 |           '2': {
591 |             main: [
592 |               [{ node: '3', type: 'main', index: 0 }],
593 |               [{ node: '4', type: 'main', index: 0 }]
594 |             ]
595 |           },
596 |           '3': {
597 |             main: [[{ node: '5', type: 'main', index: 0 }]]
598 |           },
599 |           '4': {
600 |             main: [[{ node: '5', type: 'main', index: 1 }]]
601 |           }
602 |         }
603 |       };
604 | 
605 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
606 | 
607 |       expect(sanitized.connections).toEqual(workflow.connections);
608 |       expect(sanitized.nodeCount).toBe(5);
609 |       expect(sanitized.complexity).toBe('simple'); // 5 nodes = simple
610 |     });
611 | 
612 |     it('should generate different hashes for different workflows', () => {
613 |       const workflow1 = {
614 |         nodes: [{ id: '1', name: 'Node1', type: 'type1', position: [0, 0], parameters: {} }],
615 |         connections: {}
616 |       };
617 | 
618 |       const workflow2 = {
619 |         nodes: [{ id: '1', name: 'Node2', type: 'type2', position: [0, 0], parameters: {} }],
620 |         connections: {}
621 |       };
622 | 
623 |       const hash1 = WorkflowSanitizer.generateWorkflowHash(workflow1);
624 |       const hash2 = WorkflowSanitizer.generateWorkflowHash(workflow2);
625 | 
626 |       expect(hash1).not.toBe(hash2);
627 |       expect(hash1).toMatch(/^[a-f0-9]{16}$/);
628 |       expect(hash2).toMatch(/^[a-f0-9]{16}$/);
629 |     });
630 | 
631 |     it('should handle workflow with only trigger nodes', () => {
632 |       const workflow = {
633 |         nodes: [
634 |           { id: '1', name: 'Cron', type: 'n8n-nodes-base.cron', position: [0, 0], parameters: {} },
635 |           { id: '2', name: 'Webhook', type: 'n8n-nodes-base.webhook', position: [100, 0], parameters: {} }
636 |         ],
637 |         connections: {}
638 |       };
639 | 
640 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
641 | 
642 |       expect(sanitized.hasTrigger).toBe(true);
643 |       expect(sanitized.hasWebhook).toBe(true);
644 |       expect(sanitized.nodeTypes).toContain('n8n-nodes-base.cron');
645 |       expect(sanitized.nodeTypes).toContain('n8n-nodes-base.webhook');
646 |     });
647 | 
648 |     it('should handle workflow with special characters in node names and types', () => {
649 |       const workflow = {
650 |         nodes: [
651 |           {
652 |             id: '1',
653 |             name: 'Node with émojis 🚀 and specíal chars',
654 |             type: 'n8n-nodes-base.function',
655 |             position: [0, 0],
656 |             parameters: {
657 |               message: 'Test with émojis 🎉 and URLs https://example.com'
658 |             }
659 |           }
660 |         ],
661 |         connections: {}
662 |       };
663 | 
664 |       const sanitized = WorkflowSanitizer.sanitizeWorkflow(workflow);
665 | 
666 |       expect(sanitized.nodeCount).toBe(1);
667 |       expect(sanitized.nodes[0].name).toBe('Node with émojis 🚀 and specíal chars');
668 |     });
669 |   });
670 | });
```
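
The redaction behaviour asserted above (API keys, tokens, secrets, auth headers, and URL-like fields become `[REDACTED]`, while `method`, `path`, `Content-Type`, numbers, and other safe values pass through) can be pictured as a recursive key-pattern walk over each node's `parameters`. The sketch below is an assumption for illustration only: the helper name, regex, and safe-key list are invented here, and the real logic (including the deep clone, circular-reference handling, hashing, and complexity metrics tested above) lives in `src/telemetry/workflow-sanitizer.ts`.

```typescript
// Hypothetical redaction pass: names and patterns are assumptions chosen to mirror
// the expectations in the tests above, not the shipped implementation.
const SENSITIVE_KEY = /api[-_]?key|secret|token|auth|password|credential|webhook|url|endpoint|connection/i;
const SAFE_KEYS = new Set(['path', 'method', 'name', 'content-type', 'timeout', 'retries']);

function shouldRedact(key: string): boolean {
  const lower = key.toLowerCase();
  return SENSITIVE_KEY.test(lower) && !SAFE_KEYS.has(lower);
}

function redactParameters(value: unknown, key = ''): unknown {
  if (value instanceof Date) return value; // leave non-plain objects untouched
  if (Array.isArray(value)) {
    return value.map((item) => redactParameters(item, key));
  }
  if (value !== null && typeof value === 'object') {
    const record = value as Record<string, unknown>;
    // Header-style entries ({ name, value }) are redacted based on their name field.
    if (typeof record.name === 'string' && typeof record.value === 'string' && shouldRedact(record.name)) {
      return { ...record, value: '[REDACTED]' };
    }
    return Object.fromEntries(
      Object.entries(record).map(([k, v]) => [k, redactParameters(v, k)])
    );
  }
  // Primitive leaf: redact strings stored under sensitive-looking keys, keep everything else.
  return typeof value === 'string' && shouldRedact(key) ? '[REDACTED]' : value;
}
```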

--------------------------------------------------------------------------------
/tests/unit/services/workflow-validator-mocks.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, beforeEach, vi, type Mock } from 'vitest';
  2 | import { WorkflowValidator } from '@/services/workflow-validator';
  3 | import { NodeRepository } from '@/database/node-repository';
  4 | import { EnhancedConfigValidator } from '@/services/enhanced-config-validator';
  5 | 
  6 | vi.mock('@/utils/logger');
  7 | 
  8 | describe('WorkflowValidator - Mock-based Unit Tests', () => {
  9 |   let validator: WorkflowValidator;
 10 |   let mockNodeRepository: any;
 11 |   let mockGetNode: Mock;
 12 | 
 13 |   beforeEach(() => {
 14 |     vi.clearAllMocks();
 15 | 
 16 |     // Create detailed mock repository with spy functions
 17 |     mockGetNode = vi.fn();
 18 |     mockNodeRepository = {
 19 |       getNode: mockGetNode
 20 |     };
 21 | 
 22 |     validator = new WorkflowValidator(mockNodeRepository, EnhancedConfigValidator);
 23 | 
 24 |     // Default mock responses
 25 |     mockGetNode.mockImplementation((type: string) => {
 26 |       if (type.includes('httpRequest')) {
 27 |         return {
 28 |           node_type: type,
 29 |           display_name: 'HTTP Request',
 30 |           isVersioned: true,
 31 |           version: 4
 32 |         };
 33 |       } else if (type.includes('set')) {
 34 |         return {
 35 |           node_type: type,
 36 |           display_name: 'Set',
 37 |           isVersioned: true,
 38 |           version: 3
 39 |         };
 40 |       } else if (type.includes('respondToWebhook')) {
 41 |         return {
 42 |           node_type: type,
 43 |           display_name: 'Respond to Webhook',
 44 |           isVersioned: true,
 45 |           version: 1
 46 |         };
 47 |       }
 48 |       return null;
 49 |     });
 50 |   });
 51 | 
 52 |   describe('Error Handler Detection Logic', () => {
 53 |     it('should correctly identify error handlers by node name patterns', async () => {
 54 |       const errorNodeNames = [
 55 |         'Error Handler',
 56 |         'Handle Error',
 57 |         'Catch Exception',
 58 |         'Failure Response',
 59 |         'Error Notification',
 60 |         'Fail Safe',
 61 |         'Exception Handler',
 62 |         'Error Callback'
 63 |       ];
 64 | 
 65 |       const successNodeNames = [
 66 |         'Process Data',
 67 |         'Transform',
 68 |         'Success Handler',
 69 |         'Continue Process',
 70 |         'Normal Flow'
 71 |       ];
 72 | 
 73 |       for (const errorName of errorNodeNames) {
 74 |         const workflow = {
 75 |           nodes: [
 76 |             {
 77 |               id: '1',
 78 |               name: 'Source',
 79 |               type: 'n8n-nodes-base.httpRequest',
 80 |               position: [0, 0],
 81 |               parameters: {}
 82 |             },
 83 |             {
 84 |               id: '2',
 85 |               name: 'Success Path',
 86 |               type: 'n8n-nodes-base.set',
 87 |               position: [200, 0],
 88 |               parameters: {}
 89 |             },
 90 |             {
 91 |               id: '3',
 92 |               name: errorName,
 93 |               type: 'n8n-nodes-base.set',
 94 |               position: [200, 100],
 95 |               parameters: {}
 96 |             }
 97 |           ],
 98 |           connections: {
 99 |             'Source': {
100 |               main: [
101 |                 [
102 |                   { node: 'Success Path', type: 'main', index: 0 },
103 |                   { node: errorName, type: 'main', index: 0 }  // Should be detected as error handler
104 |                 ]
105 |               ]
106 |             }
107 |           }
108 |         };
109 | 
110 |         const result = await validator.validateWorkflow(workflow as any);
111 | 
112 |         // Should detect this as an incorrect error configuration
113 |         const hasError = result.errors.some(e =>
114 |           e.message.includes('Incorrect error output configuration') &&
115 |           e.message.includes(errorName)
116 |         );
117 |         expect(hasError).toBe(true);
118 |       }
119 | 
120 |       // Test that success node names are NOT flagged
121 |       for (const successName of successNodeNames) {
122 |         const workflow = {
123 |           nodes: [
124 |             {
125 |               id: '1',
126 |               name: 'Source',
127 |               type: 'n8n-nodes-base.httpRequest',
128 |               position: [0, 0],
129 |               parameters: {}
130 |             },
131 |             {
132 |               id: '2',
133 |               name: 'First Process',
134 |               type: 'n8n-nodes-base.set',
135 |               position: [200, 0],
136 |               parameters: {}
137 |             },
138 |             {
139 |               id: '3',
140 |               name: successName,
141 |               type: 'n8n-nodes-base.set',
142 |               position: [200, 100],
143 |               parameters: {}
144 |             }
145 |           ],
146 |           connections: {
147 |             'Source': {
148 |               main: [
149 |                 [
150 |                   { node: 'First Process', type: 'main', index: 0 },
151 |                   { node: successName, type: 'main', index: 0 }
152 |                 ]
153 |               ]
154 |             }
155 |           }
156 |         };
157 | 
158 |         const result = await validator.validateWorkflow(workflow as any);
159 | 
160 |         // Should NOT detect this as an error configuration
161 |         const hasError = result.errors.some(e =>
162 |           e.message.includes('Incorrect error output configuration')
163 |         );
164 |         expect(hasError).toBe(false);
165 |       }
166 |     });
167 | 
168 |     it('should correctly identify error handlers by node type patterns', async () => {
169 |       const errorNodeTypes = [
170 |         'n8n-nodes-base.respondToWebhook',
171 |         'n8n-nodes-base.emailSend'
172 |         // Note: slack and webhook are not in the current detection logic
173 |       ];
174 | 
175 |       // Update mock to return appropriate node info for these types
176 |       mockGetNode.mockImplementation((type: string) => {
177 |         return {
178 |           node_type: type,
179 |           display_name: type.split('.').pop() || 'Unknown',
180 |           isVersioned: true,
181 |           version: 1
182 |         };
183 |       });
184 | 
185 |       for (const nodeType of errorNodeTypes) {
186 |         const workflow = {
187 |           nodes: [
188 |             {
189 |               id: '1',
190 |               name: 'Source',
191 |               type: 'n8n-nodes-base.httpRequest',
192 |               position: [0, 0],
193 |               parameters: {}
194 |             },
195 |             {
196 |               id: '2',
197 |               name: 'Success Path',
198 |               type: 'n8n-nodes-base.set',
199 |               position: [200, 0],
200 |               parameters: {}
201 |             },
202 |             {
203 |               id: '3',
204 |               name: 'Response Node',
205 |               type: nodeType,
206 |               position: [200, 100],
207 |               parameters: {}
208 |             }
209 |           ],
210 |           connections: {
211 |             'Source': {
212 |               main: [
213 |                 [
214 |                   { node: 'Success Path', type: 'main', index: 0 },
215 |                   { node: 'Response Node', type: 'main', index: 0 }  // Should be detected
216 |                 ]
217 |               ]
218 |             }
219 |           }
220 |         };
221 | 
222 |         const result = await validator.validateWorkflow(workflow as any);
223 | 
224 |         // Should detect this as an incorrect error configuration
225 |         const hasError = result.errors.some(e =>
226 |           e.message.includes('Incorrect error output configuration') &&
227 |           e.message.includes('Response Node')
228 |         );
229 |         expect(hasError).toBe(true);
230 |       }
231 |     });
232 | 
233 |     it('should handle cases where node repository returns null', async () => {
234 |       // Mock repository to return null for unknown nodes
235 |       mockGetNode.mockImplementation((type: string) => {
236 |         if (type === 'n8n-nodes-base.unknownNode') {
237 |           return null;
238 |         }
239 |         return {
240 |           node_type: type,
241 |           display_name: 'Known Node',
242 |           isVersioned: true,
243 |           version: 1
244 |         };
245 |       });
246 | 
247 |       const workflow = {
248 |         nodes: [
249 |           {
250 |             id: '1',
251 |             name: 'Source',
252 |             type: 'n8n-nodes-base.httpRequest',
253 |             position: [0, 0],
254 |             parameters: {}
255 |           },
256 |           {
257 |             id: '2',
258 |             name: 'Unknown Node',
259 |             type: 'n8n-nodes-base.unknownNode',
260 |             position: [200, 0],
261 |             parameters: {}
262 |           },
263 |           {
264 |             id: '3',
265 |             name: 'Error Handler',
266 |             type: 'n8n-nodes-base.set',
267 |             position: [200, 100],
268 |             parameters: {}
269 |           }
270 |         ],
271 |         connections: {
272 |           'Source': {
273 |             main: [
274 |               [
275 |                 { node: 'Unknown Node', type: 'main', index: 0 },
276 |                 { node: 'Error Handler', type: 'main', index: 0 }
277 |               ]
278 |             ]
279 |           }
280 |         }
281 |       };
282 | 
283 |       const result = await validator.validateWorkflow(workflow as any);
284 | 
285 |       // Should still detect the error configuration based on node name
286 |       const hasError = result.errors.some(e =>
287 |         e.message.includes('Incorrect error output configuration') &&
288 |         e.message.includes('Error Handler')
289 |       );
290 |       expect(hasError).toBe(true);
291 | 
292 |       // Should not crash due to null node info
293 |       expect(result).toHaveProperty('valid');
294 |       expect(Array.isArray(result.errors)).toBe(true);
295 |     });
296 |   });
297 | 
298 |   describe('onError Property Validation Logic', () => {
299 |     it('should validate onError property combinations correctly', async () => {
300 |       const testCases = [
301 |         {
302 |           name: 'onError set but no error connections',
303 |           onError: 'continueErrorOutput',
304 |           hasErrorConnections: false,
305 |           expectedErrorType: 'error',
306 |           expectedMessage: "has onError: 'continueErrorOutput' but no error output connections"
307 |         },
308 |         {
309 |           name: 'error connections but no onError',
310 |           onError: undefined,
311 |           hasErrorConnections: true,
312 |           expectedErrorType: 'warning',
313 |           expectedMessage: 'error output connections in main[1] but missing onError'
314 |         },
315 |         {
316 |           name: 'onError set with error connections',
317 |           onError: 'continueErrorOutput',
318 |           hasErrorConnections: true,
319 |           expectedErrorType: null,
320 |           expectedMessage: null
321 |         },
322 |         {
323 |           name: 'no onError and no error connections',
324 |           onError: undefined,
325 |           hasErrorConnections: false,
326 |           expectedErrorType: null,
327 |           expectedMessage: null
328 |         }
329 |       ];
330 | 
331 |       for (const testCase of testCases) {
332 |         const workflow = {
333 |           nodes: [
334 |             {
335 |               id: '1',
336 |               name: 'Test Node',
337 |               type: 'n8n-nodes-base.httpRequest',
338 |               position: [0, 0],
339 |               parameters: {},
340 |               ...(testCase.onError ? { onError: testCase.onError } : {})
341 |             },
342 |             {
343 |               id: '2',
344 |               name: 'Success Handler',
345 |               type: 'n8n-nodes-base.set',
346 |               position: [200, 0],
347 |               parameters: {}
348 |             },
349 |             {
350 |               id: '3',
351 |               name: 'Error Handler',
352 |               type: 'n8n-nodes-base.set',
353 |               position: [200, 100],
354 |               parameters: {}
355 |             }
356 |           ],
357 |           connections: {
358 |             'Test Node': {
359 |               main: [
360 |                 [
361 |                   { node: 'Success Handler', type: 'main', index: 0 }
362 |                 ],
363 |                 ...(testCase.hasErrorConnections ? [
364 |                   [
365 |                     { node: 'Error Handler', type: 'main', index: 0 }
366 |                   ]
367 |                 ] : [])
368 |               ]
369 |             }
370 |           }
371 |         };
372 | 
373 |         const result = await validator.validateWorkflow(workflow as any);
374 | 
375 |         if (testCase.expectedErrorType === 'error') {
376 |           const hasExpectedError = result.errors.some(e =>
377 |             e.nodeName === 'Test Node' &&
378 |             e.message.includes(testCase.expectedMessage!)
379 |           );
380 |           expect(hasExpectedError).toBe(true);
381 |         } else if (testCase.expectedErrorType === 'warning') {
382 |           const hasExpectedWarning = result.warnings.some(w =>
383 |             w.nodeName === 'Test Node' &&
384 |             w.message.includes(testCase.expectedMessage!)
385 |           );
386 |           expect(hasExpectedWarning).toBe(true);
387 |         } else {
388 |           // Should not have related errors or warnings about onError/error output mismatches
389 |           const hasRelatedError = result.errors.some(e =>
390 |             e.nodeName === 'Test Node' &&
391 |             (e.message.includes("has onError: 'continueErrorOutput' but no error output connections") ||
392 |              e.message.includes('Incorrect error output configuration'))
393 |           );
394 |           const hasRelatedWarning = result.warnings.some(w =>
395 |             w.nodeName === 'Test Node' &&
396 |             w.message.includes('error output connections in main[1] but missing onError')
397 |           );
398 |           expect(hasRelatedError).toBe(false);
399 |           expect(hasRelatedWarning).toBe(false);
400 |         }
401 |       }
402 |     });
403 | 
404 |     it('should handle different onError values correctly', async () => {
405 |       const onErrorValues = [
406 |         'continueErrorOutput',
407 |         'continueRegularOutput',
408 |         'stopWorkflow'
409 |       ];
410 | 
411 |       for (const onErrorValue of onErrorValues) {
412 |         const workflow = {
413 |           nodes: [
414 |             {
415 |               id: '1',
416 |               name: 'Test Node',
417 |               type: 'n8n-nodes-base.httpRequest',
418 |               position: [0, 0],
419 |               parameters: {},
420 |               onError: onErrorValue
421 |             },
422 |             {
423 |               id: '2',
424 |               name: 'Next Node',
425 |               type: 'n8n-nodes-base.set',
426 |               position: [200, 0],
427 |               parameters: {}
428 |             }
429 |           ],
430 |           connections: {
431 |             'Test Node': {
432 |               main: [
433 |                 [
434 |                   { node: 'Next Node', type: 'main', index: 0 }
435 |                 ]
436 |                 // No error connections
437 |               ]
438 |             }
439 |           }
440 |         };
441 | 
442 |         const result = await validator.validateWorkflow(workflow as any);
443 | 
444 |         if (onErrorValue === 'continueErrorOutput') {
445 |           // Should have error about missing error connections
446 |           const hasError = result.errors.some(e =>
447 |             e.nodeName === 'Test Node' &&
448 |             e.message.includes("has onError: 'continueErrorOutput' but no error output connections")
449 |           );
450 |           expect(hasError).toBe(true);
451 |         } else {
452 |           // Should not have error about missing error connections
453 |           const hasError = result.errors.some(e =>
454 |             e.nodeName === 'Test Node' &&
455 |             e.message.includes('but no error output connections')
456 |           );
457 |           expect(hasError).toBe(false);
458 |         }
459 |       }
460 |     });
461 |   });
462 | 
463 |   describe('JSON Format Generation', () => {
464 |     it('should generate valid JSON in error messages', async () => {
465 |       const workflow = {
466 |         nodes: [
467 |           {
468 |             id: '1',
469 |             name: 'API Call',
470 |             type: 'n8n-nodes-base.httpRequest',
471 |             position: [0, 0],
472 |             parameters: {}
473 |           },
474 |           {
475 |             id: '2',
476 |             name: 'Success Process',
477 |             type: 'n8n-nodes-base.set',
478 |             position: [200, 0],
479 |             parameters: {}
480 |           },
481 |           {
482 |             id: '3',
483 |             name: 'Error Handler',
484 |             type: 'n8n-nodes-base.respondToWebhook',
485 |             position: [200, 100],
486 |             parameters: {}
487 |           }
488 |         ],
489 |         connections: {
490 |           'API Call': {
491 |             main: [
492 |               [
493 |                 { node: 'Success Process', type: 'main', index: 0 },
494 |                 { node: 'Error Handler', type: 'main', index: 0 }
495 |               ]
496 |             ]
497 |           }
498 |         }
499 |       };
500 | 
501 |       const result = await validator.validateWorkflow(workflow as any);
502 | 
503 |       const errorConfigError = result.errors.find(e =>
504 |         e.message.includes('Incorrect error output configuration')
505 |       );
506 | 
507 |       expect(errorConfigError).toBeDefined();
508 | 
509 |       // Extract JSON sections from error message
510 |       const incorrectMatch = errorConfigError!.message.match(/INCORRECT \(current\):\n([\s\S]*?)\n\nCORRECT/);
511 |       const correctMatch = errorConfigError!.message.match(/CORRECT \(should be\):\n([\s\S]*?)\n\nAlso add/);
512 | 
513 |       expect(incorrectMatch).toBeDefined();
514 |       expect(correctMatch).toBeDefined();
515 | 
516 |       // Extract just the JSON part (remove comments)
517 |       const incorrectJsonStr = incorrectMatch![1];
518 |       const correctJsonStr = correctMatch![1];
519 | 
520 |       // Remove comments and clean up for JSON parsing
521 |       const cleanIncorrectJson = incorrectJsonStr.replace(/\/\/.*$/gm, '').replace(/,\s*$/, '');
522 |       const cleanCorrectJson = correctJsonStr.replace(/\/\/.*$/gm, '').replace(/,\s*$/, '');
523 | 
524 |       const incorrectJson = `{${cleanIncorrectJson}}`;
525 |       const correctJson = `{${cleanCorrectJson}}`;
526 | 
527 |       expect(() => JSON.parse(incorrectJson)).not.toThrow();
528 |       expect(() => JSON.parse(correctJson)).not.toThrow();
529 | 
530 |       const parsedIncorrect = JSON.parse(incorrectJson);
531 |       const parsedCorrect = JSON.parse(correctJson);
532 | 
533 |       // Validate structure
534 |       expect(parsedIncorrect).toHaveProperty('API Call');
535 |       expect(parsedCorrect).toHaveProperty('API Call');
536 |       expect(parsedIncorrect['API Call']).toHaveProperty('main');
537 |       expect(parsedCorrect['API Call']).toHaveProperty('main');
538 | 
539 |       // Incorrect should have both nodes in main[0]
540 |       expect(Array.isArray(parsedIncorrect['API Call'].main)).toBe(true);
541 |       expect(parsedIncorrect['API Call'].main).toHaveLength(1);
542 |       expect(parsedIncorrect['API Call'].main[0]).toHaveLength(2);
543 | 
544 |       // Correct should have separate arrays
545 |       expect(Array.isArray(parsedCorrect['API Call'].main)).toBe(true);
546 |       expect(parsedCorrect['API Call'].main).toHaveLength(2);
547 |       expect(parsedCorrect['API Call'].main[0]).toHaveLength(1); // Success only
548 |       expect(parsedCorrect['API Call'].main[1]).toHaveLength(1); // Error only
549 |     });
550 | 
551 |     it('should handle special characters in node names in JSON', async () => {
552 |       // Test simpler special characters that are easier to handle in JSON
553 |       const specialNodeNames = [
554 |         'Node with spaces',
555 |         'Node-with-dashes',
556 |         'Node_with_underscores'
557 |       ];
558 | 
559 |       for (const specialName of specialNodeNames) {
560 |         const workflow = {
561 |           nodes: [
562 |             {
563 |               id: '1',
564 |               name: 'Source',
565 |               type: 'n8n-nodes-base.httpRequest',
566 |               position: [0, 0],
567 |               parameters: {}
568 |             },
569 |             {
570 |               id: '2',
571 |               name: 'Success',
572 |               type: 'n8n-nodes-base.set',
573 |               position: [200, 0],
574 |               parameters: {}
575 |             },
576 |             {
577 |               id: '3',
578 |               name: specialName,
579 |               type: 'n8n-nodes-base.respondToWebhook',
580 |               position: [200, 100],
581 |               parameters: {}
582 |             }
583 |           ],
584 |           connections: {
585 |             'Source': {
586 |               main: [
587 |                 [
588 |                   { node: 'Success', type: 'main', index: 0 },
589 |                   { node: specialName, type: 'main', index: 0 }
590 |                 ]
591 |               ]
592 |             }
593 |           }
594 |         };
595 | 
596 |         const result = await validator.validateWorkflow(workflow as any);
597 | 
598 |         const errorConfigError = result.errors.find(e =>
599 |           e.message.includes('Incorrect error output configuration')
600 |         );
601 | 
602 |         expect(errorConfigError).toBeDefined();
603 | 
604 |         // Verify the error message contains the special node name
605 |         expect(errorConfigError!.message).toContain(specialName);
606 | 
607 |         // Verify JSON structure is present (but don't parse due to comments)
608 |         expect(errorConfigError!.message).toContain('INCORRECT (current):');
609 |         expect(errorConfigError!.message).toContain('CORRECT (should be):');
610 |         expect(errorConfigError!.message).toContain('main[0]');
611 |         expect(errorConfigError!.message).toContain('main[1]');
612 |       }
613 |     });
614 |   });
615 | 
616 |   describe('Repository Interaction Patterns', () => {
617 |     it('should call repository getNode with correct parameters', async () => {
618 |       const workflow = {
619 |         nodes: [
620 |           {
621 |             id: '1',
622 |             name: 'HTTP Node',
623 |             type: 'n8n-nodes-base.httpRequest',
624 |             position: [0, 0],
625 |             parameters: {}
626 |           },
627 |           {
628 |             id: '2',
629 |             name: 'Set Node',
630 |             type: 'n8n-nodes-base.set',
631 |             position: [200, 0],
632 |             parameters: {}
633 |           }
634 |         ],
635 |         connections: {
636 |           'HTTP Node': {
637 |             main: [
638 |               [
639 |                 { node: 'Set Node', type: 'main', index: 0 }
640 |               ]
641 |             ]
642 |           }
643 |         }
644 |       };
645 | 
646 |       await validator.validateWorkflow(workflow as any);
647 | 
648 |       // Should have called getNode for each node type (normalized to short form)
649 |       expect(mockGetNode).toHaveBeenCalledWith('nodes-base.httpRequest');
650 |       expect(mockGetNode).toHaveBeenCalledWith('nodes-base.set');
651 |       expect(mockGetNode).toHaveBeenCalledTimes(2);
652 |     });
653 | 
654 |     it('should handle repository errors gracefully', async () => {
655 |       // Mock repository to throw error
656 |       mockGetNode.mockImplementation(() => {
657 |         throw new Error('Database connection failed');
658 |       });
659 | 
660 |       const workflow = {
661 |         nodes: [
662 |           {
663 |             id: '1',
664 |             name: 'Test Node',
665 |             type: 'n8n-nodes-base.httpRequest',
666 |             position: [0, 0],
667 |             parameters: {}
668 |           }
669 |         ],
670 |         connections: {}
671 |       };
672 | 
673 |       // Should not throw error
674 |       const result = await validator.validateWorkflow(workflow as any);
675 | 
676 |       // Should still return a valid result
677 |       expect(result).toHaveProperty('valid');
678 |       expect(Array.isArray(result.errors)).toBe(true);
679 |       expect(Array.isArray(result.warnings)).toBe(true);
680 |     });
681 | 
682 |     it('should call the repository for duplicate node types (documents current, unoptimized behavior)', async () => {
683 |       const workflow = {
684 |         nodes: [
685 |           {
686 |             id: '1',
687 |             name: 'HTTP 1',
688 |             type: 'n8n-nodes-base.httpRequest',
689 |             position: [0, 0],
690 |             parameters: {}
691 |           },
692 |           {
693 |             id: '2',
694 |             name: 'HTTP 2',
695 |             type: 'n8n-nodes-base.httpRequest',
696 |             position: [200, 0],
697 |             parameters: {}
698 |           },
699 |           {
700 |             id: '3',
701 |             name: 'HTTP 3',
702 |             type: 'n8n-nodes-base.httpRequest',
703 |             position: [400, 0],
704 |             parameters: {}
705 |           }
706 |         ],
707 |         connections: {}
708 |       };
709 | 
710 |       await validator.validateWorkflow(workflow as any);
711 | 
712 |       // Should call getNode for the same type multiple times (current implementation)
713 |       // Note: This test documents current behavior. Could be optimized in the future.
714 |       const httpRequestCalls = mockGetNode.mock.calls.filter(
715 |         call => call[0] === 'nodes-base.httpRequest'
716 |       );
717 |       expect(httpRequestCalls.length).toBeGreaterThan(0);
718 |     });
719 |   });
720 | });
```
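
For reference, the connection layout these assertions encode can be restated outside the test file. The sketch below is illustrative only and is not part of the repository: it summarizes the "INCORRECT (current)" versus "CORRECT (should be)" shapes that the validator's error message describes, reusing the node names from the `JSON Format Generation` test above. The earlier `onError` tests add the companion requirement that the source node set `onError: 'continueErrorOutput'` whenever an error branch exists in `main[1]`.

```typescript
// Illustrative sketch only -- not part of the test file on this page.
// Success targets live in main[0]; error targets live in main[1],
// paired with onError: 'continueErrorOutput' on the source node.

// INCORRECT: success and error handlers crowded together in main[0]
const incorrectConnections = {
  'API Call': {
    main: [
      [
        { node: 'Success Process', type: 'main', index: 0 },
        { node: 'Error Handler', type: 'main', index: 0 }
      ]
    ]
  }
};

// CORRECT: main[0] carries the success output, main[1] the error output;
// the 'API Call' node itself should also set onError: 'continueErrorOutput'
const correctConnections = {
  'API Call': {
    main: [
      [{ node: 'Success Process', type: 'main', index: 0 }],
      [{ node: 'Error Handler', type: 'main', index: 0 }]
    ]
  }
};
```

With the second shape (and `onError` set), `validateWorkflow` is expected to report neither the "no error output connections" error nor the "Incorrect error output configuration" error, which is exactly what the mismatch test cases at the top of this page assert.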
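
The repository-interaction tests also imply a node-type normalization step: workflow nodes carry the full package prefix (`n8n-nodes-base.httpRequest`), while the mocked repository is queried with the short form (`nodes-base.httpRequest`). The following is a minimal sketch of that mapping under the assumption of a plain prefix rewrite; the real normalizer is not shown on this page and likely handles additional package prefixes.

```typescript
// Hypothetical helper for illustration only; the actual normalization code
// lives elsewhere in the repository and may differ.
function normalizeNodeType(type: string): string {
  // 'n8n-nodes-base.httpRequest' -> 'nodes-base.httpRequest'
  return type.replace(/^n8n-nodes-base\./, 'nodes-base.');
}

// Matches the arguments asserted on mockGetNode above:
console.log(normalizeNodeType('n8n-nodes-base.httpRequest')); // 'nodes-base.httpRequest'
console.log(normalizeNodeType('n8n-nodes-base.set'));         // 'nodes-base.set'
```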