This is page 14 of 45. Use http://codebase.md/czlonkowski/n8n-mcp?lines=false&page={x} to view the full context. # Directory Structure ``` ├── _config.yml ├── .claude │ └── agents │ ├── code-reviewer.md │ ├── context-manager.md │ ├── debugger.md │ ├── deployment-engineer.md │ ├── mcp-backend-engineer.md │ ├── n8n-mcp-tester.md │ ├── technical-researcher.md │ └── test-automator.md ├── .dockerignore ├── .env.docker ├── .env.example ├── .env.n8n.example ├── .env.test ├── .env.test.example ├── .github │ ├── ABOUT.md │ ├── BENCHMARK_THRESHOLDS.md │ ├── FUNDING.yml │ ├── gh-pages.yml │ ├── secret_scanning.yml │ └── workflows │ ├── benchmark-pr.yml │ ├── benchmark.yml │ ├── docker-build-fast.yml │ ├── docker-build-n8n.yml │ ├── docker-build.yml │ ├── release.yml │ ├── test.yml │ └── update-n8n-deps.yml ├── .gitignore ├── .npmignore ├── ATTRIBUTION.md ├── CHANGELOG.md ├── CLAUDE.md ├── codecov.yml ├── coverage.json ├── data │ ├── .gitkeep │ ├── nodes.db │ ├── nodes.db-shm │ ├── nodes.db-wal │ └── templates.db ├── deploy │ └── quick-deploy-n8n.sh ├── docker │ ├── docker-entrypoint.sh │ ├── n8n-mcp │ ├── parse-config.js │ └── README.md ├── docker-compose.buildkit.yml ├── docker-compose.extract.yml ├── docker-compose.n8n.yml ├── docker-compose.override.yml.example ├── docker-compose.test-n8n.yml ├── docker-compose.yml ├── Dockerfile ├── Dockerfile.railway ├── Dockerfile.test ├── docs │ ├── AUTOMATED_RELEASES.md │ ├── BENCHMARKS.md │ ├── CHANGELOG.md │ ├── CLAUDE_CODE_SETUP.md │ ├── CLAUDE_INTERVIEW.md │ ├── CODECOV_SETUP.md │ ├── CODEX_SETUP.md │ ├── CURSOR_SETUP.md │ ├── DEPENDENCY_UPDATES.md │ ├── DOCKER_README.md │ ├── DOCKER_TROUBLESHOOTING.md │ ├── FINAL_AI_VALIDATION_SPEC.md │ ├── FLEXIBLE_INSTANCE_CONFIGURATION.md │ ├── HTTP_DEPLOYMENT.md │ ├── img │ │ ├── cc_command.png │ │ ├── cc_connected.png │ │ ├── codex_connected.png │ │ ├── cursor_tut.png │ │ ├── Railway_api.png │ │ ├── Railway_server_address.png │ │ ├── vsc_ghcp_chat_agent_mode.png │ │ ├── vsc_ghcp_chat_instruction_files.png │ │ ├── vsc_ghcp_chat_thinking_tool.png │ │ └── windsurf_tut.png │ ├── INSTALLATION.md │ ├── LIBRARY_USAGE.md │ ├── local │ │ ├── DEEP_DIVE_ANALYSIS_2025-10-02.md │ │ ├── DEEP_DIVE_ANALYSIS_README.md │ │ ├── Deep_dive_p1_p2.md │ │ ├── integration-testing-plan.md │ │ ├── integration-tests-phase1-summary.md │ │ ├── N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md │ │ ├── P0_IMPLEMENTATION_PLAN.md │ │ └── TEMPLATE_MINING_ANALYSIS.md │ ├── MCP_ESSENTIALS_README.md │ ├── MCP_QUICK_START_GUIDE.md │ ├── N8N_DEPLOYMENT.md │ ├── RAILWAY_DEPLOYMENT.md │ ├── README_CLAUDE_SETUP.md │ ├── README.md │ ├── tools-documentation-usage.md │ ├── VS_CODE_PROJECT_SETUP.md │ ├── WINDSURF_SETUP.md │ └── workflow-diff-examples.md ├── examples │ └── enhanced-documentation-demo.js ├── fetch_log.txt ├── LICENSE ├── MEMORY_N8N_UPDATE.md ├── MEMORY_TEMPLATE_UPDATE.md ├── monitor_fetch.sh ├── N8N_HTTP_STREAMABLE_SETUP.md ├── n8n-nodes.db ├── P0-R3-TEST-PLAN.md ├── package-lock.json ├── package.json ├── package.runtime.json ├── PRIVACY.md ├── railway.json ├── README.md ├── renovate.json ├── scripts │ ├── analyze-optimization.sh │ ├── audit-schema-coverage.ts │ ├── build-optimized.sh │ ├── compare-benchmarks.js │ ├── demo-optimization.sh │ ├── deploy-http.sh │ ├── deploy-to-vm.sh │ ├── export-webhook-workflows.ts │ ├── extract-changelog.js │ ├── extract-from-docker.js │ ├── extract-nodes-docker.sh │ ├── extract-nodes-simple.sh │ ├── format-benchmark-results.js │ ├── generate-benchmark-stub.js │ ├── generate-detailed-reports.js │ ├── generate-test-summary.js │ 
├── http-bridge.js │ ├── mcp-http-client.js │ ├── migrate-nodes-fts.ts │ ├── migrate-tool-docs.ts │ ├── n8n-docs-mcp.service │ ├── nginx-n8n-mcp.conf │ ├── prebuild-fts5.ts │ ├── prepare-release.js │ ├── publish-npm-quick.sh │ ├── publish-npm.sh │ ├── quick-test.ts │ ├── run-benchmarks-ci.js │ ├── sync-runtime-version.js │ ├── test-ai-validation-debug.ts │ ├── test-code-node-enhancements.ts │ ├── test-code-node-fixes.ts │ ├── test-docker-config.sh │ ├── test-docker-fingerprint.ts │ ├── test-docker-optimization.sh │ ├── test-docker.sh │ ├── test-empty-connection-validation.ts │ ├── test-error-message-tracking.ts │ ├── test-error-output-validation.ts │ ├── test-error-validation.js │ ├── test-essentials.ts │ ├── test-expression-code-validation.ts │ ├── test-expression-format-validation.js │ ├── test-fts5-search.ts │ ├── test-fuzzy-fix.ts │ ├── test-fuzzy-simple.ts │ ├── test-helpers-validation.ts │ ├── test-http-search.ts │ ├── test-http.sh │ ├── test-jmespath-validation.ts │ ├── test-multi-tenant-simple.ts │ ├── test-multi-tenant.ts │ ├── test-n8n-integration.sh │ ├── test-node-info.js │ ├── test-node-type-validation.ts │ ├── test-nodes-base-prefix.ts │ ├── test-operation-validation.ts │ ├── test-optimized-docker.sh │ ├── test-release-automation.js │ ├── test-search-improvements.ts │ ├── test-security.ts │ ├── test-single-session.sh │ ├── test-sqljs-triggers.ts │ ├── test-telemetry-debug.ts │ ├── test-telemetry-direct.ts │ ├── test-telemetry-env.ts │ ├── test-telemetry-integration.ts │ ├── test-telemetry-no-select.ts │ ├── test-telemetry-security.ts │ ├── test-telemetry-simple.ts │ ├── test-typeversion-validation.ts │ ├── test-url-configuration.ts │ ├── test-user-id-persistence.ts │ ├── test-webhook-validation.ts │ ├── test-workflow-insert.ts │ ├── test-workflow-sanitizer.ts │ ├── test-workflow-tracking-debug.ts │ ├── update-and-publish-prep.sh │ ├── update-n8n-deps.js │ ├── update-readme-version.js │ ├── vitest-benchmark-json-reporter.js │ └── vitest-benchmark-reporter.ts ├── SECURITY.md ├── src │ ├── config │ │ └── n8n-api.ts │ ├── data │ │ └── canonical-ai-tool-examples.json │ ├── database │ │ ├── database-adapter.ts │ │ ├── migrations │ │ │ └── add-template-node-configs.sql │ │ ├── node-repository.ts │ │ ├── nodes.db │ │ ├── schema-optimized.sql │ │ └── schema.sql │ ├── errors │ │ └── validation-service-error.ts │ ├── http-server-single-session.ts │ ├── http-server.ts │ ├── index.ts │ ├── loaders │ │ └── node-loader.ts │ ├── mappers │ │ └── docs-mapper.ts │ ├── mcp │ │ ├── handlers-n8n-manager.ts │ │ ├── handlers-workflow-diff.ts │ │ ├── index.ts │ │ ├── server.ts │ │ ├── stdio-wrapper.ts │ │ ├── tool-docs │ │ │ ├── configuration │ │ │ │ ├── get-node-as-tool-info.ts │ │ │ │ ├── get-node-documentation.ts │ │ │ │ ├── get-node-essentials.ts │ │ │ │ ├── get-node-info.ts │ │ │ │ ├── get-property-dependencies.ts │ │ │ │ ├── index.ts │ │ │ │ └── search-node-properties.ts │ │ │ ├── discovery │ │ │ │ ├── get-database-statistics.ts │ │ │ │ ├── index.ts │ │ │ │ ├── list-ai-tools.ts │ │ │ │ ├── list-nodes.ts │ │ │ │ └── search-nodes.ts │ │ │ ├── guides │ │ │ │ ├── ai-agents-guide.ts │ │ │ │ └── index.ts │ │ │ ├── index.ts │ │ │ ├── system │ │ │ │ ├── index.ts │ │ │ │ ├── n8n-diagnostic.ts │ │ │ │ ├── n8n-health-check.ts │ │ │ │ ├── n8n-list-available-tools.ts │ │ │ │ └── tools-documentation.ts │ │ │ ├── templates │ │ │ │ ├── get-template.ts │ │ │ │ ├── get-templates-for-task.ts │ │ │ │ ├── index.ts │ │ │ │ ├── list-node-templates.ts │ │ │ │ ├── list-tasks.ts │ │ │ │ ├── 
search-templates-by-metadata.ts │ │ │ │ └── search-templates.ts │ │ │ ├── types.ts │ │ │ ├── validation │ │ │ │ ├── index.ts │ │ │ │ ├── validate-node-minimal.ts │ │ │ │ ├── validate-node-operation.ts │ │ │ │ ├── validate-workflow-connections.ts │ │ │ │ ├── validate-workflow-expressions.ts │ │ │ │ └── validate-workflow.ts │ │ │ └── workflow_management │ │ │ ├── index.ts │ │ │ ├── n8n-autofix-workflow.ts │ │ │ ├── n8n-create-workflow.ts │ │ │ ├── n8n-delete-execution.ts │ │ │ ├── n8n-delete-workflow.ts │ │ │ ├── n8n-get-execution.ts │ │ │ ├── n8n-get-workflow-details.ts │ │ │ ├── n8n-get-workflow-minimal.ts │ │ │ ├── n8n-get-workflow-structure.ts │ │ │ ├── n8n-get-workflow.ts │ │ │ ├── n8n-list-executions.ts │ │ │ ├── n8n-list-workflows.ts │ │ │ ├── n8n-trigger-webhook-workflow.ts │ │ │ ├── n8n-update-full-workflow.ts │ │ │ ├── n8n-update-partial-workflow.ts │ │ │ └── n8n-validate-workflow.ts │ │ ├── tools-documentation.ts │ │ ├── tools-n8n-friendly.ts │ │ ├── tools-n8n-manager.ts │ │ ├── tools.ts │ │ └── workflow-examples.ts │ ├── mcp-engine.ts │ ├── mcp-tools-engine.ts │ ├── n8n │ │ ├── MCPApi.credentials.ts │ │ └── MCPNode.node.ts │ ├── parsers │ │ ├── node-parser.ts │ │ ├── property-extractor.ts │ │ └── simple-parser.ts │ ├── scripts │ │ ├── debug-http-search.ts │ │ ├── extract-from-docker.ts │ │ ├── fetch-templates-robust.ts │ │ ├── fetch-templates.ts │ │ ├── rebuild-database.ts │ │ ├── rebuild-optimized.ts │ │ ├── rebuild.ts │ │ ├── sanitize-templates.ts │ │ ├── seed-canonical-ai-examples.ts │ │ ├── test-autofix-documentation.ts │ │ ├── test-autofix-workflow.ts │ │ ├── test-execution-filtering.ts │ │ ├── test-node-suggestions.ts │ │ ├── test-protocol-negotiation.ts │ │ ├── test-summary.ts │ │ ├── test-webhook-autofix.ts │ │ ├── validate.ts │ │ └── validation-summary.ts │ ├── services │ │ ├── ai-node-validator.ts │ │ ├── ai-tool-validators.ts │ │ ├── confidence-scorer.ts │ │ ├── config-validator.ts │ │ ├── enhanced-config-validator.ts │ │ ├── example-generator.ts │ │ ├── execution-processor.ts │ │ ├── expression-format-validator.ts │ │ ├── expression-validator.ts │ │ ├── n8n-api-client.ts │ │ ├── n8n-validation.ts │ │ ├── node-documentation-service.ts │ │ ├── node-similarity-service.ts │ │ ├── node-specific-validators.ts │ │ ├── operation-similarity-service.ts │ │ ├── property-dependencies.ts │ │ ├── property-filter.ts │ │ ├── resource-similarity-service.ts │ │ ├── sqlite-storage-service.ts │ │ ├── task-templates.ts │ │ ├── universal-expression-validator.ts │ │ ├── workflow-auto-fixer.ts │ │ ├── workflow-diff-engine.ts │ │ └── workflow-validator.ts │ ├── telemetry │ │ ├── batch-processor.ts │ │ ├── config-manager.ts │ │ ├── early-error-logger.ts │ │ ├── error-sanitization-utils.ts │ │ ├── error-sanitizer.ts │ │ ├── event-tracker.ts │ │ ├── event-validator.ts │ │ ├── index.ts │ │ ├── performance-monitor.ts │ │ ├── rate-limiter.ts │ │ ├── startup-checkpoints.ts │ │ ├── telemetry-error.ts │ │ ├── telemetry-manager.ts │ │ ├── telemetry-types.ts │ │ └── workflow-sanitizer.ts │ ├── templates │ │ ├── batch-processor.ts │ │ ├── metadata-generator.ts │ │ ├── README.md │ │ ├── template-fetcher.ts │ │ ├── template-repository.ts │ │ └── template-service.ts │ ├── types │ │ ├── index.ts │ │ ├── instance-context.ts │ │ ├── n8n-api.ts │ │ ├── node-types.ts │ │ └── workflow-diff.ts │ └── utils │ ├── auth.ts │ ├── bridge.ts │ ├── cache-utils.ts │ ├── console-manager.ts │ ├── documentation-fetcher.ts │ ├── enhanced-documentation-fetcher.ts │ ├── error-handler.ts │ ├── example-generator.ts │ ├── 
fixed-collection-validator.ts │ ├── logger.ts │ ├── mcp-client.ts │ ├── n8n-errors.ts │ ├── node-source-extractor.ts │ ├── node-type-normalizer.ts │ ├── node-type-utils.ts │ ├── node-utils.ts │ ├── npm-version-checker.ts │ ├── protocol-version.ts │ ├── simple-cache.ts │ ├── ssrf-protection.ts │ ├── template-node-resolver.ts │ ├── template-sanitizer.ts │ ├── url-detector.ts │ ├── validation-schemas.ts │ └── version.ts ├── test-output.txt ├── test-reinit-fix.sh ├── tests │ ├── __snapshots__ │ │ └── .gitkeep │ ├── auth.test.ts │ ├── benchmarks │ │ ├── database-queries.bench.ts │ │ ├── index.ts │ │ ├── mcp-tools.bench.ts │ │ ├── mcp-tools.bench.ts.disabled │ │ ├── mcp-tools.bench.ts.skip │ │ ├── node-loading.bench.ts.disabled │ │ ├── README.md │ │ ├── search-operations.bench.ts.disabled │ │ └── validation-performance.bench.ts.disabled │ ├── bridge.test.ts │ ├── comprehensive-extraction-test.js │ ├── data │ │ └── .gitkeep │ ├── debug-slack-doc.js │ ├── demo-enhanced-documentation.js │ ├── docker-tests-README.md │ ├── error-handler.test.ts │ ├── examples │ │ └── using-database-utils.test.ts │ ├── extracted-nodes-db │ │ ├── database-import.json │ │ ├── extraction-report.json │ │ ├── insert-nodes.sql │ │ ├── n8n-nodes-base__Airtable.json │ │ ├── n8n-nodes-base__Discord.json │ │ ├── n8n-nodes-base__Function.json │ │ ├── n8n-nodes-base__HttpRequest.json │ │ ├── n8n-nodes-base__If.json │ │ ├── n8n-nodes-base__Slack.json │ │ ├── n8n-nodes-base__SplitInBatches.json │ │ └── n8n-nodes-base__Webhook.json │ ├── factories │ │ ├── node-factory.ts │ │ └── property-definition-factory.ts │ ├── fixtures │ │ ├── .gitkeep │ │ ├── database │ │ │ └── test-nodes.json │ │ ├── factories │ │ │ ├── node.factory.ts │ │ │ └── parser-node.factory.ts │ │ └── template-configs.ts │ ├── helpers │ │ └── env-helpers.ts │ ├── http-server-auth.test.ts │ ├── integration │ │ ├── ai-validation │ │ │ ├── ai-agent-validation.test.ts │ │ │ ├── ai-tool-validation.test.ts │ │ │ ├── chat-trigger-validation.test.ts │ │ │ ├── e2e-validation.test.ts │ │ │ ├── helpers.ts │ │ │ ├── llm-chain-validation.test.ts │ │ │ ├── README.md │ │ │ └── TEST_REPORT.md │ │ ├── ci │ │ │ └── database-population.test.ts │ │ ├── database │ │ │ ├── connection-management.test.ts │ │ │ ├── empty-database.test.ts │ │ │ ├── fts5-search.test.ts │ │ │ ├── node-fts5-search.test.ts │ │ │ ├── node-repository.test.ts │ │ │ ├── performance.test.ts │ │ │ ├── sqljs-memory-leak.test.ts │ │ │ ├── template-node-configs.test.ts │ │ │ ├── template-repository.test.ts │ │ │ ├── test-utils.ts │ │ │ └── transactions.test.ts │ │ ├── database-integration.test.ts │ │ ├── docker │ │ │ ├── docker-config.test.ts │ │ │ ├── docker-entrypoint.test.ts │ │ │ └── test-helpers.ts │ │ ├── flexible-instance-config.test.ts │ │ ├── mcp │ │ │ └── template-examples-e2e.test.ts │ │ ├── mcp-protocol │ │ │ ├── basic-connection.test.ts │ │ │ ├── error-handling.test.ts │ │ │ ├── performance.test.ts │ │ │ ├── protocol-compliance.test.ts │ │ │ ├── README.md │ │ │ ├── session-management.test.ts │ │ │ ├── test-helpers.ts │ │ │ ├── tool-invocation.test.ts │ │ │ └── workflow-error-validation.test.ts │ │ ├── msw-setup.test.ts │ │ ├── n8n-api │ │ │ ├── executions │ │ │ │ ├── delete-execution.test.ts │ │ │ │ ├── get-execution.test.ts │ │ │ │ ├── list-executions.test.ts │ │ │ │ └── trigger-webhook.test.ts │ │ │ ├── scripts │ │ │ │ └── cleanup-orphans.ts │ │ │ ├── system │ │ │ │ ├── diagnostic.test.ts │ │ │ │ ├── health-check.test.ts │ │ │ │ └── list-tools.test.ts │ │ │ ├── test-connection.ts │ │ │ ├── types │ │ │ │ └── 
mcp-responses.ts │ │ │ ├── utils │ │ │ │ ├── cleanup-helpers.ts │ │ │ │ ├── credentials.ts │ │ │ │ ├── factories.ts │ │ │ │ ├── fixtures.ts │ │ │ │ ├── mcp-context.ts │ │ │ │ ├── n8n-client.ts │ │ │ │ ├── node-repository.ts │ │ │ │ ├── response-types.ts │ │ │ │ ├── test-context.ts │ │ │ │ └── webhook-workflows.ts │ │ │ └── workflows │ │ │ ├── autofix-workflow.test.ts │ │ │ ├── create-workflow.test.ts │ │ │ ├── delete-workflow.test.ts │ │ │ ├── get-workflow-details.test.ts │ │ │ ├── get-workflow-minimal.test.ts │ │ │ ├── get-workflow-structure.test.ts │ │ │ ├── get-workflow.test.ts │ │ │ ├── list-workflows.test.ts │ │ │ ├── smart-parameters.test.ts │ │ │ ├── update-partial-workflow.test.ts │ │ │ ├── update-workflow.test.ts │ │ │ └── validate-workflow.test.ts │ │ ├── security │ │ │ ├── command-injection-prevention.test.ts │ │ │ └── rate-limiting.test.ts │ │ ├── setup │ │ │ ├── integration-setup.ts │ │ │ └── msw-test-server.ts │ │ ├── telemetry │ │ │ ├── docker-user-id-stability.test.ts │ │ │ └── mcp-telemetry.test.ts │ │ ├── templates │ │ │ └── metadata-operations.test.ts │ │ └── workflow-creation-node-type-format.test.ts │ ├── logger.test.ts │ ├── MOCKING_STRATEGY.md │ ├── mocks │ │ ├── n8n-api │ │ │ ├── data │ │ │ │ ├── credentials.ts │ │ │ │ ├── executions.ts │ │ │ │ └── workflows.ts │ │ │ ├── handlers.ts │ │ │ └── index.ts │ │ └── README.md │ ├── node-storage-export.json │ ├── setup │ │ ├── global-setup.ts │ │ ├── msw-setup.ts │ │ ├── TEST_ENV_DOCUMENTATION.md │ │ └── test-env.ts │ ├── test-database-extraction.js │ ├── test-direct-extraction.js │ ├── test-enhanced-documentation.js │ ├── test-enhanced-integration.js │ ├── test-mcp-extraction.js │ ├── test-mcp-server-extraction.js │ ├── test-mcp-tools-integration.js │ ├── test-node-documentation-service.js │ ├── test-node-list.js │ ├── test-package-info.js │ ├── test-parsing-operations.js │ ├── test-slack-node-complete.js │ ├── test-small-rebuild.js │ ├── test-sqlite-search.js │ ├── test-storage-system.js │ ├── unit │ │ ├── __mocks__ │ │ │ ├── n8n-nodes-base.test.ts │ │ │ ├── n8n-nodes-base.ts │ │ │ └── README.md │ │ ├── database │ │ │ ├── __mocks__ │ │ │ │ └── better-sqlite3.ts │ │ │ ├── database-adapter-unit.test.ts │ │ │ ├── node-repository-core.test.ts │ │ │ ├── node-repository-operations.test.ts │ │ │ ├── node-repository-outputs.test.ts │ │ │ ├── README.md │ │ │ └── template-repository-core.test.ts │ │ ├── docker │ │ │ ├── config-security.test.ts │ │ │ ├── edge-cases.test.ts │ │ │ ├── parse-config.test.ts │ │ │ └── serve-command.test.ts │ │ ├── errors │ │ │ └── validation-service-error.test.ts │ │ ├── examples │ │ │ └── using-n8n-nodes-base-mock.test.ts │ │ ├── flexible-instance-security-advanced.test.ts │ │ ├── flexible-instance-security.test.ts │ │ ├── http-server │ │ │ └── multi-tenant-support.test.ts │ │ ├── http-server-n8n-mode.test.ts │ │ ├── http-server-n8n-reinit.test.ts │ │ ├── http-server-session-management.test.ts │ │ ├── loaders │ │ │ └── node-loader.test.ts │ │ ├── mappers │ │ │ └── docs-mapper.test.ts │ │ ├── mcp │ │ │ ├── get-node-essentials-examples.test.ts │ │ │ ├── handlers-n8n-manager-simple.test.ts │ │ │ ├── handlers-n8n-manager.test.ts │ │ │ ├── handlers-workflow-diff.test.ts │ │ │ ├── lru-cache-behavior.test.ts │ │ │ ├── multi-tenant-tool-listing.test.ts.disabled │ │ │ ├── parameter-validation.test.ts │ │ │ ├── search-nodes-examples.test.ts │ │ │ ├── tools-documentation.test.ts │ │ │ └── tools.test.ts │ │ ├── monitoring │ │ │ └── cache-metrics.test.ts │ │ ├── MULTI_TENANT_TEST_COVERAGE.md │ │ ├── 
multi-tenant-integration.test.ts │ │ ├── parsers │ │ │ ├── node-parser-outputs.test.ts │ │ │ ├── node-parser.test.ts │ │ │ ├── property-extractor.test.ts │ │ │ └── simple-parser.test.ts │ │ ├── scripts │ │ │ └── fetch-templates-extraction.test.ts │ │ ├── services │ │ │ ├── ai-node-validator.test.ts │ │ │ ├── ai-tool-validators.test.ts │ │ │ ├── confidence-scorer.test.ts │ │ │ ├── config-validator-basic.test.ts │ │ │ ├── config-validator-edge-cases.test.ts │ │ │ ├── config-validator-node-specific.test.ts │ │ │ ├── config-validator-security.test.ts │ │ │ ├── debug-validator.test.ts │ │ │ ├── enhanced-config-validator-integration.test.ts │ │ │ ├── enhanced-config-validator-operations.test.ts │ │ │ ├── enhanced-config-validator.test.ts │ │ │ ├── example-generator.test.ts │ │ │ ├── execution-processor.test.ts │ │ │ ├── expression-format-validator.test.ts │ │ │ ├── expression-validator-edge-cases.test.ts │ │ │ ├── expression-validator.test.ts │ │ │ ├── fixed-collection-validation.test.ts │ │ │ ├── loop-output-edge-cases.test.ts │ │ │ ├── n8n-api-client.test.ts │ │ │ ├── n8n-validation.test.ts │ │ │ ├── node-similarity-service.test.ts │ │ │ ├── node-specific-validators.test.ts │ │ │ ├── operation-similarity-service-comprehensive.test.ts │ │ │ ├── operation-similarity-service.test.ts │ │ │ ├── property-dependencies.test.ts │ │ │ ├── property-filter-edge-cases.test.ts │ │ │ ├── property-filter.test.ts │ │ │ ├── resource-similarity-service-comprehensive.test.ts │ │ │ ├── resource-similarity-service.test.ts │ │ │ ├── task-templates.test.ts │ │ │ ├── template-service.test.ts │ │ │ ├── universal-expression-validator.test.ts │ │ │ ├── validation-fixes.test.ts │ │ │ ├── workflow-auto-fixer.test.ts │ │ │ ├── workflow-diff-engine.test.ts │ │ │ ├── workflow-fixed-collection-validation.test.ts │ │ │ ├── workflow-validator-comprehensive.test.ts │ │ │ ├── workflow-validator-edge-cases.test.ts │ │ │ ├── workflow-validator-error-outputs.test.ts │ │ │ ├── workflow-validator-expression-format.test.ts │ │ │ ├── workflow-validator-loops-simple.test.ts │ │ │ ├── workflow-validator-loops.test.ts │ │ │ ├── workflow-validator-mocks.test.ts │ │ │ ├── workflow-validator-performance.test.ts │ │ │ ├── workflow-validator-with-mocks.test.ts │ │ │ └── workflow-validator.test.ts │ │ ├── telemetry │ │ │ ├── batch-processor.test.ts │ │ │ ├── config-manager.test.ts │ │ │ ├── event-tracker.test.ts │ │ │ ├── event-validator.test.ts │ │ │ ├── rate-limiter.test.ts │ │ │ ├── telemetry-error.test.ts │ │ │ ├── telemetry-manager.test.ts │ │ │ ├── v2.18.3-fixes-verification.test.ts │ │ │ └── workflow-sanitizer.test.ts │ │ ├── templates │ │ │ ├── batch-processor.test.ts │ │ │ ├── metadata-generator.test.ts │ │ │ ├── template-repository-metadata.test.ts │ │ │ └── template-repository-security.test.ts │ │ ├── test-env-example.test.ts │ │ ├── test-infrastructure.test.ts │ │ ├── types │ │ │ ├── instance-context-coverage.test.ts │ │ │ └── instance-context-multi-tenant.test.ts │ │ ├── utils │ │ │ ├── auth-timing-safe.test.ts │ │ │ ├── cache-utils.test.ts │ │ │ ├── console-manager.test.ts │ │ │ ├── database-utils.test.ts │ │ │ ├── fixed-collection-validator.test.ts │ │ │ ├── n8n-errors.test.ts │ │ │ ├── node-type-normalizer.test.ts │ │ │ ├── node-type-utils.test.ts │ │ │ ├── node-utils.test.ts │ │ │ ├── simple-cache-memory-leak-fix.test.ts │ │ │ ├── ssrf-protection.test.ts │ │ │ └── template-node-resolver.test.ts │ │ └── validation-fixes.test.ts │ └── utils │ ├── assertions.ts │ ├── builders │ │ └── workflow.builder.ts │ ├── data-generators.ts │ 
├── database-utils.ts │ ├── README.md │ └── test-helpers.ts ├── thumbnail.png ├── tsconfig.build.json ├── tsconfig.json ├── types │ ├── mcp.d.ts │ └── test-env.d.ts ├── verify-telemetry-fix.js ├── versioned-nodes.md ├── vitest.config.benchmark.ts ├── vitest.config.integration.ts └── vitest.config.ts ``` # Files -------------------------------------------------------------------------------- /tests/unit/mcp/search-nodes-examples.test.ts: -------------------------------------------------------------------------------- ```typescript import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; import { N8NDocumentationMCPServer } from '../../../src/mcp/server'; import { createDatabaseAdapter } from '../../../src/database/database-adapter'; import path from 'path'; import fs from 'fs'; /** * Unit tests for search_nodes with includeExamples parameter * Testing P0-R3 feature: Template-based configuration examples */ describe('search_nodes with includeExamples', () => { let server: N8NDocumentationMCPServer; let dbPath: string; beforeEach(async () => { // Use in-memory database for testing process.env.NODE_DB_PATH = ':memory:'; server = new N8NDocumentationMCPServer(); await (server as any).initialized; // Populate in-memory database with test nodes // NOTE: Database stores nodes in SHORT form (nodes-base.xxx, not n8n-nodes-base.xxx) const testNodes = [ { node_type: 'nodes-base.webhook', package_name: 'n8n-nodes-base', display_name: 'Webhook', description: 'Starts workflow on webhook call', category: 'Core Nodes', is_ai_tool: 0, is_trigger: 1, is_webhook: 1, is_versioned: 1, version: '1', properties_schema: JSON.stringify([]), operations: JSON.stringify([]) }, { node_type: 'nodes-base.httpRequest', package_name: 'n8n-nodes-base', display_name: 'HTTP Request', description: 'Makes an HTTP request', category: 'Core Nodes', is_ai_tool: 0, is_trigger: 0, is_webhook: 0, is_versioned: 1, version: '1', properties_schema: JSON.stringify([]), operations: JSON.stringify([]) } ]; // Insert test nodes into the in-memory database const db = (server as any).db; if (db) { const insertStmt = db.prepare(` INSERT INTO nodes ( node_type, package_name, display_name, description, category, is_ai_tool, is_trigger, is_webhook, is_versioned, version, properties_schema, operations ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
`); for (const node of testNodes) { insertStmt.run( node.node_type, node.package_name, node.display_name, node.description, node.category, node.is_ai_tool, node.is_trigger, node.is_webhook, node.is_versioned, node.version, node.properties_schema, node.operations ); } // Note: FTS table is not created in test environment // searchNodes will fall back to LIKE search when FTS doesn't exist } }); afterEach(() => { delete process.env.NODE_DB_PATH; }); describe('includeExamples parameter', () => { it('should not include examples when includeExamples is false', async () => { const result = await (server as any).searchNodes('webhook', 5, { includeExamples: false }); expect(result.results).toBeDefined(); if (result.results.length > 0) { result.results.forEach((node: any) => { expect(node.examples).toBeUndefined(); }); } }); it('should not include examples when includeExamples is undefined', async () => { const result = await (server as any).searchNodes('webhook', 5, {}); expect(result.results).toBeDefined(); if (result.results.length > 0) { result.results.forEach((node: any) => { expect(node.examples).toBeUndefined(); }); } }); it('should include examples when includeExamples is true', async () => { const result = await (server as any).searchNodes('webhook', 5, { includeExamples: true }); expect(result.results).toBeDefined(); // Note: In-memory test database may not have template configs // This test validates the parameter is processed correctly }); it('should handle nodes without examples gracefully', async () => { const result = await (server as any).searchNodes('nonexistent', 5, { includeExamples: true }); expect(result.results).toBeDefined(); expect(result.results).toHaveLength(0); }); it('should limit examples to top 2 per node', async () => { // This test would need a database with actual template_node_configs data // In a real scenario, we'd verify that only 2 examples are returned const result = await (server as any).searchNodes('http', 5, { includeExamples: true }); expect(result.results).toBeDefined(); if (result.results.length > 0) { result.results.forEach((node: any) => { if (node.examples) { expect(node.examples.length).toBeLessThanOrEqual(2); } }); } }); }); describe('example data structure', () => { it('should return examples with correct structure when present', async () => { // Mock database to return example data const mockDb = (server as any).db; if (mockDb) { const originalPrepare = mockDb.prepare.bind(mockDb); mockDb.prepare = vi.fn((query: string) => { if (query.includes('template_node_configs')) { return { all: vi.fn(() => [ { parameters_json: JSON.stringify({ httpMethod: 'POST', path: 'webhook-test' }), template_name: 'Test Template', template_views: 1000 }, { parameters_json: JSON.stringify({ httpMethod: 'GET', path: 'webhook-get' }), template_name: 'Another Template', template_views: 500 } ]) }; } return originalPrepare(query); }); const result = await (server as any).searchNodes('webhook', 5, { includeExamples: true }); if (result.results.length > 0 && result.results[0].examples) { const example = result.results[0].examples[0]; expect(example).toHaveProperty('configuration'); expect(example).toHaveProperty('template'); expect(example).toHaveProperty('views'); expect(typeof example.configuration).toBe('object'); expect(typeof example.template).toBe('string'); expect(typeof example.views).toBe('number'); } } }); }); describe('backward compatibility', () => { it('should maintain backward compatibility when includeExamples not specified', async () => { const 
resultWithoutParam = await (server as any).searchNodes('http', 5); const resultWithFalse = await (server as any).searchNodes('http', 5, { includeExamples: false }); expect(resultWithoutParam.results).toBeDefined(); expect(resultWithFalse.results).toBeDefined(); // Both should have same structure (no examples) if (resultWithoutParam.results.length > 0) { expect(resultWithoutParam.results[0].examples).toBeUndefined(); } if (resultWithFalse.results.length > 0) { expect(resultWithFalse.results[0].examples).toBeUndefined(); } }); }); describe('performance considerations', () => { it('should not significantly impact performance when includeExamples is false', async () => { const startWithout = Date.now(); await (server as any).searchNodes('http', 20, { includeExamples: false }); const durationWithout = Date.now() - startWithout; const startWith = Date.now(); await (server as any).searchNodes('http', 20, { includeExamples: true }); const durationWith = Date.now() - startWith; // Both should complete quickly (under 100ms) expect(durationWithout).toBeLessThan(100); expect(durationWith).toBeLessThan(200); }); }); describe('error handling', () => { it('should continue to work even if example fetch fails', async () => { // Mock database to throw error on example fetch const mockDb = (server as any).db; if (mockDb) { const originalPrepare = mockDb.prepare.bind(mockDb); mockDb.prepare = vi.fn((query: string) => { if (query.includes('template_node_configs')) { throw new Error('Database error'); } return originalPrepare(query); }); // Should not throw, should return results without examples const result = await (server as any).searchNodes('webhook', 5, { includeExamples: true }); expect(result.results).toBeDefined(); // Examples should be undefined due to error if (result.results.length > 0) { expect(result.results[0].examples).toBeUndefined(); } } }); it('should handle malformed parameters_json gracefully', async () => { const mockDb = (server as any).db; if (mockDb) { const originalPrepare = mockDb.prepare.bind(mockDb); mockDb.prepare = vi.fn((query: string) => { if (query.includes('template_node_configs')) { return { all: vi.fn(() => [ { parameters_json: 'invalid json', template_name: 'Test Template', template_views: 1000 } ]) }; } return originalPrepare(query); }); // Should not throw const result = await (server as any).searchNodes('webhook', 5, { includeExamples: true }); expect(result).toBeDefined(); } }); }); }); describe('searchNodesLIKE with includeExamples', () => { let server: N8NDocumentationMCPServer; beforeEach(async () => { process.env.NODE_DB_PATH = ':memory:'; server = new N8NDocumentationMCPServer(); await (server as any).initialized; // Populate in-memory database with test nodes const testNodes = [ { node_type: 'nodes-base.webhook', package_name: 'n8n-nodes-base', display_name: 'Webhook', description: 'Starts workflow on webhook call', category: 'Core Nodes', is_ai_tool: 0, is_trigger: 1, is_webhook: 1, is_versioned: 1, version: '1', properties_schema: JSON.stringify([]), operations: JSON.stringify([]) } ]; const db = (server as any).db; if (db) { const insertStmt = db.prepare(` INSERT INTO nodes ( node_type, package_name, display_name, description, category, is_ai_tool, is_trigger, is_webhook, is_versioned, version, properties_schema, operations ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
`); for (const node of testNodes) { insertStmt.run( node.node_type, node.package_name, node.display_name, node.description, node.category, node.is_ai_tool, node.is_trigger, node.is_webhook, node.is_versioned, node.version, node.properties_schema, node.operations ); } } }); afterEach(() => { delete process.env.NODE_DB_PATH; }); it('should support includeExamples in LIKE search', async () => { const result = await (server as any).searchNodesLIKE('webhook', 5, { includeExamples: true }); expect(result).toBeDefined(); expect(result.results).toBeDefined(); expect(Array.isArray(result.results)).toBe(true); }); it('should not include examples when includeExamples is false', async () => { const result = await (server as any).searchNodesLIKE('webhook', 5, { includeExamples: false }); expect(result).toBeDefined(); expect(result.results).toBeDefined(); if (result.results.length > 0) { result.results.forEach((node: any) => { expect(node.examples).toBeUndefined(); }); } }); }); describe('searchNodesFTS with includeExamples', () => { let server: N8NDocumentationMCPServer; beforeEach(async () => { process.env.NODE_DB_PATH = ':memory:'; server = new N8NDocumentationMCPServer(); await (server as any).initialized; }); afterEach(() => { delete process.env.NODE_DB_PATH; }); it('should support includeExamples in FTS search', async () => { const result = await (server as any).searchNodesFTS('webhook', 5, 'OR', { includeExamples: true }); expect(result.results).toBeDefined(); expect(Array.isArray(result.results)).toBe(true); }); it('should pass options to example fetching logic', async () => { const result = await (server as any).searchNodesFTS('http', 5, 'AND', { includeExamples: true }); expect(result).toBeDefined(); expect(result.results).toBeDefined(); }); }); ``` -------------------------------------------------------------------------------- /tests/unit/services/expression-format-validator.test.ts: -------------------------------------------------------------------------------- ```typescript import { describe, it, expect } from 'vitest'; import { ExpressionFormatValidator } from '../../../src/services/expression-format-validator'; describe('ExpressionFormatValidator', () => { describe('validateAndFix', () => { const context = { nodeType: 'n8n-nodes-base.httpRequest', nodeName: 'HTTP Request', nodeId: 'test-id-1' }; describe('Simple string expressions', () => { it('should detect missing = prefix for expression', () => { const value = '{{ $env.API_KEY }}'; const issue = ExpressionFormatValidator.validateAndFix(value, 'apiKey', context); expect(issue).toBeTruthy(); expect(issue?.issueType).toBe('missing-prefix'); expect(issue?.correctedValue).toBe('={{ $env.API_KEY }}'); expect(issue?.severity).toBe('error'); }); it('should accept expression with = prefix', () => { const value = '={{ $env.API_KEY }}'; const issue = ExpressionFormatValidator.validateAndFix(value, 'apiKey', context); expect(issue).toBeNull(); }); it('should detect mixed content without prefix', () => { const value = 'Bearer {{ $env.TOKEN }}'; const issue = ExpressionFormatValidator.validateAndFix(value, 'authorization', context); expect(issue).toBeTruthy(); expect(issue?.issueType).toBe('missing-prefix'); expect(issue?.correctedValue).toBe('=Bearer {{ $env.TOKEN }}'); }); it('should accept mixed content with prefix', () => { const value = '=Bearer {{ $env.TOKEN }}'; const issue = ExpressionFormatValidator.validateAndFix(value, 'authorization', context); expect(issue).toBeNull(); }); it('should ignore plain strings without expressions', () => { 
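        // A plain URL contains no {{ }} tokens, so no '=' prefix is required and no issue should be reported.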
const value = 'https://api.example.com'; const issue = ExpressionFormatValidator.validateAndFix(value, 'url', context); expect(issue).toBeNull(); }); }); describe('Resource Locator fields', () => { const githubContext = { nodeType: 'n8n-nodes-base.github', nodeName: 'GitHub', nodeId: 'github-1' }; it('should detect expression in owner field needing resource locator', () => { const value = '{{ $vars.GITHUB_OWNER }}'; const issue = ExpressionFormatValidator.validateAndFix(value, 'owner', githubContext); expect(issue).toBeTruthy(); expect(issue?.issueType).toBe('needs-resource-locator'); expect(issue?.correctedValue).toEqual({ __rl: true, value: '={{ $vars.GITHUB_OWNER }}', mode: 'expression' }); expect(issue?.severity).toBe('error'); }); it('should accept resource locator with expression', () => { const value = { __rl: true, value: '={{ $vars.GITHUB_OWNER }}', mode: 'expression' }; const issue = ExpressionFormatValidator.validateAndFix(value, 'owner', githubContext); expect(issue).toBeNull(); }); it('should detect missing prefix in resource locator value', () => { const value = { __rl: true, value: '{{ $vars.GITHUB_OWNER }}', mode: 'expression' }; const issue = ExpressionFormatValidator.validateAndFix(value, 'owner', githubContext); expect(issue).toBeTruthy(); expect(issue?.issueType).toBe('missing-prefix'); expect(issue?.correctedValue.value).toBe('={{ $vars.GITHUB_OWNER }}'); }); it('should warn if expression has prefix but should use RL format', () => { const value = '={{ $vars.GITHUB_OWNER }}'; const issue = ExpressionFormatValidator.validateAndFix(value, 'owner', githubContext); expect(issue).toBeTruthy(); expect(issue?.issueType).toBe('needs-resource-locator'); expect(issue?.severity).toBe('warning'); }); }); describe('Multiple expressions', () => { it('should detect multiple expressions without prefix', () => { const value = '{{ $json.first }} - {{ $json.last }}'; const issue = ExpressionFormatValidator.validateAndFix(value, 'fullName', context); expect(issue).toBeTruthy(); expect(issue?.issueType).toBe('missing-prefix'); expect(issue?.correctedValue).toBe('={{ $json.first }} - {{ $json.last }}'); }); it('should accept multiple expressions with prefix', () => { const value = '={{ $json.first }} - {{ $json.last }}'; const issue = ExpressionFormatValidator.validateAndFix(value, 'fullName', context); expect(issue).toBeNull(); }); }); describe('Edge cases', () => { it('should handle null values', () => { const issue = ExpressionFormatValidator.validateAndFix(null, 'field', context); expect(issue).toBeNull(); }); it('should handle undefined values', () => { const issue = ExpressionFormatValidator.validateAndFix(undefined, 'field', context); expect(issue).toBeNull(); }); it('should handle empty strings', () => { const issue = ExpressionFormatValidator.validateAndFix('', 'field', context); expect(issue).toBeNull(); }); it('should handle numbers', () => { const issue = ExpressionFormatValidator.validateAndFix(42, 'field', context); expect(issue).toBeNull(); }); it('should handle booleans', () => { const issue = ExpressionFormatValidator.validateAndFix(true, 'field', context); expect(issue).toBeNull(); }); it('should handle arrays', () => { const issue = ExpressionFormatValidator.validateAndFix(['item1', 'item2'], 'field', context); expect(issue).toBeNull(); }); }); }); describe('validateNodeParameters', () => { const context = { nodeType: 'n8n-nodes-base.emailSend', nodeName: 'Send Email', nodeId: 'email-1' }; it('should validate all parameters recursively', () => { const parameters = { 
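        // Four values below embed {{ }} without the '=' prefix (fromEmail, subject,
        // body.html, body.text); toEmail is plain text and options.replyTo is already prefixed.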
fromEmail: '{{ $env.SENDER_EMAIL }}', toEmail: '[email protected]', subject: 'Test {{ $json.type }}', body: { html: '<p>Hello {{ $json.name }}</p>', text: 'Hello {{ $json.name }}' }, options: { replyTo: '={{ $env.REPLY_EMAIL }}' } }; const issues = ExpressionFormatValidator.validateNodeParameters(parameters, context); expect(issues).toHaveLength(4); expect(issues.map(i => i.fieldPath)).toContain('fromEmail'); expect(issues.map(i => i.fieldPath)).toContain('subject'); expect(issues.map(i => i.fieldPath)).toContain('body.html'); expect(issues.map(i => i.fieldPath)).toContain('body.text'); }); it('should handle arrays with expressions', () => { const parameters = { recipients: [ '{{ $json.email1 }}', '[email protected]', '={{ $json.email2 }}' ] }; const issues = ExpressionFormatValidator.validateNodeParameters(parameters, context); expect(issues).toHaveLength(1); expect(issues[0].fieldPath).toBe('recipients[0]'); expect(issues[0].correctedValue).toBe('={{ $json.email1 }}'); }); it('should handle nested objects', () => { const parameters = { config: { database: { host: '{{ $env.DB_HOST }}', port: 5432, name: 'mydb' } } }; const issues = ExpressionFormatValidator.validateNodeParameters(parameters, context); expect(issues).toHaveLength(1); expect(issues[0].fieldPath).toBe('config.database.host'); }); it('should skip circular references', () => { const circular: any = { a: 1 }; circular.self = circular; const parameters = { normal: '{{ $json.value }}', circular }; const issues = ExpressionFormatValidator.validateNodeParameters(parameters, context); // Should only find the issue in 'normal', not crash on circular expect(issues).toHaveLength(1); expect(issues[0].fieldPath).toBe('normal'); }); it('should handle maximum recursion depth', () => { // Create a deeply nested object (105 levels deep, exceeding the limit of 100) let deepObject: any = { value: '{{ $json.data }}' }; let current = deepObject; for (let i = 0; i < 105; i++) { current.nested = { value: `{{ $json.level${i} }}` }; current = current.nested; } const parameters = { deep: deepObject }; const issues = ExpressionFormatValidator.validateNodeParameters(parameters, context); // Should find expression format issues up to the depth limit const depthWarning = issues.find(i => i.explanation.includes('Maximum recursion depth')); expect(depthWarning).toBeTruthy(); expect(depthWarning?.severity).toBe('warning'); // Should still find some expression format errors before hitting the limit const formatErrors = issues.filter(i => i.issueType === 'missing-prefix'); expect(formatErrors.length).toBeGreaterThan(0); expect(formatErrors.length).toBeLessThanOrEqual(100); // Should not exceed the depth limit }); }); describe('formatErrorMessage', () => { const context = { nodeType: 'n8n-nodes-base.github', nodeName: 'Create Issue', nodeId: 'github-1' }; it('should format error message for missing prefix', () => { const issue = { fieldPath: 'title', currentValue: '{{ $json.title }}', correctedValue: '={{ $json.title }}', issueType: 'missing-prefix' as const, explanation: "Expression missing required '=' prefix.", severity: 'error' as const }; const message = ExpressionFormatValidator.formatErrorMessage(issue, context); expect(message).toContain("Expression format error in node 'Create Issue'"); expect(message).toContain('Field \'title\''); expect(message).toContain('Current (incorrect):'); expect(message).toContain('"title": "{{ $json.title }}"'); expect(message).toContain('Fixed (correct):'); expect(message).toContain('"title": "={{ $json.title }}"'); }); 
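    // The next case covers the resource-locator variant: the corrected value is an
    // object ({ __rl, value, mode }) rather than a string, so the assertions below
    // expect the fix to be rendered as JSON in the formatted message.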
it('should format error message for resource locator', () => { const issue = { fieldPath: 'owner', currentValue: '{{ $vars.OWNER }}', correctedValue: { __rl: true, value: '={{ $vars.OWNER }}', mode: 'expression' }, issueType: 'needs-resource-locator' as const, explanation: 'Field needs resource locator format.', severity: 'error' as const }; const message = ExpressionFormatValidator.formatErrorMessage(issue, context); expect(message).toContain("Expression format error in node 'Create Issue'"); expect(message).toContain('Current (incorrect):'); expect(message).toContain('"owner": "{{ $vars.OWNER }}"'); expect(message).toContain('Fixed (correct):'); expect(message).toContain('"__rl": true'); expect(message).toContain('"value": "={{ $vars.OWNER }}"'); expect(message).toContain('"mode": "expression"'); }); }); describe('Real-world examples', () => { it('should validate Email Send node example', () => { const context = { nodeType: 'n8n-nodes-base.emailSend', nodeName: 'Error Handler', nodeId: 'b9dd1cfd-ee66-4049-97e7-1af6d976a4e0' }; const parameters = { fromEmail: '{{ $env.ADMIN_EMAIL }}', toEmail: '[email protected]', subject: 'GitHub Issue Workflow Error - HIGH PRIORITY', options: {} }; const issues = ExpressionFormatValidator.validateNodeParameters(parameters, context); expect(issues).toHaveLength(1); expect(issues[0].fieldPath).toBe('fromEmail'); expect(issues[0].correctedValue).toBe('={{ $env.ADMIN_EMAIL }}'); }); it('should validate GitHub node example', () => { const context = { nodeType: 'n8n-nodes-base.github', nodeName: 'Send Welcome Comment', nodeId: '3c742ca1-af8f-4d80-a47e-e68fb1ced491' }; const parameters = { operation: 'createComment', owner: '{{ $vars.GITHUB_OWNER }}', repository: '{{ $vars.GITHUB_REPO }}', issueNumber: null, body: '👋 Hi @{{ $(\'Extract Issue Data\').first().json.author }}!\n\nThank you for creating this issue.' 
}; const issues = ExpressionFormatValidator.validateNodeParameters(parameters, context); expect(issues.length).toBeGreaterThan(0); expect(issues.some(i => i.fieldPath === 'owner')).toBe(true); expect(issues.some(i => i.fieldPath === 'repository')).toBe(true); expect(issues.some(i => i.fieldPath === 'body')).toBe(true); }); }); }); ``` -------------------------------------------------------------------------------- /tests/unit/services/enhanced-config-validator-operations.test.ts: -------------------------------------------------------------------------------- ```typescript /** * Tests for EnhancedConfigValidator operation and resource validation */ import { describe, it, expect, beforeEach, afterEach } from 'vitest'; import { EnhancedConfigValidator } from '../../../src/services/enhanced-config-validator'; import { NodeRepository } from '../../../src/database/node-repository'; import { createTestDatabase } from '../../utils/database-utils'; describe('EnhancedConfigValidator - Operation and Resource Validation', () => { let repository: NodeRepository; let testDb: any; beforeEach(async () => { testDb = await createTestDatabase(); repository = testDb.nodeRepository; // Initialize similarity services EnhancedConfigValidator.initializeSimilarityServices(repository); // Add Google Drive test node const googleDriveNode = { nodeType: 'nodes-base.googleDrive', packageName: 'n8n-nodes-base', displayName: 'Google Drive', description: 'Access Google Drive', category: 'transform', style: 'declarative' as const, isAITool: false, isTrigger: false, isWebhook: false, isVersioned: true, version: '1', properties: [ { name: 'resource', type: 'options', required: true, options: [ { value: 'file', name: 'File' }, { value: 'folder', name: 'Folder' }, { value: 'fileFolder', name: 'File & Folder' } ] }, { name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['file'] } }, options: [ { value: 'copy', name: 'Copy' }, { value: 'delete', name: 'Delete' }, { value: 'download', name: 'Download' }, { value: 'list', name: 'List' }, { value: 'share', name: 'Share' }, { value: 'update', name: 'Update' }, { value: 'upload', name: 'Upload' } ] }, { name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['folder'] } }, options: [ { value: 'create', name: 'Create' }, { value: 'delete', name: 'Delete' }, { value: 'share', name: 'Share' } ] }, { name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['fileFolder'] } }, options: [ { value: 'search', name: 'Search' } ] } ], operations: [], credentials: [] }; repository.saveNode(googleDriveNode); // Add Slack test node const slackNode = { nodeType: 'nodes-base.slack', packageName: 'n8n-nodes-base', displayName: 'Slack', description: 'Send messages to Slack', category: 'communication', style: 'declarative' as const, isAITool: false, isTrigger: false, isWebhook: false, isVersioned: true, version: '2', properties: [ { name: 'resource', type: 'options', required: true, options: [ { value: 'channel', name: 'Channel' }, { value: 'message', name: 'Message' }, { value: 'user', name: 'User' } ] }, { name: 'operation', type: 'options', required: true, displayOptions: { show: { resource: ['message'] } }, options: [ { value: 'send', name: 'Send' }, { value: 'update', name: 'Update' }, { value: 'delete', name: 'Delete' } ] } ], operations: [], credentials: [] }; repository.saveNode(slackNode); }); afterEach(async () => { // Clean up database if (testDb) { await testDb.cleanup(); } }); 
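  // The fixtures above mirror n8n's resource/operation pattern: each `operation`
  // property is scoped to a single resource via displayOptions.show.resource, so the
  // validator must select the option list matching the configured resource before it
  // can flag an invalid operation or suggest a close match.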
describe('Invalid Operations', () => { it('should detect invalid operation "listFiles" for Google Drive', () => { const config = { resource: 'fileFolder', operation: 'listFiles' }; const node = repository.getNode('nodes-base.googleDrive'); const result = EnhancedConfigValidator.validateWithMode( 'nodes-base.googleDrive', config, node.properties, 'operation', 'ai-friendly' ); expect(result.valid).toBe(false); // Should have an error for invalid operation const operationError = result.errors.find(e => e.property === 'operation'); expect(operationError).toBeDefined(); expect(operationError!.message).toContain('Invalid operation "listFiles"'); expect(operationError!.message).toContain('Did you mean'); expect(operationError!.fix).toContain('search'); // Should suggest 'search' for fileFolder resource }); it('should provide suggestions for typos in operations', () => { const config = { resource: 'file', operation: 'downlod' // Typo: missing 'a' }; const node = repository.getNode('nodes-base.googleDrive'); const result = EnhancedConfigValidator.validateWithMode( 'nodes-base.googleDrive', config, node.properties, 'operation', 'ai-friendly' ); expect(result.valid).toBe(false); const operationError = result.errors.find(e => e.property === 'operation'); expect(operationError).toBeDefined(); expect(operationError!.message).toContain('Did you mean "download"'); }); it('should list valid operations for the resource', () => { const config = { resource: 'folder', operation: 'upload' // Invalid for folder resource }; const node = repository.getNode('nodes-base.googleDrive'); const result = EnhancedConfigValidator.validateWithMode( 'nodes-base.googleDrive', config, node.properties, 'operation', 'ai-friendly' ); expect(result.valid).toBe(false); const operationError = result.errors.find(e => e.property === 'operation'); expect(operationError).toBeDefined(); expect(operationError!.fix).toContain('Valid operations for resource "folder"'); expect(operationError!.fix).toContain('create'); expect(operationError!.fix).toContain('delete'); expect(operationError!.fix).toContain('share'); }); }); describe('Invalid Resources', () => { it('should detect plural resource "files" and suggest singular', () => { const config = { resource: 'files', // Should be 'file' operation: 'list' }; const node = repository.getNode('nodes-base.googleDrive'); const result = EnhancedConfigValidator.validateWithMode( 'nodes-base.googleDrive', config, node.properties, 'operation', 'ai-friendly' ); expect(result.valid).toBe(false); const resourceError = result.errors.find(e => e.property === 'resource'); expect(resourceError).toBeDefined(); expect(resourceError!.message).toContain('Invalid resource "files"'); expect(resourceError!.message).toContain('Did you mean "file"'); expect(resourceError!.fix).toContain('Use singular'); }); it('should suggest similar resources for typos', () => { const config = { resource: 'flie', // Typo operation: 'download' }; const node = repository.getNode('nodes-base.googleDrive'); const result = EnhancedConfigValidator.validateWithMode( 'nodes-base.googleDrive', config, node.properties, 'operation', 'ai-friendly' ); expect(result.valid).toBe(false); const resourceError = result.errors.find(e => e.property === 'resource'); expect(resourceError).toBeDefined(); expect(resourceError!.message).toContain('Did you mean "file"'); }); it('should list valid resources when no match found', () => { const config = { resource: 'document', // Not a valid resource operation: 'create' }; const node = 
repository.getNode('nodes-base.googleDrive'); const result = EnhancedConfigValidator.validateWithMode( 'nodes-base.googleDrive', config, node.properties, 'operation', 'ai-friendly' ); expect(result.valid).toBe(false); const resourceError = result.errors.find(e => e.property === 'resource'); expect(resourceError).toBeDefined(); expect(resourceError!.fix).toContain('Valid resources:'); expect(resourceError!.fix).toContain('file'); expect(resourceError!.fix).toContain('folder'); }); }); describe('Combined Resource and Operation Validation', () => { it('should validate both resource and operation together', () => { const config = { resource: 'files', // Invalid: should be singular operation: 'listFiles' // Invalid: should be 'list' or 'search' }; const node = repository.getNode('nodes-base.googleDrive'); const result = EnhancedConfigValidator.validateWithMode( 'nodes-base.googleDrive', config, node.properties, 'operation', 'ai-friendly' ); expect(result.valid).toBe(false); expect(result.errors.length).toBeGreaterThanOrEqual(2); // Should have error for resource const resourceError = result.errors.find(e => e.property === 'resource'); expect(resourceError).toBeDefined(); expect(resourceError!.message).toContain('files'); // Should have error for operation const operationError = result.errors.find(e => e.property === 'operation'); expect(operationError).toBeDefined(); expect(operationError!.message).toContain('listFiles'); }); }); describe('Slack Node Validation', () => { it('should suggest "send" instead of "sendMessage"', () => { const config = { resource: 'message', operation: 'sendMessage' // Common mistake }; const node = repository.getNode('nodes-base.slack'); const result = EnhancedConfigValidator.validateWithMode( 'nodes-base.slack', config, node.properties, 'operation', 'ai-friendly' ); expect(result.valid).toBe(false); const operationError = result.errors.find(e => e.property === 'operation'); expect(operationError).toBeDefined(); expect(operationError!.message).toContain('Did you mean "send"'); }); it('should suggest singular "channel" instead of "channels"', () => { const config = { resource: 'channels', // Should be singular operation: 'create' }; const node = repository.getNode('nodes-base.slack'); const result = EnhancedConfigValidator.validateWithMode( 'nodes-base.slack', config, node.properties, 'operation', 'ai-friendly' ); expect(result.valid).toBe(false); const resourceError = result.errors.find(e => e.property === 'resource'); expect(resourceError).toBeDefined(); expect(resourceError!.message).toContain('Did you mean "channel"'); }); }); describe('Valid Configurations', () => { it('should accept valid Google Drive configuration', () => { const config = { resource: 'file', operation: 'download' }; const node = repository.getNode('nodes-base.googleDrive'); const result = EnhancedConfigValidator.validateWithMode( 'nodes-base.googleDrive', config, node.properties, 'operation', 'ai-friendly' ); // Should not have errors for resource or operation const resourceError = result.errors.find(e => e.property === 'resource'); const operationError = result.errors.find(e => e.property === 'operation'); expect(resourceError).toBeUndefined(); expect(operationError).toBeUndefined(); }); it('should accept valid Slack configuration', () => { const config = { resource: 'message', operation: 'send' }; const node = repository.getNode('nodes-base.slack'); const result = EnhancedConfigValidator.validateWithMode( 'nodes-base.slack', config, node.properties, 'operation', 'ai-friendly' ); // Should not 
have errors for resource or operation const resourceError = result.errors.find(e => e.property === 'resource'); const operationError = result.errors.find(e => e.property === 'operation'); expect(resourceError).toBeUndefined(); expect(operationError).toBeUndefined(); }); }); }); ``` -------------------------------------------------------------------------------- /P0-R3-TEST-PLAN.md: -------------------------------------------------------------------------------- ```markdown # P0-R3 Feature Test Coverage Plan ## Executive Summary This document outlines comprehensive test coverage for the P0-R3 feature (Template-based Configuration Examples). The feature adds real-world configuration examples from popular templates to node search and essentials tools. **Feature Overview:** - New database table: `template_node_configs` (197 pre-extracted configurations) - Enhanced tools: `search_nodes({includeExamples: true})` and `get_node_essentials({includeExamples: true})` - Breaking changes: Removed `get_node_for_task` tool ## Test Files Created ### Unit Tests #### 1. `/tests/unit/scripts/fetch-templates-extraction.test.ts` ✅ **Purpose:** Test template extraction logic from `fetch-templates.ts` **Coverage:** - `extractNodeConfigs()` - 90%+ coverage - Valid workflows with multiple nodes - Empty workflows - Malformed compressed data - Invalid JSON - Nodes without parameters - Sticky note filtering - Credential handling - Expression detection - Special characters - Large workflows (100 nodes) - `detectExpressions()` - 100% coverage - `={{...}}` syntax detection - `$json` references - `$node` references - Nested objects - Arrays - Null/undefined handling - Multiple expression types **Test Count:** 27 tests **Expected Coverage:** 92%+ --- #### 2. `/tests/unit/mcp/search-nodes-examples.test.ts` ✅ **Purpose:** Test `search_nodes` tool with includeExamples parameter **Coverage:** - includeExamples parameter behavior - false: no examples returned - undefined: no examples returned (default) - true: examples returned - Example data structure validation - Top 2 limit enforcement - Backward compatibility - Performance (<100ms) - Error handling (malformed JSON, database errors) - searchNodesLIKE integration - searchNodesFTS integration **Test Count:** 12 tests **Expected Coverage:** 85%+ --- #### 3. `/tests/unit/mcp/get-node-essentials-examples.test.ts` ✅ **Purpose:** Test `get_node_essentials` tool with includeExamples parameter **Coverage:** - includeExamples parameter behavior - Full metadata structure - configuration object - source (template, views, complexity) - useCases (limited to 2) - metadata (hasCredentials, hasExpressions) - Cache key differentiation - Backward compatibility - Performance (<100ms) - Error handling - Top 3 limit enforcement **Test Count:** 13 tests **Expected Coverage:** 88%+ --- ### Integration Tests #### 4. 
`/tests/integration/database/template-node-configs.test.ts` ✅ **Purpose:** Test database schema, migrations, and operations **Coverage:** - Schema validation - Table creation - All columns present - Correct types and constraints - CHECK constraint on complexity - Indexes - idx_config_node_type_rank - idx_config_complexity - idx_config_auth - View: ranked_node_configs - Top 5 per node_type - Correct ordering - Foreign key constraints - CASCADE delete - Referential integrity - Data operations - INSERT with all fields - Nullable fields - Rank updates - Delete rank > 10 - Performance - 1000 records < 10ms queries - Migration idempotency **Test Count:** 19 tests **Expected Coverage:** 95%+ --- #### 5. `/tests/integration/mcp/template-examples-e2e.test.ts` ✅ **Purpose:** End-to-end integration testing **Coverage:** - Direct SQL queries - Top 2 examples for search_nodes - Top 3 examples with metadata for get_node_essentials - Data structure validation - Valid JSON in all fields - Credentials when has_credentials=1 - Ranked view functionality - Performance with 100+ configs - Query performance < 5ms - Complexity filtering - Edge cases - Non-existent node types - Long parameters_json (100 params) - Special characters (Unicode, emojis, symbols) - Data integrity - Foreign key constraints - Cascade deletes **Test Count:** 14 tests **Expected Coverage:** 90%+ --- ### Test Fixtures #### 6. `/tests/fixtures/template-configs.ts` ✅ **Purpose:** Reusable test data **Provides:** - `sampleConfigs`: 7 realistic node configurations - simpleWebhook - webhookWithAuth - httpRequestBasic - httpRequestWithExpressions - slackMessage - codeNodeTransform - codeNodeWithExpressions - `sampleWorkflows`: 3 complete workflows - webhookToSlack - apiWorkflow - complexWorkflow - **Helper Functions:** - `compressWorkflow()` - Compress to base64 - `createTemplateMetadata()` - Generate metadata - `createConfigBatch()` - Batch create configs - `getConfigByComplexity()` - Filter by complexity - `getConfigsWithExpressions()` - Filter with expressions - `getConfigsWithCredentials()` - Filter with credentials - `createInsertStatement()` - SQL insert helper --- ## Existing Tests Requiring Updates ### High Priority #### 1. `tests/unit/mcp/parameter-validation.test.ts` **Line 480:** Remove `get_node_for_task` from legacyValidationTools array ```typescript // REMOVE THIS: { name: 'get_node_for_task', args: {}, expected: 'Missing required parameters for get_node_for_task: task' }, ``` **Status:** ⚠️ BREAKING CHANGE - Tool removed --- #### 2. `tests/unit/mcp/tools.test.ts` **Update:** Remove `get_node_for_task` from templates category ```typescript // BEFORE: templates: ['list_tasks', 'get_node_for_task', 'search_templates', ...] // AFTER: templates: ['list_tasks', 'search_templates', ...] ``` **Add:** Tests for new includeExamples parameter in tool definitions ```typescript it('should have includeExamples parameter in search_nodes', () => { const searchNodesTool = tools.find(t => t.name === 'search_nodes'); expect(searchNodesTool.inputSchema.properties.includeExamples).toBeDefined(); expect(searchNodesTool.inputSchema.properties.includeExamples.type).toBe('boolean'); expect(searchNodesTool.inputSchema.properties.includeExamples.default).toBe(false); }); it('should have includeExamples parameter in get_node_essentials', () => { const essentialsTool = tools.find(t => t.name === 'get_node_essentials'); expect(essentialsTool.inputSchema.properties.includeExamples).toBeDefined(); }); ``` **Status:** ⚠️ REQUIRED UPDATE --- #### 3. 
`tests/integration/mcp-protocol/session-management.test.ts` **Remove:** Test case calling `get_node_for_task` with invalid task ```typescript // REMOVE THIS TEST: client.callTool({ name: 'get_node_for_task', arguments: { task: 'invalid_task' } }).catch(e => e) ``` **Status:** ⚠️ BREAKING CHANGE --- #### 4. `tests/integration/mcp-protocol/tool-invocation.test.ts` **Remove:** Entire `get_node_for_task` describe block **Add:** Tests for new includeExamples functionality ```typescript describe('search_nodes with includeExamples', () => { it('should return examples when includeExamples is true', async () => { const response = await client.callTool({ name: 'search_nodes', arguments: { query: 'webhook', includeExamples: true } }); expect(response.results).toBeDefined(); // Examples may or may not be present depending on database }); it('should not return examples when includeExamples is false', async () => { const response = await client.callTool({ name: 'search_nodes', arguments: { query: 'webhook', includeExamples: false } }); expect(response.results).toBeDefined(); response.results.forEach(node => { expect(node.examples).toBeUndefined(); }); }); }); describe('get_node_essentials with includeExamples', () => { it('should return examples with metadata when includeExamples is true', async () => { const response = await client.callTool({ name: 'get_node_essentials', arguments: { nodeType: 'nodes-base.webhook', includeExamples: true } }); expect(response.nodeType).toBeDefined(); // Examples may or may not be present depending on database }); }); ``` **Status:** ⚠️ REQUIRED UPDATE --- ### Medium Priority #### 5. `tests/unit/services/task-templates.test.ts` **Status:** ✅ No changes needed (TaskTemplates marked as deprecated but not removed) **Note:** TaskTemplates remains for backward compatibility. Tests should continue to pass. 
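---

## Reference Sketches (Assumed)

To make the coverage lists above easier to follow, the next three snippets restate them as code. All three are sketches assembled from the bullets in this plan — they are **not** excerpts from the implementation, and any name not spelled out in the bullets is an assumption.

The first sketch restates the `detectExpressions()` behaviour covered in section 1: walk a parameters object and flag n8n expression syntax (`={{...}}`, `$json`, `$node`), recursing through nested objects and arrays and tolerating null/undefined.

```typescript
// Sketch of the expression-detection behaviour the section 1 tests cover.
// Not the real implementation from fetch-templates.ts.
function detectExpressions(value: unknown): boolean {
  if (value === null || value === undefined) return false;
  if (typeof value === 'string') {
    return value.includes('={{') || value.includes('$json') || value.includes('$node');
  }
  if (Array.isArray(value)) return value.some(detectExpressions);
  if (typeof value === 'object') {
    return Object.values(value as Record<string, unknown>).some(detectExpressions);
  }
  return false;
}
```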
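The second sketch restates the example payload described for `get_node_essentials({includeExamples: true})` in sections 2–3 (configuration, source, useCases, metadata) as a TypeScript interface; field names follow the bullets, and the real response types in `src/` may differ.

```typescript
// Reading aid only: the example shape implied by the coverage bullets above.
interface TemplateConfigExample {
  configuration: Record<string, unknown>;     // parsed node parameters from the template
  source: {
    template: string;                         // source template name (assumed field name)
    views: number;
    complexity: 'simple' | 'medium' | 'complex';
  };
  useCases: string[];                         // limited to 2 entries
  metadata: {
    hasCredentials: boolean;
    hasExpressions: boolean;
  };
}

// search_nodes({includeExamples: true}) attaches at most the top 2 examples per
// node; get_node_essentials({includeExamples: true}) returns up to the top 3
// with the full metadata above.
```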
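The third sketch shows one plausible shape of the `template_node_configs` table, its indexes, and the `ranked_node_configs` view exercised by the integration tests in sections 4–5. Column names not given in the bullets (for example `template_id`) and the exact view definition are assumptions; consult the real migration before relying on this.

```typescript
// Sketch only: a plausible P0-R3 schema assembled from the coverage bullets
// above, using better-sqlite3 (already a project dependency).
import Database from 'better-sqlite3';

const db = new Database(':memory:');
db.pragma('foreign_keys = ON');

db.exec(`
  CREATE TABLE IF NOT EXISTS template_node_configs (
    id              INTEGER PRIMARY KEY AUTOINCREMENT,
    node_type       TEXT    NOT NULL,
    template_id     INTEGER NOT NULL,          -- assumed FK column name
    parameters_json TEXT    NOT NULL,
    complexity      TEXT    NOT NULL
      CHECK (complexity IN ('simple', 'medium', 'complex')),
    has_credentials INTEGER NOT NULL DEFAULT 0,
    has_expressions INTEGER NOT NULL DEFAULT 0,
    rank            INTEGER NOT NULL DEFAULT 0,
    FOREIGN KEY (template_id) REFERENCES templates(id) ON DELETE CASCADE
  );

  CREATE INDEX IF NOT EXISTS idx_config_node_type_rank
    ON template_node_configs(node_type, rank);
  CREATE INDEX IF NOT EXISTS idx_config_complexity
    ON template_node_configs(complexity);
  CREATE INDEX IF NOT EXISTS idx_config_auth
    ON template_node_configs(has_credentials);

  -- "Top 5 per node_type" view; assumes rank is 1-based within each node_type
  CREATE VIEW IF NOT EXISTS ranked_node_configs AS
    SELECT * FROM template_node_configs WHERE rank <= 5;
`);
```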
--- ## Test Execution Plan ### Phase 1: Unit Tests ```bash # Run new unit tests npm test tests/unit/scripts/fetch-templates-extraction.test.ts npm test tests/unit/mcp/search-nodes-examples.test.ts npm test tests/unit/mcp/get-node-essentials-examples.test.ts # Expected: All pass, 52 tests ``` ### Phase 2: Integration Tests ```bash # Run new integration tests npm test tests/integration/database/template-node-configs.test.ts npm test tests/integration/mcp/template-examples-e2e.test.ts # Expected: All pass, 33 tests ``` ### Phase 3: Update Existing Tests ```bash # Update files as outlined above, then run: npm test tests/unit/mcp/parameter-validation.test.ts npm test tests/unit/mcp/tools.test.ts npm test tests/integration/mcp-protocol/session-management.test.ts npm test tests/integration/mcp-protocol/tool-invocation.test.ts # Expected: All pass after updates ``` ### Phase 4: Full Test Suite ```bash # Run all tests npm test # Run with coverage npm run test:coverage # Expected coverage improvements: # - src/scripts/fetch-templates.ts: +20% (60% → 80%) # - src/mcp/server.ts: +5% (75% → 80%) # - Overall project: +2% (current → current+2%) ``` --- ## Coverage Expectations ### New Code Coverage | File | Function | Target | Tests | |------|----------|--------|-------| | fetch-templates.ts | extractNodeConfigs | 95% | 15 tests | | fetch-templates.ts | detectExpressions | 100% | 12 tests | | server.ts | searchNodes (with examples) | 90% | 8 tests | | server.ts | getNodeEssentials (with examples) | 90% | 10 tests | | Database migration | template_node_configs | 100% | 19 tests | ### Overall Coverage Goals - **Unit Tests:** 90%+ coverage for new code - **Integration Tests:** All happy paths + critical error paths - **E2E Tests:** Complete feature workflows - **Performance:** All queries <10ms (database), <100ms (MCP) --- ## Test Infrastructure ### Dependencies Required All dependencies already present in `package.json`: - vitest (test runner) - better-sqlite3 (database) - @vitest/coverage-v8 (coverage) ### Test Utilities Used - TestDatabase helper (from existing test utils) - createTestDatabaseAdapter (from existing test utils) - Standard vitest matchers ### No New Dependencies Required ✅ --- ## Regression Prevention ### Critical Paths Protected 1. **Backward Compatibility** - Tools work without includeExamples parameter - Existing workflows unchanged - Cache keys differentiated 2. **Performance** - No degradation when includeExamples=false - Indexed queries <10ms - Example fetch errors don't break responses 3. **Data Integrity** - Foreign key constraints enforced - JSON validation in all fields - Rank calculations correct --- ## CI/CD Integration ### GitHub Actions Updates No changes required. Existing test commands will run new tests: ```yaml - run: npm test - run: npm run test:coverage ``` ### Coverage Thresholds Current thresholds maintained. 
Expected improvements: - Lines: +2% - Functions: +3% - Branches: +2% --- ## Manual Testing Checklist ### Pre-Deployment Verification - [ ] Run `npm run rebuild` - Verify migration applies cleanly - [ ] Run `npm run fetch:templates --extract-only` - Verify extraction works - [ ] Check database: `SELECT COUNT(*) FROM template_node_configs` - Should be ~197 - [ ] Test MCP tool: `search_nodes({query: "webhook", includeExamples: true})` - [ ] Test MCP tool: `get_node_essentials({nodeType: "nodes-base.webhook", includeExamples: true})` - [ ] Verify backward compatibility: Tools work without includeExamples parameter - [ ] Performance test: Query 100 nodes with examples < 200ms --- ## Rollback Plan If issues are detected: 1. **Database Rollback:** ```sql DROP TABLE IF EXISTS template_node_configs; DROP VIEW IF EXISTS ranked_node_configs; ``` 2. **Code Rollback:** - Revert server.ts changes - Revert tools.ts changes - Restore get_node_for_task tool (if critical) 3. **Test Rollback:** - Revert parameter-validation.test.ts - Revert tools.test.ts - Revert tool-invocation.test.ts --- ## Success Metrics ### Test Metrics - ✅ 85+ new tests added - ✅ 0 tests failing after updates - ✅ Coverage increase 2%+ - ✅ All performance tests pass ### Feature Metrics - ✅ 197 template configs extracted - ✅ Top 2/3 examples returned correctly - ✅ Query performance <10ms - ✅ No backward compatibility breaks --- ## Conclusion This test plan provides **comprehensive coverage** for the P0-R3 feature with: - **85+ new tests** across unit, integration, and E2E levels - **Complete coverage** of extraction, storage, and retrieval - **Backward compatibility** protection - **Performance validation** (<10ms queries) - **Clear migration path** for existing tests **All test files are ready for execution.** Update the 4 existing test files as outlined, then run the full test suite. 
**Estimated Total Implementation Time:** 2-3 hours for updating existing tests + validation ``` -------------------------------------------------------------------------------- /tests/unit/services/expression-validator-edge-cases.test.ts: -------------------------------------------------------------------------------- ```typescript import { describe, it, expect, vi, beforeEach } from 'vitest'; import { ExpressionValidator } from '@/services/expression-validator'; // Mock the database vi.mock('better-sqlite3'); describe('ExpressionValidator - Edge Cases', () => { beforeEach(() => { vi.clearAllMocks(); }); describe('Null and Undefined Handling', () => { it('should handle null expression gracefully', () => { const context = { availableNodes: ['Node1'] }; const result = ExpressionValidator.validateExpression(null as any, context); expect(result.valid).toBe(true); expect(result.errors).toEqual([]); }); it('should handle undefined expression gracefully', () => { const context = { availableNodes: ['Node1'] }; const result = ExpressionValidator.validateExpression(undefined as any, context); expect(result.valid).toBe(true); expect(result.errors).toEqual([]); }); it('should handle null context gracefully', () => { const result = ExpressionValidator.validateExpression('{{ $json.data }}', null as any); expect(result).toBeDefined(); // With null context, it will likely have errors about missing context expect(result.valid).toBe(false); }); it('should handle undefined context gracefully', () => { const result = ExpressionValidator.validateExpression('{{ $json.data }}', undefined as any); expect(result).toBeDefined(); // With undefined context, it will likely have errors about missing context expect(result.valid).toBe(false); }); }); describe('Boundary Value Testing', () => { it('should handle empty string expression', () => { const context = { availableNodes: [] }; const result = ExpressionValidator.validateExpression('', context); expect(result.valid).toBe(true); expect(result.errors).toEqual([]); expect(result.usedVariables.size).toBe(0); }); it('should handle extremely long expressions', () => { const longExpression = '{{ ' + '$json.field'.repeat(1000) + ' }}'; const context = { availableNodes: ['Node1'] }; const start = Date.now(); const result = ExpressionValidator.validateExpression(longExpression, context); const duration = Date.now() - start; expect(result).toBeDefined(); expect(duration).toBeLessThan(1000); // Should process within 1 second }); it('should handle deeply nested property access', () => { const deepExpression = '{{ $json' + '.property'.repeat(50) + ' }}'; const context = { availableNodes: ['Node1'] }; const result = ExpressionValidator.validateExpression(deepExpression, context); expect(result.valid).toBe(true); expect(result.usedVariables.has('$json')).toBe(true); }); it('should handle many different variables in one expression', () => { const complexExpression = `{{ $json.data + $node["Node1"].json.value + $input.item.field + $items("Node2", 0)[0].data + $parameter["apiKey"] + $env.API_URL + $workflow.name + $execution.id + $itemIndex + $now }}`; const context = { availableNodes: ['Node1', 'Node2'], hasInputData: true }; const result = ExpressionValidator.validateExpression(complexExpression, context); expect(result.usedVariables.size).toBeGreaterThan(5); expect(result.usedNodes.has('Node1')).toBe(true); expect(result.usedNodes.has('Node2')).toBe(true); }); }); describe('Invalid Syntax Handling', () => { it('should detect unclosed expressions', () => { const expressions = [ '{{ 
$json.field', '$json.field }}', '{{ $json.field }', '{ $json.field }}' ]; const context = { availableNodes: [] }; expressions.forEach(expr => { const result = ExpressionValidator.validateExpression(expr, context); expect(result.errors.some(e => e.includes('Unmatched'))).toBe(true); }); }); it('should detect nested expressions', () => { const nestedExpression = '{{ $json.field + {{ $node["Node1"].json }} }}'; const context = { availableNodes: ['Node1'] }; const result = ExpressionValidator.validateExpression(nestedExpression, context); expect(result.errors.some(e => e.includes('Nested expressions'))).toBe(true); }); it('should detect empty expressions', () => { const emptyExpression = 'Value: {{}}'; const context = { availableNodes: [] }; const result = ExpressionValidator.validateExpression(emptyExpression, context); expect(result.errors.some(e => e.includes('Empty expression'))).toBe(true); }); it('should handle malformed node references', () => { const expressions = [ '{{ $node[].json }}', '{{ $node[""].json }}', '{{ $node[Node1].json }}', // Missing quotes '{{ $node["Node1" ].json }}' // Extra space - this might actually be valid ]; const context = { availableNodes: ['Node1'] }; expressions.forEach(expr => { const result = ExpressionValidator.validateExpression(expr, context); // Some of these might generate warnings or errors expect(result).toBeDefined(); }); }); }); describe('Special Characters and Unicode', () => { it('should handle special characters in node names', () => { const specialNodes = ['Node-123', 'Node_Test', 'Node@Special', 'Node 中文', 'Node😊']; const context = { availableNodes: specialNodes }; specialNodes.forEach(nodeName => { const expression = `{{ $node["${nodeName}"].json.value }}`; const result = ExpressionValidator.validateExpression(expression, context); expect(result.usedNodes.has(nodeName)).toBe(true); expect(result.errors.filter(e => e.includes(nodeName))).toHaveLength(0); }); }); it('should handle Unicode in property names', () => { const expression = '{{ $json.名前 + $json.שם + $json.имя }}'; const context = { availableNodes: [] }; const result = ExpressionValidator.validateExpression(expression, context); expect(result.usedVariables.has('$json')).toBe(true); }); }); describe('Context Validation', () => { it('should warn about $input when no input data available', () => { const expression = '{{ $input.item.data }}'; const context = { availableNodes: [], hasInputData: false }; const result = ExpressionValidator.validateExpression(expression, context); expect(result.warnings.some(w => w.includes('$input'))).toBe(true); }); it('should handle references to non-existent nodes', () => { const expression = '{{ $node["NonExistentNode"].json.value }}'; const context = { availableNodes: ['Node1', 'Node2'] }; const result = ExpressionValidator.validateExpression(expression, context); expect(result.errors.some(e => e.includes('NonExistentNode'))).toBe(true); }); it('should validate $items function references', () => { const expression = '{{ $items("NonExistentNode", 0)[0].json }}'; const context = { availableNodes: ['Node1', 'Node2'] }; const result = ExpressionValidator.validateExpression(expression, context); expect(result.errors.some(e => e.includes('NonExistentNode'))).toBe(true); }); }); describe('Complex Expression Patterns', () => { it('should handle JavaScript operations in expressions', () => { const expressions = [ '{{ $json.count > 10 ? 
"high" : "low" }}', '{{ Math.round($json.price * 1.2) }}', '{{ $json.items.filter(item => item.active).length }}', '{{ new Date($json.timestamp).toISOString() }}', '{{ $json.name.toLowerCase().replace(" ", "-") }}' ]; const context = { availableNodes: [] }; expressions.forEach(expr => { const result = ExpressionValidator.validateExpression(expr, context); expect(result.usedVariables.has('$json')).toBe(true); }); }); it('should handle array access patterns', () => { const expressions = [ '{{ $json[0] }}', '{{ $json.items[5].name }}', '{{ $node["Node1"].json[0].data[1] }}', '{{ $json["items"][0]["name"] }}' ]; const context = { availableNodes: ['Node1'] }; expressions.forEach(expr => { const result = ExpressionValidator.validateExpression(expr, context); expect(result.usedVariables.size).toBeGreaterThan(0); }); }); }); describe('validateNodeExpressions', () => { it('should validate all expressions in node parameters', () => { const parameters = { field1: '{{ $json.data }}', field2: 'static value', nested: { field3: '{{ $node["Node1"].json.value }}', array: [ '{{ $json.item1 }}', 'not an expression', '{{ $json.item2 }}' ] } }; const context = { availableNodes: ['Node1'] }; const result = ExpressionValidator.validateNodeExpressions(parameters, context); expect(result.usedVariables.has('$json')).toBe(true); expect(result.usedNodes.has('Node1')).toBe(true); expect(result.valid).toBe(true); }); it('should handle null/undefined in parameters', () => { const parameters = { field1: null, field2: undefined, field3: '', field4: '{{ $json.data }}' }; const context = { availableNodes: [] }; const result = ExpressionValidator.validateNodeExpressions(parameters, context); expect(result.usedVariables.has('$json')).toBe(true); expect(result.errors.length).toBe(0); }); it('should handle circular references in parameters', () => { const parameters: any = { field1: '{{ $json.data }}' }; parameters.circular = parameters; const context = { availableNodes: [] }; // Should not throw expect(() => { ExpressionValidator.validateNodeExpressions(parameters, context); }).not.toThrow(); }); it('should aggregate errors from multiple expressions', () => { const parameters = { field1: '{{ $node["Missing1"].json }}', field2: '{{ $node["Missing2"].json }}', field3: '{{ }}', // Empty expression field4: '{{ $json.valid }}' }; const context = { availableNodes: ['ValidNode'] }; const result = ExpressionValidator.validateNodeExpressions(parameters, context); expect(result.valid).toBe(false); // Should have at least 3 errors: 2 missing nodes + 1 empty expression expect(result.errors.length).toBeGreaterThanOrEqual(3); expect(result.usedVariables.has('$json')).toBe(true); }); }); describe('Performance Edge Cases', () => { it('should handle recursive parameter structures efficiently', () => { const createNestedObject = (depth: number): any => { if (depth === 0) return '{{ $json.value }}'; return { level: depth, expression: `{{ $json.level${depth} }}`, nested: createNestedObject(depth - 1) }; }; const deepParameters = createNestedObject(100); const context = { availableNodes: [] }; const start = Date.now(); const result = ExpressionValidator.validateNodeExpressions(deepParameters, context); const duration = Date.now() - start; expect(result).toBeDefined(); expect(duration).toBeLessThan(1000); // Should complete within 1 second }); it('should handle large arrays of expressions', () => { const parameters = { items: Array(1000).fill(null).map((_, i) => `{{ $json.item${i} }}`) }; const context = { availableNodes: [] }; const result = 
ExpressionValidator.validateNodeExpressions(parameters, context); expect(result.usedVariables.has('$json')).toBe(true); expect(result.valid).toBe(true); }); }); describe('Error Message Quality', () => { it('should provide helpful error messages', () => { const testCases = [ { expression: '{{ $node["Node With Spaces"].json }}', context: { availableNodes: ['NodeWithSpaces'] }, expectedError: 'Node With Spaces' }, { expression: '{{ $items("WrongNode", -1) }}', context: { availableNodes: ['RightNode'] }, expectedError: 'WrongNode' } ]; testCases.forEach(({ expression, context, expectedError }) => { const result = ExpressionValidator.validateExpression(expression, context); const hasRelevantError = result.errors.some(e => e.includes(expectedError)); expect(hasRelevantError).toBe(true); }); }); }); }); ``` -------------------------------------------------------------------------------- /tests/unit/validation-fixes.test.ts: -------------------------------------------------------------------------------- ```typescript /** * Test suite for validation system fixes * Covers issues #58, #68, #70, #73 */ import { describe, test, expect, beforeAll, afterAll } from 'vitest'; import { WorkflowValidator } from '../../src/services/workflow-validator'; import { EnhancedConfigValidator } from '../../src/services/enhanced-config-validator'; import { ToolValidation, Validator, ValidationError } from '../../src/utils/validation-schemas'; describe('Validation System Fixes', () => { let workflowValidator: WorkflowValidator; let mockNodeRepository: any; beforeAll(async () => { // Initialize test environment process.env.NODE_ENV = 'test'; // Mock repository for testing mockNodeRepository = { getNode: (nodeType: string) => { if (nodeType === 'nodes-base.webhook' || nodeType === 'n8n-nodes-base.webhook') { return { nodeType: 'nodes-base.webhook', displayName: 'Webhook', properties: [ { name: 'path', required: true, displayName: 'Path' }, { name: 'httpMethod', required: true, displayName: 'HTTP Method' } ] }; } if (nodeType === 'nodes-base.set' || nodeType === 'n8n-nodes-base.set') { return { nodeType: 'nodes-base.set', displayName: 'Set', properties: [ { name: 'values', required: false, displayName: 'Values' } ] }; } return null; } } as any; workflowValidator = new WorkflowValidator(mockNodeRepository, EnhancedConfigValidator); }); afterAll(() => { // Reset NODE_ENV instead of deleting it delete (process.env as any).NODE_ENV; }); describe('Issue #73: validate_node_minimal crashes without input validation', () => { test('should handle empty config in validation schemas', () => { // Test the validation schema handles empty config const result = ToolValidation.validateNodeMinimal({ nodeType: 'nodes-base.webhook', config: undefined }); expect(result).toBeDefined(); expect(result.valid).toBe(false); expect(result.errors.length).toBeGreaterThan(0); expect(result.errors[0].field).toBe('config'); }); test('should handle null config in validation schemas', () => { const result = ToolValidation.validateNodeMinimal({ nodeType: 'nodes-base.webhook', config: null }); expect(result).toBeDefined(); expect(result.valid).toBe(false); expect(result.errors.length).toBeGreaterThan(0); expect(result.errors[0].field).toBe('config'); }); test('should accept valid config object', () => { const result = ToolValidation.validateNodeMinimal({ nodeType: 'nodes-base.webhook', config: { path: '/webhook', httpMethod: 'POST' } }); expect(result).toBeDefined(); expect(result.valid).toBe(true); expect(result.errors).toHaveLength(0); }); }); 
describe('Issue #58: validate_node_operation crashes on nested input', () => { test('should handle invalid nodeType gracefully', () => { expect(() => { EnhancedConfigValidator.validateWithMode( undefined as any, { resource: 'channel', operation: 'create' }, [], 'operation', 'ai-friendly' ); }).toThrow(Error); }); test('should handle null nodeType gracefully', () => { expect(() => { EnhancedConfigValidator.validateWithMode( null as any, { resource: 'channel', operation: 'create' }, [], 'operation', 'ai-friendly' ); }).toThrow(Error); }); test('should handle non-string nodeType gracefully', () => { expect(() => { EnhancedConfigValidator.validateWithMode( { type: 'nodes-base.slack' } as any, { resource: 'channel', operation: 'create' }, [], 'operation', 'ai-friendly' ); }).toThrow(Error); }); test('should handle valid nodeType properly', () => { const result = EnhancedConfigValidator.validateWithMode( 'nodes-base.set', { values: {} }, [], 'operation', 'ai-friendly' ); expect(result).toBeDefined(); expect(typeof result.valid).toBe('boolean'); }); }); describe('Issue #70: Profile settings not respected', () => { test('should pass profile parameter to all validation phases', async () => { const workflow = { nodes: [ { id: '1', name: 'Webhook', type: 'n8n-nodes-base.webhook', position: [100, 200] as [number, number], parameters: { path: '/test', httpMethod: 'POST' }, typeVersion: 1 }, { id: '2', name: 'Set', type: 'n8n-nodes-base.set', position: [300, 200] as [number, number], parameters: { values: {} }, typeVersion: 1 } ], connections: { 'Webhook': { main: [[{ node: 'Set', type: 'main', index: 0 }]] } } }; const result = await workflowValidator.validateWorkflow(workflow, { validateNodes: true, validateConnections: true, validateExpressions: true, profile: 'minimal' }); expect(result).toBeDefined(); expect(result.valid).toBe(true); // In minimal profile, should have fewer warnings/errors - just check it's reasonable expect(result.warnings.length).toBeLessThanOrEqual(5); }); test('should filter out sticky notes from validation', async () => { const workflow = { nodes: [ { id: '1', name: 'Webhook', type: 'n8n-nodes-base.webhook', position: [100, 200] as [number, number], parameters: { path: '/test', httpMethod: 'POST' }, typeVersion: 1 }, { id: '2', name: 'Sticky Note', type: 'n8n-nodes-base.stickyNote', position: [300, 100] as [number, number], parameters: { content: 'This is a note' }, typeVersion: 1 } ], connections: {} }; const result = await workflowValidator.validateWorkflow(workflow); expect(result).toBeDefined(); expect(result.statistics.totalNodes).toBe(1); // Only webhook, sticky note excluded expect(result.statistics.enabledNodes).toBe(1); }); test('should allow legitimate loops in cycle detection', async () => { const workflow = { nodes: [ { id: '1', name: 'Manual Trigger', type: 'n8n-nodes-base.manualTrigger', position: [100, 200] as [number, number], parameters: {}, typeVersion: 1 }, { id: '2', name: 'SplitInBatches', type: 'n8n-nodes-base.splitInBatches', position: [300, 200] as [number, number], parameters: { batchSize: 1 }, typeVersion: 1 }, { id: '3', name: 'Set', type: 'n8n-nodes-base.set', position: [500, 200] as [number, number], parameters: { values: {} }, typeVersion: 1 } ], connections: { 'Manual Trigger': { main: [[{ node: 'SplitInBatches', type: 'main', index: 0 }]] }, 'SplitInBatches': { main: [ [{ node: 'Set', type: 'main', index: 0 }], // Done output [{ node: 'Set', type: 'main', index: 0 }] // Loop output ] }, 'Set': { main: [[{ node: 'SplitInBatches', type: 'main', 
index: 0 }]] // Loop back } } }; const result = await workflowValidator.validateWorkflow(workflow); expect(result).toBeDefined(); // Should not report cycle error for legitimate SplitInBatches loop const cycleErrors = result.errors.filter(e => e.message.includes('cycle')); expect(cycleErrors).toHaveLength(0); }); }); describe('Issue #68: Better error recovery suggestions', () => { test('should provide recovery suggestions for invalid node types', async () => { const workflow = { nodes: [ { id: '1', name: 'Invalid Node', type: 'invalid-node-type', position: [100, 200] as [number, number], parameters: {}, typeVersion: 1 } ], connections: {} }; const result = await workflowValidator.validateWorkflow(workflow); expect(result).toBeDefined(); expect(result.valid).toBe(false); expect(result.suggestions.length).toBeGreaterThan(0); // Should contain recovery suggestions const recoveryStarted = result.suggestions.some(s => s.includes('🔧 RECOVERY')); expect(recoveryStarted).toBe(true); }); test('should provide recovery suggestions for connection errors', async () => { const workflow = { nodes: [ { id: '1', name: 'Webhook', type: 'n8n-nodes-base.webhook', position: [100, 200] as [number, number], parameters: { path: '/test', httpMethod: 'POST' }, typeVersion: 1 } ], connections: { 'Webhook': { main: [[{ node: 'NonExistentNode', type: 'main', index: 0 }]] } } }; const result = await workflowValidator.validateWorkflow(workflow); expect(result).toBeDefined(); expect(result.valid).toBe(false); expect(result.suggestions.length).toBeGreaterThan(0); // Should contain connection recovery suggestions const connectionRecovery = result.suggestions.some(s => s.includes('Connection errors detected') || s.includes('connection') ); expect(connectionRecovery).toBe(true); }); test('should provide workflow for multiple errors', async () => { const workflow = { nodes: [ { id: '1', name: 'Invalid Node 1', type: 'invalid-type-1', position: [100, 200] as [number, number], parameters: {} // Missing typeVersion }, { id: '2', name: 'Invalid Node 2', type: 'invalid-type-2', position: [300, 200] as [number, number], parameters: {} // Missing typeVersion }, { id: '3', name: 'Invalid Node 3', type: 'invalid-type-3', position: [500, 200] as [number, number], parameters: {} // Missing typeVersion } ], connections: { 'Invalid Node 1': { main: [[{ node: 'NonExistent', type: 'main', index: 0 }]] } } }; const result = await workflowValidator.validateWorkflow(workflow); expect(result).toBeDefined(); expect(result.valid).toBe(false); expect(result.errors.length).toBeGreaterThan(3); // Should provide step-by-step recovery workflow const workflowSuggestion = result.suggestions.some(s => s.includes('SUGGESTED WORKFLOW') && s.includes('Too many errors detected') ); expect(workflowSuggestion).toBe(true); }); }); describe('Enhanced Input Validation', () => { test('should validate tool parameters with schemas', () => { // Test validate_node_operation parameters const validationResult = ToolValidation.validateNodeOperation({ nodeType: 'nodes-base.webhook', config: { path: '/test' }, profile: 'ai-friendly' }); expect(validationResult.valid).toBe(true); expect(validationResult.errors).toHaveLength(0); }); test('should reject invalid parameters', () => { const validationResult = ToolValidation.validateNodeOperation({ nodeType: 123, // Invalid type config: 'not an object', // Invalid type profile: 'invalid-profile' // Invalid enum value }); expect(validationResult.valid).toBe(false); expect(validationResult.errors.length).toBeGreaterThan(0); }); 
test('should format validation errors properly', () => { const validationResult = ToolValidation.validateNodeOperation({ nodeType: null, config: null }); const errorMessage = Validator.formatErrors(validationResult, 'validate_node_operation'); expect(errorMessage).toContain('validate_node_operation: Validation failed:'); expect(errorMessage).toContain('nodeType'); expect(errorMessage).toContain('config'); }); }); }); ``` -------------------------------------------------------------------------------- /tests/unit/services/config-validator-edge-cases.test.ts: -------------------------------------------------------------------------------- ```typescript import { describe, it, expect, vi, beforeEach } from 'vitest'; import { ConfigValidator } from '@/services/config-validator'; import type { ValidationResult, ValidationError, ValidationWarning } from '@/services/config-validator'; // Mock the database vi.mock('better-sqlite3'); describe('ConfigValidator - Edge Cases', () => { beforeEach(() => { vi.clearAllMocks(); }); describe('Null and Undefined Handling', () => { it('should handle null config gracefully', () => { const nodeType = 'nodes-base.test'; const config = null as any; const properties: any[] = []; expect(() => { ConfigValidator.validate(nodeType, config, properties); }).toThrow(TypeError); }); it('should handle undefined config gracefully', () => { const nodeType = 'nodes-base.test'; const config = undefined as any; const properties: any[] = []; expect(() => { ConfigValidator.validate(nodeType, config, properties); }).toThrow(TypeError); }); it('should handle null properties array gracefully', () => { const nodeType = 'nodes-base.test'; const config = {}; const properties = null as any; expect(() => { ConfigValidator.validate(nodeType, config, properties); }).toThrow(TypeError); }); it('should handle undefined properties array gracefully', () => { const nodeType = 'nodes-base.test'; const config = {}; const properties = undefined as any; expect(() => { ConfigValidator.validate(nodeType, config, properties); }).toThrow(TypeError); }); it('should handle properties with null values in config', () => { const nodeType = 'nodes-base.test'; const config = { nullField: null, undefinedField: undefined, validField: 'value' }; const properties = [ { name: 'nullField', type: 'string', required: true }, { name: 'undefinedField', type: 'string', required: true }, { name: 'validField', type: 'string' } ]; const result = ConfigValidator.validate(nodeType, config, properties); // Check that we have errors for both null and undefined required fields expect(result.errors.some(e => e.property === 'nullField')).toBe(true); expect(result.errors.some(e => e.property === 'undefinedField')).toBe(true); // The actual error types might vary, so let's just ensure we caught the errors const nullFieldError = result.errors.find(e => e.property === 'nullField'); const undefinedFieldError = result.errors.find(e => e.property === 'undefinedField'); expect(nullFieldError).toBeDefined(); expect(undefinedFieldError).toBeDefined(); }); }); describe('Boundary Value Testing', () => { it('should handle empty arrays', () => { const nodeType = 'nodes-base.test'; const config = { arrayField: [] }; const properties = [ { name: 'arrayField', type: 'collection' } ]; const result = ConfigValidator.validate(nodeType, config, properties); expect(result.valid).toBe(true); }); it('should handle very large property arrays', () => { const nodeType = 'nodes-base.test'; const config = { field1: 'value1' }; const properties = 
Array(1000).fill(null).map((_, i) => ({ name: `field${i}`, type: 'string' })); const result = ConfigValidator.validate(nodeType, config, properties); expect(result.valid).toBe(true); }); it('should handle deeply nested displayOptions', () => { const nodeType = 'nodes-base.test'; const config = { level1: 'a', level2: 'b', level3: 'c', deepField: 'value' }; const properties = [ { name: 'level1', type: 'options', options: ['a', 'b'] }, { name: 'level2', type: 'options', options: ['a', 'b'], displayOptions: { show: { level1: ['a'] } } }, { name: 'level3', type: 'options', options: ['a', 'b', 'c'], displayOptions: { show: { level1: ['a'], level2: ['b'] } } }, { name: 'deepField', type: 'string', displayOptions: { show: { level1: ['a'], level2: ['b'], level3: ['c'] } } } ]; const result = ConfigValidator.validate(nodeType, config, properties); expect(result.visibleProperties).toContain('deepField'); }); it('should handle extremely long string values', () => { const nodeType = 'nodes-base.test'; const longString = 'a'.repeat(10000); const config = { longField: longString }; const properties = [ { name: 'longField', type: 'string' } ]; const result = ConfigValidator.validate(nodeType, config, properties); expect(result.valid).toBe(true); }); }); describe('Invalid Data Type Handling', () => { it('should handle NaN values', () => { const nodeType = 'nodes-base.test'; const config = { numberField: NaN }; const properties = [ { name: 'numberField', type: 'number' } ]; const result = ConfigValidator.validate(nodeType, config, properties); // NaN is technically type 'number' in JavaScript, so type validation passes // The validator might not have specific NaN checking, so we check for warnings // or just verify it doesn't crash expect(result).toBeDefined(); expect(() => result).not.toThrow(); }); it('should handle Infinity values', () => { const nodeType = 'nodes-base.test'; const config = { numberField: Infinity }; const properties = [ { name: 'numberField', type: 'number' } ]; const result = ConfigValidator.validate(nodeType, config, properties); // Infinity is technically a valid number in JavaScript // The validator might not flag it as an error, so just verify it handles it expect(result).toBeDefined(); expect(() => result).not.toThrow(); }); it('should handle objects when expecting primitives', () => { const nodeType = 'nodes-base.test'; const config = { stringField: { nested: 'object' }, numberField: { value: 123 } }; const properties = [ { name: 'stringField', type: 'string' }, { name: 'numberField', type: 'number' } ]; const result = ConfigValidator.validate(nodeType, config, properties); expect(result.errors).toHaveLength(2); expect(result.errors.every(e => e.type === 'invalid_type')).toBe(true); }); it('should handle circular references in config', () => { const nodeType = 'nodes-base.test'; const config: any = { field: 'value' }; config.circular = config; // Create circular reference const properties = [ { name: 'field', type: 'string' }, { name: 'circular', type: 'json' } ]; // Should not throw error const result = ConfigValidator.validate(nodeType, config, properties); expect(result).toBeDefined(); }); }); describe('Performance Boundaries', () => { it('should validate large config objects within reasonable time', () => { const nodeType = 'nodes-base.test'; const config: Record<string, any> = {}; const properties: any[] = []; // Create a large config with 1000 properties for (let i = 0; i < 1000; i++) { config[`field_${i}`] = `value_${i}`; properties.push({ name: `field_${i}`, type: 
'string' }); } const startTime = Date.now(); const result = ConfigValidator.validate(nodeType, config, properties); const endTime = Date.now(); expect(result.valid).toBe(true); expect(endTime - startTime).toBeLessThan(1000); // Should complete within 1 second }); }); describe('Special Characters and Encoding', () => { it('should handle special characters in property values', () => { const nodeType = 'nodes-base.test'; const config = { specialField: 'Value with special chars: <>&"\'`\n\r\t' }; const properties = [ { name: 'specialField', type: 'string' } ]; const result = ConfigValidator.validate(nodeType, config, properties); expect(result.valid).toBe(true); }); it('should handle unicode characters', () => { const nodeType = 'nodes-base.test'; const config = { unicodeField: '🚀 Unicode: 你好世界 مرحبا بالعالم' }; const properties = [ { name: 'unicodeField', type: 'string' } ]; const result = ConfigValidator.validate(nodeType, config, properties); expect(result.valid).toBe(true); }); }); describe('Complex Validation Scenarios', () => { it('should handle conflicting displayOptions conditions', () => { const nodeType = 'nodes-base.test'; const config = { mode: 'both', showField: true, conflictField: 'value' }; const properties = [ { name: 'mode', type: 'options', options: ['show', 'hide', 'both'] }, { name: 'showField', type: 'boolean' }, { name: 'conflictField', type: 'string', displayOptions: { show: { mode: ['show'], showField: [true] }, hide: { mode: ['hide'] } } } ]; const result = ConfigValidator.validate(nodeType, config, properties); // With mode='both', the field visibility depends on implementation expect(result).toBeDefined(); }); it('should handle multiple validation profiles correctly', () => { const nodeType = 'nodes-base.code'; const config = { language: 'javascript', jsCode: 'const x = 1;' }; const properties = [ { name: 'language', type: 'options' }, { name: 'jsCode', type: 'string' } ]; // Should perform node-specific validation for Code nodes const result = ConfigValidator.validate(nodeType, config, properties); expect(result.warnings.some(w => w.message.includes('No return statement found') )).toBe(true); }); }); describe('Error Recovery and Resilience', () => { it('should continue validation after encountering errors', () => { const nodeType = 'nodes-base.test'; const config = { field1: 'invalid-for-number', field2: null, // Required field missing field3: 'valid' }; const properties = [ { name: 'field1', type: 'number' }, { name: 'field2', type: 'string', required: true }, { name: 'field3', type: 'string' } ]; const result = ConfigValidator.validate(nodeType, config, properties); // Should have errors for field1 and field2, but field3 should be validated expect(result.errors.length).toBeGreaterThanOrEqual(2); // Check that we have errors for field1 (type error) and field2 (required field) const field1Error = result.errors.find(e => e.property === 'field1'); const field2Error = result.errors.find(e => e.property === 'field2'); expect(field1Error).toBeDefined(); expect(field1Error?.type).toBe('invalid_type'); expect(field2Error).toBeDefined(); // field2 is null, which might be treated as invalid_type rather than missing_required expect(['missing_required', 'invalid_type']).toContain(field2Error?.type); expect(result.visibleProperties).toContain('field3'); }); it('should handle malformed property definitions gracefully', () => { const nodeType = 'nodes-base.test'; const config = { field: 'value' }; const properties = [ { name: 'field', type: 'string' }, { /* Malformed property 
without name */ type: 'string' } as any, { name: 'field2', /* Missing type */ } as any ]; // Should handle malformed properties without crashing // Note: null properties will cause errors in the current implementation const result = ConfigValidator.validate(nodeType, config, properties); expect(result).toBeDefined(); expect(result.valid).toBeDefined(); }); }); describe('validateBatch method implementation', () => { it('should validate multiple configs in batch if method exists', () => { // This test is for future implementation const configs = [ { nodeType: 'nodes-base.test', config: { field: 'value1' }, properties: [] }, { nodeType: 'nodes-base.test', config: { field: 'value2' }, properties: [] } ]; // If validateBatch method is implemented in the future if ('validateBatch' in ConfigValidator) { const results = (ConfigValidator as any).validateBatch(configs); expect(results).toHaveLength(2); } else { // For now, just validate individually const results = configs.map(c => ConfigValidator.validate(c.nodeType, c.config, c.properties) ); expect(results).toHaveLength(2); } }); }); }); ``` -------------------------------------------------------------------------------- /src/templates/template-service.ts: -------------------------------------------------------------------------------- ```typescript import { DatabaseAdapter } from '../database/database-adapter'; import { TemplateRepository, StoredTemplate } from './template-repository'; import { logger } from '../utils/logger'; export interface TemplateInfo { id: number; name: string; description: string; author: { name: string; username: string; verified: boolean; }; nodes: string[]; views: number; created: string; url: string; metadata?: { categories: string[]; complexity: 'simple' | 'medium' | 'complex'; use_cases: string[]; estimated_setup_minutes: number; required_services: string[]; key_features: string[]; target_audience: string[]; }; } export interface TemplateWithWorkflow extends TemplateInfo { workflow: any; } export interface PaginatedResponse<T> { items: T[]; total: number; limit: number; offset: number; hasMore: boolean; } export interface TemplateMinimal { id: number; name: string; description: string; views: number; nodeCount: number; metadata?: { categories: string[]; complexity: 'simple' | 'medium' | 'complex'; use_cases: string[]; estimated_setup_minutes: number; required_services: string[]; key_features: string[]; target_audience: string[]; }; } export type TemplateField = 'id' | 'name' | 'description' | 'author' | 'nodes' | 'views' | 'created' | 'url' | 'metadata'; export type PartialTemplateInfo = Partial<TemplateInfo>; export class TemplateService { private repository: TemplateRepository; constructor(db: DatabaseAdapter) { this.repository = new TemplateRepository(db); } /** * List templates that use specific node types */ async listNodeTemplates(nodeTypes: string[], limit: number = 10, offset: number = 0): Promise<PaginatedResponse<TemplateInfo>> { const templates = this.repository.getTemplatesByNodes(nodeTypes, limit, offset); const total = this.repository.getNodeTemplatesCount(nodeTypes); return { items: templates.map(this.formatTemplateInfo), total, limit, offset, hasMore: offset + limit < total }; } /** * Get a specific template with different detail levels */ async getTemplate(templateId: number, mode: 'nodes_only' | 'structure' | 'full' = 'full'): Promise<any> { const template = this.repository.getTemplate(templateId); if (!template) { return null; } const workflow = JSON.parse(template.workflow_json || '{}'); if (mode 
=== 'nodes_only') { return { id: template.id, name: template.name, nodes: workflow.nodes?.map((n: any) => ({ type: n.type, name: n.name })) || [] }; } if (mode === 'structure') { return { id: template.id, name: template.name, nodes: workflow.nodes?.map((n: any) => ({ id: n.id, type: n.type, name: n.name, position: n.position })) || [], connections: workflow.connections || {} }; } // Full mode return { ...this.formatTemplateInfo(template), workflow }; } /** * Search templates by query */ async searchTemplates(query: string, limit: number = 20, offset: number = 0, fields?: string[]): Promise<PaginatedResponse<PartialTemplateInfo>> { const templates = this.repository.searchTemplates(query, limit, offset); const total = this.repository.getSearchCount(query); // If fields are specified, filter the template info const items = fields ? templates.map(t => this.formatTemplateWithFields(t, fields)) : templates.map(t => this.formatTemplateInfo(t)); return { items, total, limit, offset, hasMore: offset + limit < total }; } /** * Get templates for a specific task */ async getTemplatesForTask(task: string, limit: number = 10, offset: number = 0): Promise<PaginatedResponse<TemplateInfo>> { const templates = this.repository.getTemplatesForTask(task, limit, offset); const total = this.repository.getTaskTemplatesCount(task); return { items: templates.map(this.formatTemplateInfo), total, limit, offset, hasMore: offset + limit < total }; } /** * List all templates with minimal data */ async listTemplates(limit: number = 10, offset: number = 0, sortBy: 'views' | 'created_at' | 'name' = 'views', includeMetadata: boolean = false): Promise<PaginatedResponse<TemplateMinimal>> { const templates = this.repository.getAllTemplates(limit, offset, sortBy); const total = this.repository.getTemplateCount(); const items = templates.map(t => { const item: TemplateMinimal = { id: t.id, name: t.name, description: t.description, // Always include description views: t.views, nodeCount: JSON.parse(t.nodes_used).length }; // Optionally include metadata if (includeMetadata && t.metadata_json) { try { item.metadata = JSON.parse(t.metadata_json); } catch (error) { logger.warn(`Failed to parse metadata for template ${t.id}:`, error); } } return item; }); return { items, total, limit, offset, hasMore: offset + limit < total }; } /** * List available tasks */ listAvailableTasks(): string[] { return [ 'ai_automation', 'data_sync', 'webhook_processing', 'email_automation', 'slack_integration', 'data_transformation', 'file_processing', 'scheduling', 'api_integration', 'database_operations' ]; } /** * Search templates by metadata filters */ async searchTemplatesByMetadata( filters: { category?: string; complexity?: 'simple' | 'medium' | 'complex'; maxSetupMinutes?: number; minSetupMinutes?: number; requiredService?: string; targetAudience?: string; }, limit: number = 20, offset: number = 0 ): Promise<PaginatedResponse<TemplateInfo>> { const templates = this.repository.searchTemplatesByMetadata(filters, limit, offset); const total = this.repository.getMetadataSearchCount(filters); return { items: templates.map(this.formatTemplateInfo.bind(this)), total, limit, offset, hasMore: offset + limit < total }; } /** * Get available categories from template metadata */ async getAvailableCategories(): Promise<string[]> { return this.repository.getAvailableCategories(); } /** * Get available target audiences from template metadata */ async getAvailableTargetAudiences(): Promise<string[]> { return this.repository.getAvailableTargetAudiences(); } /** * 
Get templates by category */ async getTemplatesByCategory( category: string, limit: number = 10, offset: number = 0 ): Promise<PaginatedResponse<TemplateInfo>> { const templates = this.repository.getTemplatesByCategory(category, limit, offset); const total = this.repository.getMetadataSearchCount({ category }); return { items: templates.map(this.formatTemplateInfo.bind(this)), total, limit, offset, hasMore: offset + limit < total }; } /** * Get templates by complexity level */ async getTemplatesByComplexity( complexity: 'simple' | 'medium' | 'complex', limit: number = 10, offset: number = 0 ): Promise<PaginatedResponse<TemplateInfo>> { const templates = this.repository.getTemplatesByComplexity(complexity, limit, offset); const total = this.repository.getMetadataSearchCount({ complexity }); return { items: templates.map(this.formatTemplateInfo.bind(this)), total, limit, offset, hasMore: offset + limit < total }; } /** * Get template statistics */ async getTemplateStats(): Promise<Record<string, any>> { return this.repository.getTemplateStats(); } /** * Fetch and update templates from n8n.io * @param mode - 'rebuild' to clear and rebuild, 'update' to add only new templates */ async fetchAndUpdateTemplates( progressCallback?: (message: string, current: number, total: number) => void, mode: 'rebuild' | 'update' = 'rebuild' ): Promise<void> { try { // Dynamically import fetcher only when needed (requires axios) const { TemplateFetcher } = await import('./template-fetcher'); const fetcher = new TemplateFetcher(); // Get existing template IDs if in update mode let existingIds: Set<number> = new Set(); let sinceDate: Date | undefined; if (mode === 'update') { existingIds = this.repository.getExistingTemplateIds(); logger.info(`Update mode: Found ${existingIds.size} existing templates in database`); // Get most recent template date and fetch only templates from last 2 weeks const mostRecentDate = this.repository.getMostRecentTemplateDate(); if (mostRecentDate) { // Fetch templates from 2 weeks before the most recent template sinceDate = new Date(mostRecentDate); sinceDate.setDate(sinceDate.getDate() - 14); logger.info(`Update mode: Fetching templates since ${sinceDate.toISOString().split('T')[0]} (2 weeks before most recent)`); } else { // No templates yet, fetch from last 2 weeks sinceDate = new Date(); sinceDate.setDate(sinceDate.getDate() - 14); logger.info(`Update mode: No existing templates, fetching from last 2 weeks`); } } else { // Clear existing templates in rebuild mode this.repository.clearTemplates(); logger.info('Rebuild mode: Cleared existing templates'); } // Fetch template list logger.info(`Fetching template list from n8n.io (mode: ${mode})`); const templates = await fetcher.fetchTemplates((current, total) => { progressCallback?.('Fetching template list', current, total); }, sinceDate); logger.info(`Found ${templates.length} templates matching date criteria`); // Filter to only new templates if in update mode let templatesToFetch = templates; if (mode === 'update') { templatesToFetch = templates.filter(t => !existingIds.has(t.id)); logger.info(`Update mode: ${templatesToFetch.length} new templates to fetch (skipping ${templates.length - templatesToFetch.length} existing)`); if (templatesToFetch.length === 0) { logger.info('No new templates to fetch'); progressCallback?.('No new templates', 0, 0); return; } } // Fetch details for each template logger.info(`Fetching details for ${templatesToFetch.length} templates`); const details = await 
fetcher.fetchAllTemplateDetails(templatesToFetch, (current, total) => { progressCallback?.('Fetching template details', current, total); }); // Save to database logger.info('Saving templates to database'); let saved = 0; for (const template of templatesToFetch) { const detail = details.get(template.id); if (detail) { this.repository.saveTemplate(template, detail); saved++; } } logger.info(`Successfully saved ${saved} templates to database`); // Rebuild FTS5 index after bulk import if (saved > 0) { logger.info('Rebuilding FTS5 index for templates'); this.repository.rebuildTemplateFTS(); } progressCallback?.('Complete', saved, saved); } catch (error) { logger.error('Error fetching templates:', error); throw error; } } /** * Format stored template for API response */ private formatTemplateInfo(template: StoredTemplate): TemplateInfo { const info: TemplateInfo = { id: template.id, name: template.name, description: template.description, author: { name: template.author_name, username: template.author_username, verified: template.author_verified === 1 }, nodes: JSON.parse(template.nodes_used), views: template.views, created: template.created_at, url: template.url }; // Include metadata if available if (template.metadata_json) { try { info.metadata = JSON.parse(template.metadata_json); } catch (error) { logger.warn(`Failed to parse metadata for template ${template.id}:`, error); } } return info; } /** * Format template with only specified fields */ private formatTemplateWithFields(template: StoredTemplate, fields: string[]): PartialTemplateInfo { const fullInfo = this.formatTemplateInfo(template); const result: PartialTemplateInfo = {}; // Only include requested fields for (const field of fields) { if (field in fullInfo) { (result as any)[field] = (fullInfo as any)[field]; } } return result; } } ``` -------------------------------------------------------------------------------- /src/services/n8n-validation.ts: -------------------------------------------------------------------------------- ```typescript import { z } from 'zod'; import { WorkflowNode, WorkflowConnection, Workflow } from '../types/n8n-api'; // Zod schemas for n8n API validation export const workflowNodeSchema = z.object({ id: z.string(), name: z.string(), type: z.string(), typeVersion: z.number(), position: z.tuple([z.number(), z.number()]), parameters: z.record(z.unknown()), credentials: z.record(z.unknown()).optional(), disabled: z.boolean().optional(), notes: z.string().optional(), notesInFlow: z.boolean().optional(), continueOnFail: z.boolean().optional(), retryOnFail: z.boolean().optional(), maxTries: z.number().optional(), waitBetweenTries: z.number().optional(), alwaysOutputData: z.boolean().optional(), executeOnce: z.boolean().optional(), }); export const workflowConnectionSchema = z.record( z.object({ main: z.array( z.array( z.object({ node: z.string(), type: z.string(), index: z.number(), }) ) ), }) ); export const workflowSettingsSchema = z.object({ executionOrder: z.enum(['v0', 'v1']).default('v1'), timezone: z.string().optional(), saveDataErrorExecution: z.enum(['all', 'none']).default('all'), saveDataSuccessExecution: z.enum(['all', 'none']).default('all'), saveManualExecutions: z.boolean().default(true), saveExecutionProgress: z.boolean().default(true), executionTimeout: z.number().optional(), errorWorkflow: z.string().optional(), callerPolicy: z.enum(['any', 'workflowsFromSameOwner', 'workflowsFromAList']).optional(), }); // Default settings for workflow creation export const defaultWorkflowSettings = { 
executionOrder: 'v1' as const, saveDataErrorExecution: 'all' as const, saveDataSuccessExecution: 'all' as const, saveManualExecutions: true, saveExecutionProgress: true, }; // Validation functions export function validateWorkflowNode(node: unknown): WorkflowNode { return workflowNodeSchema.parse(node); } export function validateWorkflowConnections(connections: unknown): WorkflowConnection { return workflowConnectionSchema.parse(connections); } export function validateWorkflowSettings(settings: unknown): z.infer<typeof workflowSettingsSchema> { return workflowSettingsSchema.parse(settings); } // Clean workflow data for API operations export function cleanWorkflowForCreate(workflow: Partial<Workflow>): Partial<Workflow> { const { // Remove read-only fields id, createdAt, updatedAt, versionId, meta, // Remove fields that cause API errors during creation active, tags, // Keep everything else ...cleanedWorkflow } = workflow; // Ensure settings are present with defaults if (!cleanedWorkflow.settings) { cleanedWorkflow.settings = defaultWorkflowSettings; } return cleanedWorkflow; } /** * Clean workflow data for update operations. * * This function removes read-only and computed fields that should not be sent * in API update requests. It does NOT add any default values or new fields. * * Note: Unlike cleanWorkflowForCreate, this function does not add default settings. * The n8n API will reject update requests that include properties not present in * the original workflow ("settings must NOT have additional properties" error). * * Settings are filtered to only include whitelisted properties to prevent API * errors when workflows from n8n contain UI-only or deprecated properties. * * @param workflow - The workflow object to clean * @returns A cleaned partial workflow suitable for API updates */ export function cleanWorkflowForUpdate(workflow: Workflow): Partial<Workflow> { const { // Remove read-only/computed fields id, createdAt, updatedAt, versionId, meta, staticData, // Remove fields that cause API errors pinData, tags, // Remove additional fields that n8n API doesn't accept isArchived, usedCredentials, sharedWithProjects, triggerCount, shared, active, // Keep everything else ...cleanedWorkflow } = workflow as any; // CRITICAL FIX for Issue #248: // The n8n API has version-specific behavior for settings in workflow updates: // // PROBLEM: // - Some versions reject updates with settings properties (community forum reports) // - Cloud versions REQUIRE settings property to be present (n8n.estyl.team) // - Properties like callerPolicy cause "additional properties" errors // // SOLUTION: // - Filter settings to only include whitelisted properties (OpenAPI spec) // - If no settings provided, use empty object {} for safety // - Empty object satisfies "required property" validation (cloud API) // - Whitelisted properties prevent "additional properties" errors // // References: // - https://community.n8n.io/t/api-workflow-update-endpoint-doesnt-support-setting-callerpolicy/161916 // - OpenAPI spec: workflowSettings schema // - Tested on n8n.estyl.team (cloud) and localhost (self-hosted) // Whitelisted settings properties from n8n OpenAPI spec const safeSettingsProperties = [ 'saveExecutionProgress', 'saveManualExecutions', 'saveDataErrorExecution', 'saveDataSuccessExecution', 'executionTimeout', 'errorWorkflow', 'timezone', 'executionOrder' ]; if (cleanedWorkflow.settings && typeof cleanedWorkflow.settings === 'object') { // Filter to only safe properties const filteredSettings: any = {}; for (const key 
of safeSettingsProperties) { if (key in cleanedWorkflow.settings) { filteredSettings[key] = (cleanedWorkflow.settings as any)[key]; } } cleanedWorkflow.settings = filteredSettings; } else { // No settings provided - use empty object for safety cleanedWorkflow.settings = {}; } return cleanedWorkflow; } // Validate workflow structure export function validateWorkflowStructure(workflow: Partial<Workflow>): string[] { const errors: string[] = []; // Check required fields if (!workflow.name) { errors.push('Workflow name is required'); } if (!workflow.nodes || workflow.nodes.length === 0) { errors.push('Workflow must have at least one node'); } if (!workflow.connections) { errors.push('Workflow connections are required'); } // Check for minimum viable workflow if (workflow.nodes && workflow.nodes.length === 1) { const singleNode = workflow.nodes[0]; const isWebhookOnly = singleNode.type === 'n8n-nodes-base.webhook' || singleNode.type === 'n8n-nodes-base.webhookTrigger'; if (!isWebhookOnly) { errors.push('Single-node workflows are only valid for webhooks. Add at least one more node and connect them. Example: Manual Trigger → Set node'); } } // Check for empty connections in multi-node workflows if (workflow.nodes && workflow.nodes.length > 1 && workflow.connections) { const connectionCount = Object.keys(workflow.connections).length; if (connectionCount === 0) { errors.push('Multi-node workflow has empty connections. Connect nodes like this: connections: { "Node1 Name": { "main": [[{ "node": "Node2 Name", "type": "main", "index": 0 }]] } }'); } } // Validate nodes if (workflow.nodes) { workflow.nodes.forEach((node, index) => { try { validateWorkflowNode(node); // Additional check for common node type mistakes if (node.type.startsWith('nodes-base.')) { errors.push(`Invalid node type "${node.type}" at index ${index}. Use "n8n-nodes-base.${node.type.substring(11)}" instead.`); } else if (!node.type.includes('.')) { errors.push(`Invalid node type "${node.type}" at index ${index}. Node types must include package prefix (e.g., "n8n-nodes-base.webhook").`); } } catch (error) { errors.push(`Invalid node at index ${index}: ${error instanceof Error ? error.message : 'Unknown error'}`); } }); } // Validate connections if (workflow.connections) { try { validateWorkflowConnections(workflow.connections); } catch (error) { errors.push(`Invalid connections: ${error instanceof Error ? error.message : 'Unknown error'}`); } } // Validate that all connection references exist and use node NAMES (not IDs) if (workflow.nodes && workflow.connections) { const nodeNames = new Set(workflow.nodes.map(node => node.name)); const nodeIds = new Set(workflow.nodes.map(node => node.id)); const nodeIdToName = new Map(workflow.nodes.map(node => [node.id, node.name])); Object.entries(workflow.connections).forEach(([sourceName, connection]) => { // Check if source exists by name (correct) if (!nodeNames.has(sourceName)) { // Check if they're using an ID instead of name if (nodeIds.has(sourceName)) { const correctName = nodeIdToName.get(sourceName); errors.push(`Connection uses node ID '${sourceName}' but must use node name '${correctName}'. 
Change connections.${sourceName} to connections['${correctName}']`); } else { errors.push(`Connection references non-existent node: ${sourceName}`); } } if (connection.main && Array.isArray(connection.main)) { connection.main.forEach((outputs, outputIndex) => { if (Array.isArray(outputs)) { outputs.forEach((target, targetIndex) => { // Check if target exists by name (correct) if (!nodeNames.has(target.node)) { // Check if they're using an ID instead of name if (nodeIds.has(target.node)) { const correctName = nodeIdToName.get(target.node); errors.push(`Connection target uses node ID '${target.node}' but must use node name '${correctName}' (from ${sourceName}[${outputIndex}][${targetIndex}])`); } else { errors.push(`Connection references non-existent target node: ${target.node} (from ${sourceName}[${outputIndex}][${targetIndex}])`); } } }); } }); } }); } return errors; } // Check if workflow has webhook trigger export function hasWebhookTrigger(workflow: Workflow): boolean { return workflow.nodes.some(node => node.type === 'n8n-nodes-base.webhook' || node.type === 'n8n-nodes-base.webhookTrigger' ); } // Get webhook URL from workflow export function getWebhookUrl(workflow: Workflow): string | null { const webhookNode = workflow.nodes.find(node => node.type === 'n8n-nodes-base.webhook' || node.type === 'n8n-nodes-base.webhookTrigger' ); if (!webhookNode || !webhookNode.parameters) { return null; } // Check for path parameter const path = webhookNode.parameters.path as string | undefined; if (!path) { return null; } // Note: We can't construct the full URL without knowing the n8n instance URL // The caller will need to prepend the base URL return path; } // Helper function to generate proper workflow structure examples export function getWorkflowStructureExample(): string { return ` Minimal Workflow Example: { "name": "My Workflow", "nodes": [ { "id": "manual-trigger-1", "name": "Manual Trigger", "type": "n8n-nodes-base.manualTrigger", "typeVersion": 1, "position": [250, 300], "parameters": {} }, { "id": "set-1", "name": "Set Data", "type": "n8n-nodes-base.set", "typeVersion": 3.4, "position": [450, 300], "parameters": { "mode": "manual", "assignments": { "assignments": [{ "id": "1", "name": "message", "value": "Hello World", "type": "string" }] } } } ], "connections": { "Manual Trigger": { "main": [[{ "node": "Set Data", "type": "main", "index": 0 }]] } } } IMPORTANT: In connections, use the node NAME (e.g., "Manual Trigger"), NOT the node ID or type!`; } // Helper function to fix common workflow issues export function getWorkflowFixSuggestions(errors: string[]): string[] { const suggestions: string[] = []; if (errors.some(e => e.includes('empty connections'))) { suggestions.push('Add connections between your nodes. Each node (except endpoints) should connect to another node.'); suggestions.push('Connection format: connections: { "Source Node Name": { "main": [[{ "node": "Target Node Name", "type": "main", "index": 0 }]] } }'); } if (errors.some(e => e.includes('Single-node workflows'))) { suggestions.push('Add at least one more node to process data. Common patterns: Trigger → Process → Output'); suggestions.push('Examples: Manual Trigger → Set, Webhook → HTTP Request, Schedule Trigger → Database Query'); } if (errors.some(e => e.includes('node ID') && e.includes('instead of node name'))) { suggestions.push('Replace node IDs with node names in connections. 
The name is what appears in the node header.'); suggestions.push('Wrong: connections: { "set-1": {...} }, Right: connections: { "Set Data": {...} }'); } return suggestions; } ``` -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- ```yaml name: Test Suite on: push: branches: [main, feat/comprehensive-testing-suite] paths-ignore: - '**.md' - '**.txt' - 'docs/**' - 'examples/**' - '.github/FUNDING.yml' - '.github/ISSUE_TEMPLATE/**' - '.github/pull_request_template.md' - '.gitignore' - 'LICENSE*' - 'ATTRIBUTION.md' - 'SECURITY.md' - 'CODE_OF_CONDUCT.md' pull_request: branches: [main] paths-ignore: - '**.md' - '**.txt' - 'docs/**' - 'examples/**' - '.github/FUNDING.yml' - '.github/ISSUE_TEMPLATE/**' - '.github/pull_request_template.md' - '.gitignore' - 'LICENSE*' - 'ATTRIBUTION.md' - 'SECURITY.md' - 'CODE_OF_CONDUCT.md' permissions: contents: read issues: write pull-requests: write checks: write jobs: test: runs-on: ubuntu-latest timeout-minutes: 10 # Add a 10-minute timeout to prevent hanging steps: - uses: actions/checkout@v4 - uses: actions/setup-node@v4 with: node-version: 20 cache: 'npm' - name: Install dependencies run: npm ci # Verify test environment setup - name: Verify test environment run: | echo "Current directory: $(pwd)" echo "Checking for .env.test file:" ls -la .env.test || echo ".env.test not found!" echo "First few lines of .env.test:" head -5 .env.test || echo "Cannot read .env.test" # Run unit tests first (without MSW) - name: Run unit tests with coverage run: npm run test:unit -- --coverage --coverage.thresholds.lines=0 --coverage.thresholds.functions=0 --coverage.thresholds.branches=0 --coverage.thresholds.statements=0 --reporter=default --reporter=junit env: CI: true # Run integration tests separately (with MSW setup) - name: Run integration tests run: npm run test:integration -- --reporter=default --reporter=junit env: CI: true N8N_API_URL: ${{ secrets.N8N_API_URL }} N8N_API_KEY: ${{ secrets.N8N_API_KEY }} N8N_TEST_WEBHOOK_GET_URL: ${{ secrets.N8N_TEST_WEBHOOK_GET_URL }} N8N_TEST_WEBHOOK_POST_URL: ${{ secrets.N8N_TEST_WEBHOOK_POST_URL }} N8N_TEST_WEBHOOK_PUT_URL: ${{ secrets.N8N_TEST_WEBHOOK_PUT_URL }} N8N_TEST_WEBHOOK_DELETE_URL: ${{ secrets.N8N_TEST_WEBHOOK_DELETE_URL }} # Generate test summary - name: Generate test summary if: always() run: node scripts/generate-test-summary.js # Generate detailed reports - name: Generate detailed reports if: always() run: node scripts/generate-detailed-reports.js # Upload test results artifacts - name: Upload test results if: always() uses: actions/upload-artifact@v4 with: name: test-results-${{ github.run_number }}-${{ github.run_attempt }} path: | test-results/ test-summary.md test-reports/ retention-days: 30 if-no-files-found: warn # Upload coverage artifacts - name: Upload coverage reports if: always() uses: actions/upload-artifact@v4 with: name: coverage-${{ github.run_number }}-${{ github.run_attempt }} path: | coverage/ retention-days: 30 if-no-files-found: warn # Upload coverage to Codecov - name: Upload coverage to Codecov if: always() uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} files: ./coverage/lcov.info flags: unittests name: codecov-umbrella fail_ci_if_error: false verbose: true # Run linting - name: Run linting run: npm run lint # Run type checking - name: Run type checking run: npm run typecheck # Run benchmarks - name: Run benchmarks id: benchmarks 
run: npm run benchmark:ci continue-on-error: true # Upload benchmark results - name: Upload benchmark results if: always() && steps.benchmarks.outcome != 'skipped' uses: actions/upload-artifact@v4 with: name: benchmark-results-${{ github.run_number }}-${{ github.run_attempt }} path: | benchmark-results.json retention-days: 30 if-no-files-found: warn # Create test report comment for PRs - name: Create test report comment if: github.event_name == 'pull_request' && always() uses: actions/github-script@v7 continue-on-error: true with: script: | const fs = require('fs'); let summary = '## Test Results\n\nTest summary generation failed.'; try { if (fs.existsSync('test-summary.md')) { summary = fs.readFileSync('test-summary.md', 'utf8'); } } catch (error) { console.error('Error reading test summary:', error); } try { // Find existing comment const { data: comments } = await github.rest.issues.listComments({ owner: context.repo.owner, repo: context.repo.repo, issue_number: context.issue.number, }); const botComment = comments.find(comment => comment.user.type === 'Bot' && comment.body.includes('## Test Results') ); if (botComment) { // Update existing comment await github.rest.issues.updateComment({ owner: context.repo.owner, repo: context.repo.repo, comment_id: botComment.id, body: summary }); } else { // Create new comment await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: context.issue.number, body: summary }); } } catch (error) { console.error('Failed to create/update PR comment:', error.message); console.log('This is likely due to insufficient permissions for external PRs.'); console.log('Test results have been saved to the job summary instead.'); } # Generate job summary - name: Generate job summary if: always() run: | echo "# Test Run Summary" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY if [ -f test-summary.md ]; then cat test-summary.md >> $GITHUB_STEP_SUMMARY else echo "Test summary generation failed." 
>> $GITHUB_STEP_SUMMARY fi echo "" >> $GITHUB_STEP_SUMMARY echo "## 📥 Download Artifacts" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY echo "- [Test Results](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY echo "- [Coverage Report](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY echo "- [Benchmark Results](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY # Store test metadata - name: Store test metadata if: always() run: | cat > test-metadata.json << EOF { "run_id": "${{ github.run_id }}", "run_number": "${{ github.run_number }}", "run_attempt": "${{ github.run_attempt }}", "sha": "${{ github.sha }}", "ref": "${{ github.ref }}", "event_name": "${{ github.event_name }}", "repository": "${{ github.repository }}", "actor": "${{ github.actor }}", "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", "node_version": "$(node --version)", "npm_version": "$(npm --version)" } EOF - name: Upload test metadata if: always() uses: actions/upload-artifact@v4 with: name: test-metadata-${{ github.run_number }}-${{ github.run_attempt }} path: test-metadata.json retention-days: 30 # Separate job to process and publish test results publish-results: needs: test runs-on: ubuntu-latest if: always() permissions: checks: write pull-requests: write steps: - uses: actions/checkout@v4 # Download all artifacts - name: Download all artifacts uses: actions/download-artifact@v4 with: path: artifacts # Publish test results as checks - name: Publish test results uses: dorny/test-reporter@v1 if: always() continue-on-error: true with: name: Test Results path: 'artifacts/test-results-*/test-results/junit.xml' reporter: java-junit fail-on-error: false fail-on-empty: false # Create a combined artifact with all results - name: Create combined results artifact if: always() run: | mkdir -p combined-results cp -r artifacts/* combined-results/ 2>/dev/null || true # Create index file cat > combined-results/index.html << 'EOF' <!DOCTYPE html> <html> <head> <title>n8n-mcp Test Results</title> <style> body { font-family: Arial, sans-serif; margin: 40px; } h1 { color: #333; } .section { margin: 20px 0; padding: 20px; border: 1px solid #ddd; border-radius: 5px; } a { color: #0066cc; text-decoration: none; } a:hover { text-decoration: underline; } </style> </head> <body> <h1>n8n-mcp Test Results</h1> <div class="section"> <h2>Test Reports</h2> <ul> <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-reports/report.html">📊 Detailed HTML Report</a></li> <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-results/html/index.html">📈 Vitest HTML Report</a></li> <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-reports/report.md">📄 Markdown Report</a></li> <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-summary.md">📝 PR Summary</a></li> <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-results/junit.xml">🔧 JUnit XML</a></li> <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-results/results.json">🔢 JSON Results</a></li> <li><a href="test-results-${{ github.run_number }}-${{ github.run_attempt }}/test-reports/report.json">📊 Full JSON Report</a></li> </ul> </div> <div class="section"> <h2>Coverage Reports</h2> <ul> <li><a href="coverage-${{ github.run_number }}-${{ github.run_attempt 
}}/html/index.html">HTML Coverage Report</a></li> <li><a href="coverage-${{ github.run_number }}-${{ github.run_attempt }}/lcov.info">LCOV Report</a></li> <li><a href="coverage-${{ github.run_number }}-${{ github.run_attempt }}/coverage-summary.json">Coverage Summary JSON</a></li> </ul> </div> <div class="section"> <h2>Benchmark Results</h2> <ul> <li><a href="benchmark-results-${{ github.run_number }}-${{ github.run_attempt }}/benchmark-results.json">Benchmark Results JSON</a></li> </ul> </div> <div class="section"> <h2>Metadata</h2> <ul> <li><a href="test-metadata-${{ github.run_number }}-${{ github.run_attempt }}/test-metadata.json">Test Run Metadata</a></li> </ul> </div> <div class="section"> <p><em>Generated at $(date -u +%Y-%m-%dT%H:%M:%SZ)</em></p> <p><em>Run: #${{ github.run_number }} | SHA: ${{ github.sha }}</em></p> </div> </body> </html> EOF - name: Upload combined results if: always() uses: actions/upload-artifact@v4 with: name: all-test-results-${{ github.run_number }} path: combined-results/ retention-days: 90 ``` -------------------------------------------------------------------------------- /docs/DOCKER_README.md: -------------------------------------------------------------------------------- ```markdown # Docker Deployment Guide for n8n-MCP This guide provides comprehensive instructions for deploying n8n-MCP using Docker. ## 🚀 Quick Start ### Prerequisites - Docker Engine 20.10+ (Docker Desktop on Windows/macOS, or Docker Engine on Linux) - Docker Compose V2 - (Optional) openssl for generating auth tokens ### 1. HTTP Server Mode (Recommended) The simplest way to deploy n8n-MCP is using Docker Compose with HTTP mode: ```bash # Clone the repository git clone https://github.com/czlonkowski/n8n-mcp.git cd n8n-mcp # Create .env file with auth token cat > .env << EOF AUTH_TOKEN=$(openssl rand -base64 32) USE_FIXED_HTTP=true EOF # Start the server docker compose up -d # Check logs docker compose logs -f # Test the health endpoint curl http://localhost:3000/health ``` ### 2. Using Pre-built Images Pre-built images are available on GitHub Container Registry: ```bash # Pull the latest image (~280MB optimized) docker pull ghcr.io/czlonkowski/n8n-mcp:latest # Run with HTTP mode docker run -d \ --name n8n-mcp \ -e MCP_MODE=http \ -e USE_FIXED_HTTP=true \ -e AUTH_TOKEN=your-secure-token \ -p 3000:3000 \ ghcr.io/czlonkowski/n8n-mcp:latest ``` ## 📋 Configuration Options ### Environment Variables | Variable | Description | Default | Required | |----------|-------------|---------|----------| | `MCP_MODE` | Server mode: `stdio` or `http` | `stdio` | No | | `AUTH_TOKEN` | Bearer token for HTTP authentication | - | Yes (HTTP mode)* | | `AUTH_TOKEN_FILE` | Path to file containing auth token (v2.7.5+) | - | Yes (HTTP mode)* | | `PORT` | HTTP server port | `3000` | No | | `NODE_ENV` | Environment: `development` or `production` | `production` | No | | `LOG_LEVEL` | Logging level: `debug`, `info`, `warn`, `error` | `info` | No | | `NODE_DB_PATH` | Custom database path (v2.7.16+) | `/app/data/nodes.db` | No | | `AUTH_RATE_LIMIT_WINDOW` | Rate limit window in ms (v2.16.3+) | `900000` (15 min) | No | | `AUTH_RATE_LIMIT_MAX` | Max auth attempts per window (v2.16.3+) | `20` | No | | `WEBHOOK_SECURITY_MODE` | SSRF protection: `strict`/`moderate`/`permissive` (v2.16.3+) | `strict` | No | *Either `AUTH_TOKEN` or `AUTH_TOKEN_FILE` must be set for HTTP mode. If both are set, `AUTH_TOKEN` takes precedence. 
### Configuration File Support (v2.8.2+) You can mount a JSON configuration file to set environment variables: ```bash # Create config file cat > config.json << EOF { "MCP_MODE": "http", "AUTH_TOKEN": "your-secure-token", "LOG_LEVEL": "info", "N8N_API_URL": "https://your-n8n-instance.com", "N8N_API_KEY": "your-api-key" } EOF # Run with config file docker run -d \ --name n8n-mcp \ -v $(pwd)/config.json:/app/config.json:ro \ -p 3000:3000 \ ghcr.io/czlonkowski/n8n-mcp:latest ``` The config file supports: - All standard environment variables - Nested objects (flattened with underscore separators) - Arrays, booleans, numbers, and strings - Secure handling with command injection prevention - Dangerous variable blocking for security ### Docker Compose Configuration The default `docker-compose.yml` provides: - Automatic restart on failure - Named volume for data persistence - Memory limits (512MB max, 256MB reserved) - Health checks every 30 seconds - Container labels for organization ### Custom Configuration Create a `docker-compose.override.yml` for local customizations: ```yaml # docker-compose.override.yml services: n8n-mcp: ports: - "8080:3000" # Use different port environment: LOG_LEVEL: debug NODE_ENV: development volumes: - ./custom-data:/app/data # Use local directory ``` ## 🔧 Usage Modes ### HTTP Mode (Remote Access) Perfect for cloud deployments and remote access: ```bash # Start in HTTP mode docker run -d \ --name n8n-mcp-http \ -e MCP_MODE=http \ -e AUTH_TOKEN=your-secure-token \ -p 3000:3000 \ ghcr.io/czlonkowski/n8n-mcp:latest ``` Configure Claude Desktop with mcp-remote: ```json { "mcpServers": { "n8n-remote": { "command": "npx", "args": [ "-y", "@modelcontextprotocol/mcp-remote@latest", "connect", "http://your-server:3000/mcp" ], "env": { "MCP_AUTH_TOKEN": "your-secure-token" } } } } ``` ### Stdio Mode (Local Direct Access) For local Claude Desktop integration without HTTP: ```bash # Run in stdio mode (interactive) docker run --rm -i --init \ -e MCP_MODE=stdio \ -v n8n-mcp-data:/app/data \ ghcr.io/czlonkowski/n8n-mcp:latest ``` ### Server Mode (Command Line) You can also use the `serve` command to start in HTTP mode: ```bash # Using the serve command (v2.8.2+) docker run -d \ --name n8n-mcp \ -e AUTH_TOKEN=your-secure-token \ -p 3000:3000 \ ghcr.io/czlonkowski/n8n-mcp:latest serve ``` Configure Claude Desktop: ```json { "mcpServers": { "n8n-docker": { "command": "docker", "args": [ "run", "--rm", "-i", "--init", "-e", "MCP_MODE=stdio", "-v", "n8n-mcp-data:/app/data", "ghcr.io/czlonkowski/n8n-mcp:latest" ] } } } ``` ## 🏗️ Building from Source ### Build Locally ```bash # Clone repository git clone https://github.com/czlonkowski/n8n-mcp.git cd n8n-mcp # Build image docker build -t n8n-mcp:local . # Run your local build docker run -d \ --name n8n-mcp-local \ -e MCP_MODE=http \ -e AUTH_TOKEN=test-token \ -p 3000:3000 \ n8n-mcp:local ``` ### Multi-architecture Build Build for multiple platforms: ```bash # Enable buildx docker buildx create --use # Build for amd64 and arm64 docker buildx build \ --platform linux/amd64,linux/arm64 \ -t n8n-mcp:multiarch \ --load \ . 
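# A hedged alternative (assuming you have push access to a container registry
# such as ghcr.io): publish the multi-arch manifest directly to the registry
# instead of loading it into the local image store. The image name below is a
# placeholder, not an official tag.
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  -t ghcr.io/<your-account>/n8n-mcp:multiarch \
  --push \
  .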
``` ## 🔍 Health Monitoring ### Health Check Endpoint The container includes a health check that runs every 30 seconds: ```bash # Check health status curl http://localhost:3000/health ``` Response example: ```json { "status": "healthy", "uptime": 120.5, "memory": { "used": "8.5 MB", "rss": "45.2 MB", "external": "1.2 MB" }, "version": "2.3.0", "mode": "http", "database": { "adapter": "better-sqlite3", "ready": true } } ``` ### Docker Health Status ```bash # Check container health docker ps --format "table {{.Names}}\t{{.Status}}" # View health check logs docker inspect n8n-mcp | jq '.[0].State.Health' ``` ## 🔒 Security Features (v2.16.3+) ### Rate Limiting Protects against brute force authentication attacks: ```bash # Configure in .env or docker-compose.yml AUTH_RATE_LIMIT_WINDOW=900000 # 15 minutes in milliseconds AUTH_RATE_LIMIT_MAX=20 # 20 attempts per IP per window ``` ### SSRF Protection Prevents Server-Side Request Forgery when using webhook triggers: ```bash # For production (blocks localhost + private IPs + cloud metadata) WEBHOOK_SECURITY_MODE=strict # For local development with local n8n instance WEBHOOK_SECURITY_MODE=moderate # For internal testing only (allows private IPs) WEBHOOK_SECURITY_MODE=permissive ``` **Note:** Cloud metadata endpoints (169.254.169.254, metadata.google.internal, etc.) are ALWAYS blocked in all modes. ## 🔒 Authentication ### Authentication n8n-MCP supports two authentication methods for HTTP mode: #### Method 1: AUTH_TOKEN (Environment Variable) - Set the token directly as an environment variable - Simple and straightforward for basic deployments - Always use a strong token (minimum 32 characters) ```bash # Generate secure token openssl rand -base64 32 # Use in Docker docker run -e AUTH_TOKEN=your-secure-token ... ``` #### Method 2: AUTH_TOKEN_FILE (File Path) - NEW in v2.7.5 - Read token from a file (Docker secrets compatible) - More secure for production deployments - Prevents token exposure in process lists ```bash # Create token file echo "your-secure-token" > /path/to/token.txt # Use with Docker secrets docker run -e AUTH_TOKEN_FILE=/run/secrets/auth_token ... ``` #### Best Practices - Never commit tokens to version control - Rotate tokens regularly - Use AUTH_TOKEN_FILE with Docker secrets for production - Ensure token files have restricted permissions (600) ### Network Security For production deployments: 1. **Use HTTPS** - Put a reverse proxy (nginx, Caddy) in front 2. **Firewall** - Restrict access to trusted IPs only 3. **VPN** - Consider VPN access for internal use Example with Caddy: ``` your-domain.com { reverse_proxy n8n-mcp:3000 basicauth * { admin $2a$14$... # bcrypt hash } } ``` ### Container Security - Runs as non-root user (uid 1001) - Read-only root filesystem compatible - No unnecessary packages installed - Regular security updates via GitHub Actions ## 📊 Resource Management ### Memory Limits Default limits in docker-compose.yml: - Maximum: 512MB - Reserved: 256MB Adjust based on your needs: ```yaml services: n8n-mcp: deploy: resources: limits: memory: 1G reservations: memory: 512M ``` ### Volume Management ```bash # List volumes docker volume ls | grep n8n-mcp # Inspect volume docker volume inspect n8n-mcp-data # Backup data docker run --rm \ -v n8n-mcp-data:/source:ro \ -v $(pwd):/backup \ alpine tar czf /backup/n8n-mcp-backup.tar.gz -C /source . 
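# Optional sanity check (assumes the backup archive is in the current
# directory): list the archive contents before restoring.
tar tzf n8n-mcp-backup.tar.gz | head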
# Restore data docker run --rm \ -v n8n-mcp-data:/target \ -v $(pwd):/backup:ro \ alpine tar xzf /backup/n8n-mcp-backup.tar.gz -C /target ``` ### Custom Database Path (v2.7.16+) You can specify a custom database location using `NODE_DB_PATH`: ```bash # Use custom path within mounted volume docker run -d \ --name n8n-mcp \ -e MCP_MODE=http \ -e AUTH_TOKEN=your-token \ -e NODE_DB_PATH=/app/data/custom/my-nodes.db \ -v n8n-mcp-data:/app/data \ -p 3000:3000 \ ghcr.io/czlonkowski/n8n-mcp:latest ``` **Important Notes:** - The path must end with `.db` - For data persistence, ensure the path is within a mounted volume - Paths outside mounted volumes will be lost on container restart - The directory will be created automatically if it doesn't exist ## 🐛 Troubleshooting ### Common Issues #### Container Exits Immediately ```bash # Check logs docker logs n8n-mcp # Common causes: # - Missing AUTH_TOKEN in HTTP mode # - Database initialization failure # - Port already in use ``` #### Database Not Initialized ```bash # Manually initialize database docker exec n8n-mcp node dist/scripts/rebuild.js # Or recreate container with fresh volume docker compose down -v docker compose up -d ``` #### Permission Errors ```bash # Fix volume permissions docker exec n8n-mcp chown -R nodejs:nodejs /app/data ``` ### Debug Mode Enable debug logging: ```bash docker run -d \ --name n8n-mcp-debug \ -e MCP_MODE=http \ -e AUTH_TOKEN=test \ -e LOG_LEVEL=debug \ -p 3000:3000 \ ghcr.io/czlonkowski/n8n-mcp:latest ``` ### Container Shell Access ```bash # Access running container docker exec -it n8n-mcp sh # Run as root for debugging docker exec -it -u root n8n-mcp sh ``` ## 🚀 Production Deployment ### Recommended Setup 1. **Use Docker Compose** for easier management 2. **Enable HTTPS** with reverse proxy 3. **Set up monitoring** (Prometheus, Grafana) 4. **Configure backups** for the data volume 5. 
**Use secrets management** for AUTH_TOKEN ### Example Production Stack ```yaml # docker-compose.prod.yml services: n8n-mcp: image: ghcr.io/czlonkowski/n8n-mcp:latest restart: always environment: MCP_MODE: http AUTH_TOKEN_FILE: /run/secrets/auth_token NODE_ENV: production secrets: - auth_token networks: - internal deploy: resources: limits: memory: 1G reservations: memory: 512M nginx: image: nginx:alpine restart: always ports: - "443:443" volumes: - ./nginx.conf:/etc/nginx/nginx.conf:ro - ./certs:/etc/nginx/certs:ro networks: - internal - external networks: internal: external: secrets: auth_token: file: ./secrets/auth_token.txt ``` ## 📦 Available Images - `ghcr.io/czlonkowski/n8n-mcp:latest` - Latest stable release - `ghcr.io/czlonkowski/n8n-mcp:2.3.0` - Specific version - `ghcr.io/czlonkowski/n8n-mcp:main-abc123` - Development builds ### Image Details - Base: `node:22-alpine` - Size: ~280MB compressed - Features: Pre-built database with all node information - Database: Complete SQLite with 525+ nodes - Architectures: `linux/amd64`, `linux/arm64` - Updated: Automatically via GitHub Actions ## 🔄 Updates and Maintenance ### Updating ```bash # Pull latest image docker compose pull # Recreate container docker compose up -d # View update logs docker compose logs -f ``` ### Automatic Updates (Watchtower) ```yaml # Add to docker-compose.yml services: watchtower: image: containrrr/watchtower volumes: - /var/run/docker.sock:/var/run/docker.sock command: --interval 86400 n8n-mcp ``` ## 📚 Additional Resources - [Main Documentation](./docs/README.md) - [HTTP Deployment Guide](./docs/HTTP_DEPLOYMENT.md) - [Troubleshooting Guide](./docs/TROUBLESHOOTING.md) - [Installation Guide](./docs/INSTALLATION.md) ## 🤝 Support - Issues: [GitHub Issues](https://github.com/czlonkowski/n8n-mcp/issues) - Discussions: [GitHub Discussions](https://github.com/czlonkowski/n8n-mcp/discussions) --- *Last updated: July 2025 - Docker implementation v1.1* ``` -------------------------------------------------------------------------------- /tests/integration/database/connection-management.test.ts: -------------------------------------------------------------------------------- ```typescript import { describe, it, expect, beforeEach, afterEach } from 'vitest'; import Database from 'better-sqlite3'; import * as fs from 'fs'; import * as path from 'path'; import { TestDatabase, TestDataGenerator } from './test-utils'; describe('Database Connection Management', () => { let testDb: TestDatabase; afterEach(async () => { if (testDb) { await testDb.cleanup(); } }); describe('In-Memory Database', () => { it('should create and connect to in-memory database', async () => { testDb = new TestDatabase({ mode: 'memory' }); const db = await testDb.initialize(); expect(db).toBeDefined(); expect(db.open).toBe(true); expect(db.name).toBe(':memory:'); }); it('should execute queries on in-memory database', async () => { testDb = new TestDatabase({ mode: 'memory' }); const db = await testDb.initialize(); // Test basic query const result = db.prepare('SELECT 1 as value').get() as { value: number }; expect(result.value).toBe(1); // Test table exists const tables = db.prepare( "SELECT name FROM sqlite_master WHERE type='table' AND name='nodes'" ).all(); expect(tables.length).toBe(1); }); it('should handle multiple connections to same in-memory database', async () => { // Each in-memory database is isolated const db1 = new TestDatabase({ mode: 'memory' }); const db2 = new TestDatabase({ mode: 'memory' }); const conn1 = await db1.initialize(); const conn2 
= await db2.initialize(); // Insert data in first connection const node = TestDataGenerator.generateNode(); conn1.prepare(` INSERT INTO nodes ( node_type, package_name, display_name, description, category, development_style, is_ai_tool, is_trigger, is_webhook, is_versioned, version, documentation, properties_schema, operations, credentials_required ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) `).run( node.nodeType, node.packageName, node.displayName, node.description || '', node.category || 'Core Nodes', node.developmentStyle || 'programmatic', node.isAITool ? 1 : 0, node.isTrigger ? 1 : 0, node.isWebhook ? 1 : 0, node.isVersioned ? 1 : 0, node.version, node.documentation, JSON.stringify(node.properties || []), JSON.stringify(node.operations || []), JSON.stringify(node.credentials || []) ); // Verify data is isolated const count1 = conn1.prepare('SELECT COUNT(*) as count FROM nodes').get() as { count: number }; const count2 = conn2.prepare('SELECT COUNT(*) as count FROM nodes').get() as { count: number }; expect(count1.count).toBe(1); expect(count2.count).toBe(0); await db1.cleanup(); await db2.cleanup(); }); }); describe('File-Based Database', () => { it('should create and connect to file database', async () => { testDb = new TestDatabase({ mode: 'file', name: 'test-connection.db' }); const db = await testDb.initialize(); expect(db).toBeDefined(); expect(db.open).toBe(true); expect(db.name).toContain('test-connection.db'); // Verify file exists const dbPath = path.join(__dirname, '../../../.test-dbs/test-connection.db'); expect(fs.existsSync(dbPath)).toBe(true); }); it('should enable WAL mode by default for file databases', async () => { testDb = new TestDatabase({ mode: 'file', name: 'test-wal.db' }); const db = await testDb.initialize(); const mode = db.prepare('PRAGMA journal_mode').get() as { journal_mode: string }; expect(mode.journal_mode).toBe('wal'); // Verify WAL files are created const dbPath = path.join(__dirname, '../../../.test-dbs/test-wal.db'); expect(fs.existsSync(`${dbPath}-wal`)).toBe(true); expect(fs.existsSync(`${dbPath}-shm`)).toBe(true); }); it('should allow disabling WAL mode', async () => { testDb = new TestDatabase({ mode: 'file', name: 'test-no-wal.db', enableWAL: false }); const db = await testDb.initialize(); const mode = db.prepare('PRAGMA journal_mode').get() as { journal_mode: string }; expect(mode.journal_mode).not.toBe('wal'); }); it('should handle connection pooling simulation', async () => { const dbPath = path.join(__dirname, '../../../.test-dbs/test-pool.db'); // Create initial database testDb = new TestDatabase({ mode: 'file', name: 'test-pool.db' }); const initialDb = await testDb.initialize(); // Close the initial connection but keep the file initialDb.close(); // Simulate multiple connections const connections: Database.Database[] = []; const connectionCount = 5; try { for (let i = 0; i < connectionCount; i++) { const conn = new Database(dbPath, { readonly: false, fileMustExist: true }); connections.push(conn); } // All connections should be open expect(connections.every(conn => conn.open)).toBe(true); // Test concurrent reads const promises = connections.map((conn, index) => { return new Promise((resolve, reject) => { try { const result = conn.prepare('SELECT ? 
as id').get(index); resolve(result); } catch (error) { reject(error); } }); }); const results = await Promise.all(promises); expect(results).toHaveLength(connectionCount); } finally { // Cleanup connections - ensure all are closed even if some fail await Promise.all( connections.map(async (conn) => { try { if (conn.open) { conn.close(); } } catch (error) { // Ignore close errors } }) ); // Clean up files with error handling try { if (fs.existsSync(dbPath)) { fs.unlinkSync(dbPath); } if (fs.existsSync(`${dbPath}-wal`)) { fs.unlinkSync(`${dbPath}-wal`); } if (fs.existsSync(`${dbPath}-shm`)) { fs.unlinkSync(`${dbPath}-shm`); } } catch (error) { // Ignore cleanup errors } // Mark testDb as cleaned up to avoid double cleanup testDb = null as any; } }); }); describe('Connection Error Handling', () => { it('should handle invalid file path gracefully', async () => { const invalidPath = '/invalid/path/that/does/not/exist/test.db'; expect(() => { new Database(invalidPath); }).toThrow(); }); it('should handle database file corruption', async () => { const corruptPath = path.join(__dirname, '../../../.test-dbs/corrupt.db'); // Create directory if it doesn't exist const dir = path.dirname(corruptPath); if (!fs.existsSync(dir)) { fs.mkdirSync(dir, { recursive: true }); } // Create a corrupt database file fs.writeFileSync(corruptPath, 'This is not a valid SQLite database'); try { // SQLite may not immediately throw on construction, but on first operation let db: Database.Database | null = null; let errorThrown = false; try { db = new Database(corruptPath); // Try to use the database - this should fail db.prepare('SELECT 1').get(); } catch (error) { errorThrown = true; expect(error).toBeDefined(); } finally { if (db && db.open) { db.close(); } } expect(errorThrown).toBe(true); } finally { if (fs.existsSync(corruptPath)) { fs.unlinkSync(corruptPath); } } }); it('should handle readonly database access', async () => { // Create a database first testDb = new TestDatabase({ mode: 'file', name: 'test-readonly.db' }); const db = await testDb.initialize(); // Insert test data using correct schema const node = TestDataGenerator.generateNode(); db.prepare(` INSERT INTO nodes ( node_type, package_name, display_name, description, category, development_style, is_ai_tool, is_trigger, is_webhook, is_versioned, version, documentation, properties_schema, operations, credentials_required ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) `).run( node.nodeType, node.packageName, node.displayName, node.description || '', node.category || 'Core Nodes', node.developmentStyle || 'programmatic', node.isAITool ? 1 : 0, node.isTrigger ? 1 : 0, node.isWebhook ? 1 : 0, node.isVersioned ? 
1 : 0, node.version, node.documentation, JSON.stringify(node.properties || []), JSON.stringify(node.operations || []), JSON.stringify(node.credentials || []) ); // Close the write database first db.close(); // Get the actual path from the database name const dbPath = db.name; // Open as readonly const readonlyDb = new Database(dbPath, { readonly: true }); try { // Reading should work const count = readonlyDb.prepare('SELECT COUNT(*) as count FROM nodes').get() as { count: number }; expect(count.count).toBe(1); // Writing should fail expect(() => { readonlyDb.prepare('DELETE FROM nodes').run(); }).toThrow(/readonly/); } finally { readonlyDb.close(); } }); }); describe('Connection Lifecycle', () => { it('should properly close database connections', async () => { testDb = new TestDatabase({ mode: 'file', name: 'test-lifecycle.db' }); const db = await testDb.initialize(); expect(db.open).toBe(true); await testDb.cleanup(); expect(db.open).toBe(false); }); it('should handle multiple open/close cycles', async () => { const dbPath = path.join(__dirname, '../../../.test-dbs/test-cycles.db'); for (let i = 0; i < 3; i++) { const db = new TestDatabase({ mode: 'file', name: 'test-cycles.db' }); const conn = await db.initialize(); // Perform operation const result = conn.prepare('SELECT ? as cycle').get(i) as { cycle: number }; expect(result.cycle).toBe(i); await db.cleanup(); } // Ensure file is cleaned up expect(fs.existsSync(dbPath)).toBe(false); }); it('should handle connection timeout simulation', async () => { testDb = new TestDatabase({ mode: 'file', name: 'test-timeout.db' }); const db = await testDb.initialize(); // Set a busy timeout db.exec('PRAGMA busy_timeout = 100'); // 100ms timeout // Start a transaction to lock the database db.exec('BEGIN EXCLUSIVE'); // Try to access from another connection (should timeout) const dbPath = path.join(__dirname, '../../../.test-dbs/test-timeout.db'); const conn2 = new Database(dbPath); conn2.exec('PRAGMA busy_timeout = 100'); try { expect(() => { conn2.exec('BEGIN EXCLUSIVE'); }).toThrow(/database is locked/); } finally { db.exec('ROLLBACK'); conn2.close(); } }, { timeout: 5000 }); // Add explicit timeout }); describe('Database Configuration', () => { it('should apply optimal pragmas for performance', async () => { testDb = new TestDatabase({ mode: 'file', name: 'test-pragmas.db' }); const db = await testDb.initialize(); // Apply performance pragmas db.exec('PRAGMA synchronous = NORMAL'); db.exec('PRAGMA cache_size = -64000'); // 64MB cache db.exec('PRAGMA temp_store = MEMORY'); db.exec('PRAGMA mmap_size = 268435456'); // 256MB mmap // Verify pragmas const sync = db.prepare('PRAGMA synchronous').get() as { synchronous: number }; const cache = db.prepare('PRAGMA cache_size').get() as { cache_size: number }; const temp = db.prepare('PRAGMA temp_store').get() as { temp_store: number }; const mmap = db.prepare('PRAGMA mmap_size').get() as { mmap_size: number }; expect(sync.synchronous).toBe(1); // NORMAL = 1 expect(cache.cache_size).toBe(-64000); expect(temp.temp_store).toBe(2); // MEMORY = 2 expect(mmap.mmap_size).toBeGreaterThan(0); }); it('should have foreign key support enabled', async () => { testDb = new TestDatabase({ mode: 'memory' }); const db = await testDb.initialize(); // Foreign keys should be enabled by default const fkEnabled = db.prepare('PRAGMA foreign_keys').get() as { foreign_keys: number }; expect(fkEnabled.foreign_keys).toBe(1); // Note: The current schema doesn't define foreign key constraints, // but the setting is enabled for future 
use }); }); }); ``` -------------------------------------------------------------------------------- /src/services/execution-processor.ts: -------------------------------------------------------------------------------- ```typescript /** * Execution Processor Service * * Intelligent processing and filtering of n8n execution data to enable * AI agents to inspect executions without exceeding token limits. * * Features: * - Preview mode: Show structure and counts without values * - Summary mode: Smart default with 2 sample items per node * - Filtered mode: Granular control (node filtering, item limits) * - Smart recommendations: Guide optimal retrieval strategy */ import { Execution, ExecutionMode, ExecutionPreview, NodePreview, ExecutionRecommendation, ExecutionFilterOptions, FilteredExecutionResponse, FilteredNodeData, ExecutionStatus, } from '../types/n8n-api'; import { logger } from '../utils/logger'; /** * Size estimation and threshold constants */ const THRESHOLDS = { CHAR_SIZE_BYTES: 2, // UTF-16 characters OVERHEAD_PER_OBJECT: 50, // Approximate JSON overhead MAX_RECOMMENDED_SIZE_KB: 100, // Threshold for "can fetch full" SMALL_DATASET_ITEMS: 20, // <= this is considered small MODERATE_DATASET_ITEMS: 50, // <= this is considered moderate MODERATE_DATASET_SIZE_KB: 200, // <= this is considered moderate MAX_DEPTH: 3, // Maximum depth for structure extraction MAX_ITEMS_LIMIT: 1000, // Maximum allowed itemsLimit value } as const; /** * Helper function to extract error message from various error formats */ function extractErrorMessage(error: unknown): string { if (typeof error === 'string') { return error; } if (error && typeof error === 'object') { if ('message' in error && typeof error.message === 'string') { return error.message; } if ('error' in error && typeof error.error === 'string') { return error.error; } } return 'Unknown error'; } /** * Extract data structure (JSON schema-like) from items */ function extractStructure(data: unknown, maxDepth = THRESHOLDS.MAX_DEPTH, currentDepth = 0): Record<string, unknown> | string | unknown[] { if (currentDepth >= maxDepth) { return typeof data; } if (data === null || data === undefined) { return 'null'; } if (Array.isArray(data)) { if (data.length === 0) { return []; } // Extract structure from first item return [extractStructure(data[0], maxDepth, currentDepth + 1)]; } if (typeof data === 'object') { const structure: Record<string, unknown> = {}; for (const key in data) { if (Object.prototype.hasOwnProperty.call(data, key)) { structure[key] = extractStructure((data as Record<string, unknown>)[key], maxDepth, currentDepth + 1); } } return structure; } return typeof data; } /** * Estimate size of data in KB */ function estimateDataSize(data: unknown): number { try { const jsonString = JSON.stringify(data); const sizeBytes = jsonString.length * THRESHOLDS.CHAR_SIZE_BYTES; return Math.ceil(sizeBytes / 1024); } catch (error) { logger.warn('Failed to estimate data size', { error }); return 0; } } /** * Count items in execution data */ function countItems(nodeData: unknown): { input: number; output: number } { const counts = { input: 0, output: 0 }; if (!nodeData || !Array.isArray(nodeData)) { return counts; } for (const run of nodeData) { if (run?.data?.main) { const mainData = run.data.main; if (Array.isArray(mainData)) { for (const output of mainData) { if (Array.isArray(output)) { counts.output += output.length; } } } } } return counts; } /** * Generate preview for an execution */ export function generatePreview(execution: Execution): { preview: 
ExecutionPreview; recommendation: ExecutionRecommendation; } { const preview: ExecutionPreview = { totalNodes: 0, executedNodes: 0, estimatedSizeKB: 0, nodes: {}, }; if (!execution.data?.resultData?.runData) { return { preview, recommendation: { canFetchFull: true, suggestedMode: 'summary', reason: 'No execution data available', }, }; } const runData = execution.data.resultData.runData; const nodeNames = Object.keys(runData); preview.totalNodes = nodeNames.length; let totalItemsOutput = 0; let largestNodeItems = 0; for (const nodeName of nodeNames) { const nodeData = runData[nodeName]; const itemCounts = countItems(nodeData); // Extract structure from first run's first output item let dataStructure: Record<string, unknown> = {}; if (Array.isArray(nodeData) && nodeData.length > 0) { const firstRun = nodeData[0]; const firstItem = firstRun?.data?.main?.[0]?.[0]; if (firstItem) { dataStructure = extractStructure(firstItem) as Record<string, unknown>; } } const nodeSize = estimateDataSize(nodeData); const nodePreview: NodePreview = { status: 'success', itemCounts, dataStructure, estimatedSizeKB: nodeSize, }; // Check for errors if (Array.isArray(nodeData)) { for (const run of nodeData) { if (run.error) { nodePreview.status = 'error'; nodePreview.error = extractErrorMessage(run.error); break; } } } preview.nodes[nodeName] = nodePreview; preview.estimatedSizeKB += nodeSize; preview.executedNodes++; totalItemsOutput += itemCounts.output; largestNodeItems = Math.max(largestNodeItems, itemCounts.output); } // Generate recommendation const recommendation = generateRecommendation( preview.estimatedSizeKB, totalItemsOutput, largestNodeItems ); return { preview, recommendation }; } /** * Generate smart recommendation based on data characteristics */ function generateRecommendation( totalSizeKB: number, totalItems: number, largestNodeItems: number ): ExecutionRecommendation { // Can safely fetch full data if (totalSizeKB <= THRESHOLDS.MAX_RECOMMENDED_SIZE_KB && totalItems <= THRESHOLDS.SMALL_DATASET_ITEMS) { return { canFetchFull: true, suggestedMode: 'full', reason: `Small dataset (${totalSizeKB}KB, ${totalItems} items). Safe to fetch full data.`, }; } // Moderate size - use summary if (totalSizeKB <= THRESHOLDS.MODERATE_DATASET_SIZE_KB && totalItems <= THRESHOLDS.MODERATE_DATASET_ITEMS) { return { canFetchFull: false, suggestedMode: 'summary', suggestedItemsLimit: 2, reason: `Moderate dataset (${totalSizeKB}KB, ${totalItems} items). Summary mode recommended.`, }; } // Large dataset - filter with limits const suggestedLimit = Math.max(1, Math.min(5, Math.floor(100 / largestNodeItems))); return { canFetchFull: false, suggestedMode: 'filtered', suggestedItemsLimit: suggestedLimit, reason: `Large dataset (${totalSizeKB}KB, ${totalItems} items). 
Use filtered mode with itemsLimit: ${suggestedLimit}.`, }; } /** * Truncate items array with metadata */ function truncateItems( items: unknown[][], limit: number ): { truncated: unknown[][]; metadata: { totalItems: number; itemsShown: number; truncated: boolean }; } { if (!Array.isArray(items) || items.length === 0) { return { truncated: items || [], metadata: { totalItems: 0, itemsShown: 0, truncated: false, }, }; } let totalItems = 0; for (const output of items) { if (Array.isArray(output)) { totalItems += output.length; } } // Special case: limit = 0 means structure only if (limit === 0) { const structureOnly = items.map(output => { if (!Array.isArray(output) || output.length === 0) { return []; } return [extractStructure(output[0])]; }); return { truncated: structureOnly, metadata: { totalItems, itemsShown: 0, truncated: true, }, }; } // Limit = -1 means unlimited if (limit < 0) { return { truncated: items, metadata: { totalItems, itemsShown: totalItems, truncated: false, }, }; } // Apply limit const result: unknown[][] = []; let itemsShown = 0; for (const output of items) { if (!Array.isArray(output)) { result.push(output); continue; } if (itemsShown >= limit) { break; } const remaining = limit - itemsShown; const toTake = Math.min(remaining, output.length); result.push(output.slice(0, toTake)); itemsShown += toTake; } return { truncated: result, metadata: { totalItems, itemsShown, truncated: itemsShown < totalItems, }, }; } /** * Filter execution data based on options */ export function filterExecutionData( execution: Execution, options: ExecutionFilterOptions ): FilteredExecutionResponse { const mode = options.mode || 'summary'; // Validate and bound itemsLimit let itemsLimit = options.itemsLimit !== undefined ? options.itemsLimit : 2; if (itemsLimit !== -1) { // -1 means unlimited if (itemsLimit < 0) { logger.warn('Invalid itemsLimit, defaulting to 2', { provided: itemsLimit }); itemsLimit = 2; } if (itemsLimit > THRESHOLDS.MAX_ITEMS_LIMIT) { logger.warn(`itemsLimit capped at ${THRESHOLDS.MAX_ITEMS_LIMIT}`, { provided: itemsLimit }); itemsLimit = THRESHOLDS.MAX_ITEMS_LIMIT; } } const includeInputData = options.includeInputData || false; const nodeNamesFilter = options.nodeNames; // Calculate duration const duration = execution.stoppedAt && execution.startedAt ? 
new Date(execution.stoppedAt).getTime() - new Date(execution.startedAt).getTime() : undefined; const response: FilteredExecutionResponse = { id: execution.id, workflowId: execution.workflowId, status: execution.status, mode, startedAt: execution.startedAt, stoppedAt: execution.stoppedAt, duration, finished: execution.finished, }; // Handle preview mode if (mode === 'preview') { const { preview, recommendation } = generatePreview(execution); response.preview = preview; response.recommendation = recommendation; return response; } // Handle no data case if (!execution.data?.resultData?.runData) { response.summary = { totalNodes: 0, executedNodes: 0, totalItems: 0, hasMoreData: false, }; response.nodes = {}; if (execution.data?.resultData?.error) { response.error = execution.data.resultData.error; } return response; } const runData = execution.data.resultData.runData; let nodeNames = Object.keys(runData); // Apply node name filter if (nodeNamesFilter && nodeNamesFilter.length > 0) { nodeNames = nodeNames.filter(name => nodeNamesFilter.includes(name)); } // Process nodes const processedNodes: Record<string, FilteredNodeData> = {}; let totalItems = 0; let hasMoreData = false; for (const nodeName of nodeNames) { const nodeData = runData[nodeName]; if (!Array.isArray(nodeData) || nodeData.length === 0) { processedNodes[nodeName] = { itemsInput: 0, itemsOutput: 0, status: 'success', }; continue; } // Get first run data const firstRun = nodeData[0]; const itemCounts = countItems(nodeData); totalItems += itemCounts.output; const nodeResult: FilteredNodeData = { executionTime: firstRun.executionTime, itemsInput: itemCounts.input, itemsOutput: itemCounts.output, status: 'success', }; // Check for errors if (firstRun.error) { nodeResult.status = 'error'; nodeResult.error = extractErrorMessage(firstRun.error); } // Handle full mode - include all data if (mode === 'full') { nodeResult.data = { output: firstRun.data?.main || [], metadata: { totalItems: itemCounts.output, itemsShown: itemCounts.output, truncated: false, }, }; if (includeInputData && firstRun.inputData) { nodeResult.data.input = firstRun.inputData; } } else { // Summary or filtered mode - apply limits const outputData = firstRun.data?.main || []; const { truncated, metadata } = truncateItems(outputData, itemsLimit); if (metadata.truncated) { hasMoreData = true; } nodeResult.data = { output: truncated, metadata, }; if (includeInputData && firstRun.inputData) { nodeResult.data.input = firstRun.inputData; } } processedNodes[nodeName] = nodeResult; } // Add summary response.summary = { totalNodes: Object.keys(runData).length, executedNodes: nodeNames.length, totalItems, hasMoreData, }; response.nodes = processedNodes; // Include error if present if (execution.data?.resultData?.error) { response.error = execution.data.resultData.error; } return response; } /** * Process execution based on mode and options * Main entry point for the service */ export function processExecution( execution: Execution, options: ExecutionFilterOptions = {} ): FilteredExecutionResponse | Execution { // Legacy behavior: if no mode specified and no filtering options, return original if (!options.mode && !options.nodeNames && options.itemsLimit === undefined) { return execution; } return filterExecutionData(execution, options); } ```
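For orientation, a minimal usage sketch of the service above. The import paths and the `execution` value are assumptions for illustration only, not code from the repository:

```typescript
// Hypothetical usage sketch of execution-processor.ts (illustrative only).
import { processExecution } from './execution-processor';
import { Execution } from '../types/n8n-api';

// Assume `execution` was fetched from the n8n executions API elsewhere.
declare const execution: Execution;

// Preview mode: structure, item counts, and a retrieval recommendation.
const preview = processExecution(execution, { mode: 'preview' });

// Summary mode (the smart default): 2 sample items per node.
const summary = processExecution(execution, { mode: 'summary', itemsLimit: 2 });

// Filtered mode: restrict to named nodes and cap items per node.
const filtered = processExecution(execution, {
  mode: 'filtered',
  nodeNames: ['HTTP Request'],
  itemsLimit: 5,
  includeInputData: false,
});

// With no options at all, the original execution object is returned unchanged.
const original = processExecution(execution);
```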