This is page 20 of 59. Use http://codebase.md/czlonkowski/n8n-mcp?lines=true&page={x} to view the full context. # Directory Structure ``` ├── _config.yml ├── .claude │ └── agents │ ├── code-reviewer.md │ ├── context-manager.md │ ├── debugger.md │ ├── deployment-engineer.md │ ├── mcp-backend-engineer.md │ ├── n8n-mcp-tester.md │ ├── technical-researcher.md │ └── test-automator.md ├── .dockerignore ├── .env.docker ├── .env.example ├── .env.n8n.example ├── .env.test ├── .env.test.example ├── .github │ ├── ABOUT.md │ ├── BENCHMARK_THRESHOLDS.md │ ├── FUNDING.yml │ ├── gh-pages.yml │ ├── secret_scanning.yml │ └── workflows │ ├── benchmark-pr.yml │ ├── benchmark.yml │ ├── docker-build-fast.yml │ ├── docker-build-n8n.yml │ ├── docker-build.yml │ ├── release.yml │ ├── test.yml │ └── update-n8n-deps.yml ├── .gitignore ├── .npmignore ├── ATTRIBUTION.md ├── CHANGELOG.md ├── CLAUDE.md ├── codecov.yml ├── coverage.json ├── data │ ├── .gitkeep │ ├── nodes.db │ ├── nodes.db-shm │ ├── nodes.db-wal │ └── templates.db ├── deploy │ └── quick-deploy-n8n.sh ├── docker │ ├── docker-entrypoint.sh │ ├── n8n-mcp │ ├── parse-config.js │ └── README.md ├── docker-compose.buildkit.yml ├── docker-compose.extract.yml ├── docker-compose.n8n.yml ├── docker-compose.override.yml.example ├── docker-compose.test-n8n.yml ├── docker-compose.yml ├── Dockerfile ├── Dockerfile.railway ├── Dockerfile.test ├── docs │ ├── AUTOMATED_RELEASES.md │ ├── BENCHMARKS.md │ ├── CHANGELOG.md │ ├── CLAUDE_CODE_SETUP.md │ ├── CLAUDE_INTERVIEW.md │ ├── CODECOV_SETUP.md │ ├── CODEX_SETUP.md │ ├── CURSOR_SETUP.md │ ├── DEPENDENCY_UPDATES.md │ ├── DOCKER_README.md │ ├── DOCKER_TROUBLESHOOTING.md │ ├── FINAL_AI_VALIDATION_SPEC.md │ ├── FLEXIBLE_INSTANCE_CONFIGURATION.md │ ├── HTTP_DEPLOYMENT.md │ ├── img │ │ ├── cc_command.png │ │ ├── cc_connected.png │ │ ├── codex_connected.png │ │ ├── cursor_tut.png │ │ ├── Railway_api.png │ │ ├── Railway_server_address.png │ │ ├── vsc_ghcp_chat_agent_mode.png │ │ ├── vsc_ghcp_chat_instruction_files.png │ │ ├── vsc_ghcp_chat_thinking_tool.png │ │ └── windsurf_tut.png │ ├── INSTALLATION.md │ ├── LIBRARY_USAGE.md │ ├── local │ │ ├── DEEP_DIVE_ANALYSIS_2025-10-02.md │ │ ├── DEEP_DIVE_ANALYSIS_README.md │ │ ├── Deep_dive_p1_p2.md │ │ ├── integration-testing-plan.md │ │ ├── integration-tests-phase1-summary.md │ │ ├── N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md │ │ ├── P0_IMPLEMENTATION_PLAN.md │ │ └── TEMPLATE_MINING_ANALYSIS.md │ ├── MCP_ESSENTIALS_README.md │ ├── MCP_QUICK_START_GUIDE.md │ ├── N8N_DEPLOYMENT.md │ ├── RAILWAY_DEPLOYMENT.md │ ├── README_CLAUDE_SETUP.md │ ├── README.md │ ├── tools-documentation-usage.md │ ├── VS_CODE_PROJECT_SETUP.md │ ├── WINDSURF_SETUP.md │ └── workflow-diff-examples.md ├── examples │ └── enhanced-documentation-demo.js ├── fetch_log.txt ├── LICENSE ├── MEMORY_N8N_UPDATE.md ├── MEMORY_TEMPLATE_UPDATE.md ├── monitor_fetch.sh ├── N8N_HTTP_STREAMABLE_SETUP.md ├── n8n-nodes.db ├── P0-R3-TEST-PLAN.md ├── package-lock.json ├── package.json ├── package.runtime.json ├── PRIVACY.md ├── railway.json ├── README.md ├── renovate.json ├── scripts │ ├── analyze-optimization.sh │ ├── audit-schema-coverage.ts │ ├── build-optimized.sh │ ├── compare-benchmarks.js │ ├── demo-optimization.sh │ ├── deploy-http.sh │ ├── deploy-to-vm.sh │ ├── export-webhook-workflows.ts │ ├── extract-changelog.js │ ├── extract-from-docker.js │ ├── extract-nodes-docker.sh │ ├── extract-nodes-simple.sh │ ├── format-benchmark-results.js │ ├── generate-benchmark-stub.js │ ├── generate-detailed-reports.js │ ├── generate-test-summary.js │ 
├── http-bridge.js │ ├── mcp-http-client.js │ ├── migrate-nodes-fts.ts │ ├── migrate-tool-docs.ts │ ├── n8n-docs-mcp.service │ ├── nginx-n8n-mcp.conf │ ├── prebuild-fts5.ts │ ├── prepare-release.js │ ├── publish-npm-quick.sh │ ├── publish-npm.sh │ ├── quick-test.ts │ ├── run-benchmarks-ci.js │ ├── sync-runtime-version.js │ ├── test-ai-validation-debug.ts │ ├── test-code-node-enhancements.ts │ ├── test-code-node-fixes.ts │ ├── test-docker-config.sh │ ├── test-docker-fingerprint.ts │ ├── test-docker-optimization.sh │ ├── test-docker.sh │ ├── test-empty-connection-validation.ts │ ├── test-error-message-tracking.ts │ ├── test-error-output-validation.ts │ ├── test-error-validation.js │ ├── test-essentials.ts │ ├── test-expression-code-validation.ts │ ├── test-expression-format-validation.js │ ├── test-fts5-search.ts │ ├── test-fuzzy-fix.ts │ ├── test-fuzzy-simple.ts │ ├── test-helpers-validation.ts │ ├── test-http-search.ts │ ├── test-http.sh │ ├── test-jmespath-validation.ts │ ├── test-multi-tenant-simple.ts │ ├── test-multi-tenant.ts │ ├── test-n8n-integration.sh │ ├── test-node-info.js │ ├── test-node-type-validation.ts │ ├── test-nodes-base-prefix.ts │ ├── test-operation-validation.ts │ ├── test-optimized-docker.sh │ ├── test-release-automation.js │ ├── test-search-improvements.ts │ ├── test-security.ts │ ├── test-single-session.sh │ ├── test-sqljs-triggers.ts │ ├── test-telemetry-debug.ts │ ├── test-telemetry-direct.ts │ ├── test-telemetry-env.ts │ ├── test-telemetry-integration.ts │ ├── test-telemetry-no-select.ts │ ├── test-telemetry-security.ts │ ├── test-telemetry-simple.ts │ ├── test-typeversion-validation.ts │ ├── test-url-configuration.ts │ ├── test-user-id-persistence.ts │ ├── test-webhook-validation.ts │ ├── test-workflow-insert.ts │ ├── test-workflow-sanitizer.ts │ ├── test-workflow-tracking-debug.ts │ ├── update-and-publish-prep.sh │ ├── update-n8n-deps.js │ ├── update-readme-version.js │ ├── vitest-benchmark-json-reporter.js │ └── vitest-benchmark-reporter.ts ├── SECURITY.md ├── src │ ├── config │ │ └── n8n-api.ts │ ├── data │ │ └── canonical-ai-tool-examples.json │ ├── database │ │ ├── database-adapter.ts │ │ ├── migrations │ │ │ └── add-template-node-configs.sql │ │ ├── node-repository.ts │ │ ├── nodes.db │ │ ├── schema-optimized.sql │ │ └── schema.sql │ ├── errors │ │ └── validation-service-error.ts │ ├── http-server-single-session.ts │ ├── http-server.ts │ ├── index.ts │ ├── loaders │ │ └── node-loader.ts │ ├── mappers │ │ └── docs-mapper.ts │ ├── mcp │ │ ├── handlers-n8n-manager.ts │ │ ├── handlers-workflow-diff.ts │ │ ├── index.ts │ │ ├── server.ts │ │ ├── stdio-wrapper.ts │ │ ├── tool-docs │ │ │ ├── configuration │ │ │ │ ├── get-node-as-tool-info.ts │ │ │ │ ├── get-node-documentation.ts │ │ │ │ ├── get-node-essentials.ts │ │ │ │ ├── get-node-info.ts │ │ │ │ ├── get-property-dependencies.ts │ │ │ │ ├── index.ts │ │ │ │ └── search-node-properties.ts │ │ │ ├── discovery │ │ │ │ ├── get-database-statistics.ts │ │ │ │ ├── index.ts │ │ │ │ ├── list-ai-tools.ts │ │ │ │ ├── list-nodes.ts │ │ │ │ └── search-nodes.ts │ │ │ ├── guides │ │ │ │ ├── ai-agents-guide.ts │ │ │ │ └── index.ts │ │ │ ├── index.ts │ │ │ ├── system │ │ │ │ ├── index.ts │ │ │ │ ├── n8n-diagnostic.ts │ │ │ │ ├── n8n-health-check.ts │ │ │ │ ├── n8n-list-available-tools.ts │ │ │ │ └── tools-documentation.ts │ │ │ ├── templates │ │ │ │ ├── get-template.ts │ │ │ │ ├── get-templates-for-task.ts │ │ │ │ ├── index.ts │ │ │ │ ├── list-node-templates.ts │ │ │ │ ├── list-tasks.ts │ │ │ │ ├── 
search-templates-by-metadata.ts │ │ │ │ └── search-templates.ts │ │ │ ├── types.ts │ │ │ ├── validation │ │ │ │ ├── index.ts │ │ │ │ ├── validate-node-minimal.ts │ │ │ │ ├── validate-node-operation.ts │ │ │ │ ├── validate-workflow-connections.ts │ │ │ │ ├── validate-workflow-expressions.ts │ │ │ │ └── validate-workflow.ts │ │ │ └── workflow_management │ │ │ ├── index.ts │ │ │ ├── n8n-autofix-workflow.ts │ │ │ ├── n8n-create-workflow.ts │ │ │ ├── n8n-delete-execution.ts │ │ │ ├── n8n-delete-workflow.ts │ │ │ ├── n8n-get-execution.ts │ │ │ ├── n8n-get-workflow-details.ts │ │ │ ├── n8n-get-workflow-minimal.ts │ │ │ ├── n8n-get-workflow-structure.ts │ │ │ ├── n8n-get-workflow.ts │ │ │ ├── n8n-list-executions.ts │ │ │ ├── n8n-list-workflows.ts │ │ │ ├── n8n-trigger-webhook-workflow.ts │ │ │ ├── n8n-update-full-workflow.ts │ │ │ ├── n8n-update-partial-workflow.ts │ │ │ └── n8n-validate-workflow.ts │ │ ├── tools-documentation.ts │ │ ├── tools-n8n-friendly.ts │ │ ├── tools-n8n-manager.ts │ │ ├── tools.ts │ │ └── workflow-examples.ts │ ├── mcp-engine.ts │ ├── mcp-tools-engine.ts │ ├── n8n │ │ ├── MCPApi.credentials.ts │ │ └── MCPNode.node.ts │ ├── parsers │ │ ├── node-parser.ts │ │ ├── property-extractor.ts │ │ └── simple-parser.ts │ ├── scripts │ │ ├── debug-http-search.ts │ │ ├── extract-from-docker.ts │ │ ├── fetch-templates-robust.ts │ │ ├── fetch-templates.ts │ │ ├── rebuild-database.ts │ │ ├── rebuild-optimized.ts │ │ ├── rebuild.ts │ │ ├── sanitize-templates.ts │ │ ├── seed-canonical-ai-examples.ts │ │ ├── test-autofix-documentation.ts │ │ ├── test-autofix-workflow.ts │ │ ├── test-execution-filtering.ts │ │ ├── test-node-suggestions.ts │ │ ├── test-protocol-negotiation.ts │ │ ├── test-summary.ts │ │ ├── test-webhook-autofix.ts │ │ ├── validate.ts │ │ └── validation-summary.ts │ ├── services │ │ ├── ai-node-validator.ts │ │ ├── ai-tool-validators.ts │ │ ├── confidence-scorer.ts │ │ ├── config-validator.ts │ │ ├── enhanced-config-validator.ts │ │ ├── example-generator.ts │ │ ├── execution-processor.ts │ │ ├── expression-format-validator.ts │ │ ├── expression-validator.ts │ │ ├── n8n-api-client.ts │ │ ├── n8n-validation.ts │ │ ├── node-documentation-service.ts │ │ ├── node-sanitizer.ts │ │ ├── node-similarity-service.ts │ │ ├── node-specific-validators.ts │ │ ├── operation-similarity-service.ts │ │ ├── property-dependencies.ts │ │ ├── property-filter.ts │ │ ├── resource-similarity-service.ts │ │ ├── sqlite-storage-service.ts │ │ ├── task-templates.ts │ │ ├── universal-expression-validator.ts │ │ ├── workflow-auto-fixer.ts │ │ ├── workflow-diff-engine.ts │ │ └── workflow-validator.ts │ ├── telemetry │ │ ├── batch-processor.ts │ │ ├── config-manager.ts │ │ ├── early-error-logger.ts │ │ ├── error-sanitization-utils.ts │ │ ├── error-sanitizer.ts │ │ ├── event-tracker.ts │ │ ├── event-validator.ts │ │ ├── index.ts │ │ ├── performance-monitor.ts │ │ ├── rate-limiter.ts │ │ ├── startup-checkpoints.ts │ │ ├── telemetry-error.ts │ │ ├── telemetry-manager.ts │ │ ├── telemetry-types.ts │ │ └── workflow-sanitizer.ts │ ├── templates │ │ ├── batch-processor.ts │ │ ├── metadata-generator.ts │ │ ├── README.md │ │ ├── template-fetcher.ts │ │ ├── template-repository.ts │ │ └── template-service.ts │ ├── types │ │ ├── index.ts │ │ ├── instance-context.ts │ │ ├── n8n-api.ts │ │ ├── node-types.ts │ │ └── workflow-diff.ts │ └── utils │ ├── auth.ts │ ├── bridge.ts │ ├── cache-utils.ts │ ├── console-manager.ts │ ├── documentation-fetcher.ts │ ├── enhanced-documentation-fetcher.ts │ ├── error-handler.ts │ ├── 
example-generator.ts │ ├── fixed-collection-validator.ts │ ├── logger.ts │ ├── mcp-client.ts │ ├── n8n-errors.ts │ ├── node-source-extractor.ts │ ├── node-type-normalizer.ts │ ├── node-type-utils.ts │ ├── node-utils.ts │ ├── npm-version-checker.ts │ ├── protocol-version.ts │ ├── simple-cache.ts │ ├── ssrf-protection.ts │ ├── template-node-resolver.ts │ ├── template-sanitizer.ts │ ├── url-detector.ts │ ├── validation-schemas.ts │ └── version.ts ├── test-output.txt ├── test-reinit-fix.sh ├── tests │ ├── __snapshots__ │ │ └── .gitkeep │ ├── auth.test.ts │ ├── benchmarks │ │ ├── database-queries.bench.ts │ │ ├── index.ts │ │ ├── mcp-tools.bench.ts │ │ ├── mcp-tools.bench.ts.disabled │ │ ├── mcp-tools.bench.ts.skip │ │ ├── node-loading.bench.ts.disabled │ │ ├── README.md │ │ ├── search-operations.bench.ts.disabled │ │ └── validation-performance.bench.ts.disabled │ ├── bridge.test.ts │ ├── comprehensive-extraction-test.js │ ├── data │ │ └── .gitkeep │ ├── debug-slack-doc.js │ ├── demo-enhanced-documentation.js │ ├── docker-tests-README.md │ ├── error-handler.test.ts │ ├── examples │ │ └── using-database-utils.test.ts │ ├── extracted-nodes-db │ │ ├── database-import.json │ │ ├── extraction-report.json │ │ ├── insert-nodes.sql │ │ ├── n8n-nodes-base__Airtable.json │ │ ├── n8n-nodes-base__Discord.json │ │ ├── n8n-nodes-base__Function.json │ │ ├── n8n-nodes-base__HttpRequest.json │ │ ├── n8n-nodes-base__If.json │ │ ├── n8n-nodes-base__Slack.json │ │ ├── n8n-nodes-base__SplitInBatches.json │ │ └── n8n-nodes-base__Webhook.json │ ├── factories │ │ ├── node-factory.ts │ │ └── property-definition-factory.ts │ ├── fixtures │ │ ├── .gitkeep │ │ ├── database │ │ │ └── test-nodes.json │ │ ├── factories │ │ │ ├── node.factory.ts │ │ │ └── parser-node.factory.ts │ │ └── template-configs.ts │ ├── helpers │ │ └── env-helpers.ts │ ├── http-server-auth.test.ts │ ├── integration │ │ ├── ai-validation │ │ │ ├── ai-agent-validation.test.ts │ │ │ ├── ai-tool-validation.test.ts │ │ │ ├── chat-trigger-validation.test.ts │ │ │ ├── e2e-validation.test.ts │ │ │ ├── helpers.ts │ │ │ ├── llm-chain-validation.test.ts │ │ │ ├── README.md │ │ │ └── TEST_REPORT.md │ │ ├── ci │ │ │ └── database-population.test.ts │ │ ├── database │ │ │ ├── connection-management.test.ts │ │ │ ├── empty-database.test.ts │ │ │ ├── fts5-search.test.ts │ │ │ ├── node-fts5-search.test.ts │ │ │ ├── node-repository.test.ts │ │ │ ├── performance.test.ts │ │ │ ├── sqljs-memory-leak.test.ts │ │ │ ├── template-node-configs.test.ts │ │ │ ├── template-repository.test.ts │ │ │ ├── test-utils.ts │ │ │ └── transactions.test.ts │ │ ├── database-integration.test.ts │ │ ├── docker │ │ │ ├── docker-config.test.ts │ │ │ ├── docker-entrypoint.test.ts │ │ │ └── test-helpers.ts │ │ ├── flexible-instance-config.test.ts │ │ ├── mcp │ │ │ └── template-examples-e2e.test.ts │ │ ├── mcp-protocol │ │ │ ├── basic-connection.test.ts │ │ │ ├── error-handling.test.ts │ │ │ ├── performance.test.ts │ │ │ ├── protocol-compliance.test.ts │ │ │ ├── README.md │ │ │ ├── session-management.test.ts │ │ │ ├── test-helpers.ts │ │ │ ├── tool-invocation.test.ts │ │ │ └── workflow-error-validation.test.ts │ │ ├── msw-setup.test.ts │ │ ├── n8n-api │ │ │ ├── executions │ │ │ │ ├── delete-execution.test.ts │ │ │ │ ├── get-execution.test.ts │ │ │ │ ├── list-executions.test.ts │ │ │ │ └── trigger-webhook.test.ts │ │ │ ├── scripts │ │ │ │ └── cleanup-orphans.ts │ │ │ ├── system │ │ │ │ ├── diagnostic.test.ts │ │ │ │ ├── health-check.test.ts │ │ │ │ └── list-tools.test.ts │ │ │ ├── test-connection.ts │ 
│ │ ├── types │ │ │ │ └── mcp-responses.ts │ │ │ ├── utils │ │ │ │ ├── cleanup-helpers.ts │ │ │ │ ├── credentials.ts │ │ │ │ ├── factories.ts │ │ │ │ ├── fixtures.ts │ │ │ │ ├── mcp-context.ts │ │ │ │ ├── n8n-client.ts │ │ │ │ ├── node-repository.ts │ │ │ │ ├── response-types.ts │ │ │ │ ├── test-context.ts │ │ │ │ └── webhook-workflows.ts │ │ │ └── workflows │ │ │ ├── autofix-workflow.test.ts │ │ │ ├── create-workflow.test.ts │ │ │ ├── delete-workflow.test.ts │ │ │ ├── get-workflow-details.test.ts │ │ │ ├── get-workflow-minimal.test.ts │ │ │ ├── get-workflow-structure.test.ts │ │ │ ├── get-workflow.test.ts │ │ │ ├── list-workflows.test.ts │ │ │ ├── smart-parameters.test.ts │ │ │ ├── update-partial-workflow.test.ts │ │ │ ├── update-workflow.test.ts │ │ │ └── validate-workflow.test.ts │ │ ├── security │ │ │ ├── command-injection-prevention.test.ts │ │ │ └── rate-limiting.test.ts │ │ ├── setup │ │ │ ├── integration-setup.ts │ │ │ └── msw-test-server.ts │ │ ├── telemetry │ │ │ ├── docker-user-id-stability.test.ts │ │ │ └── mcp-telemetry.test.ts │ │ ├── templates │ │ │ └── metadata-operations.test.ts │ │ └── workflow-creation-node-type-format.test.ts │ ├── logger.test.ts │ ├── MOCKING_STRATEGY.md │ ├── mocks │ │ ├── n8n-api │ │ │ ├── data │ │ │ │ ├── credentials.ts │ │ │ │ ├── executions.ts │ │ │ │ └── workflows.ts │ │ │ ├── handlers.ts │ │ │ └── index.ts │ │ └── README.md │ ├── node-storage-export.json │ ├── setup │ │ ├── global-setup.ts │ │ ├── msw-setup.ts │ │ ├── TEST_ENV_DOCUMENTATION.md │ │ └── test-env.ts │ ├── test-database-extraction.js │ ├── test-direct-extraction.js │ ├── test-enhanced-documentation.js │ ├── test-enhanced-integration.js │ ├── test-mcp-extraction.js │ ├── test-mcp-server-extraction.js │ ├── test-mcp-tools-integration.js │ ├── test-node-documentation-service.js │ ├── test-node-list.js │ ├── test-package-info.js │ ├── test-parsing-operations.js │ ├── test-slack-node-complete.js │ ├── test-small-rebuild.js │ ├── test-sqlite-search.js │ ├── test-storage-system.js │ ├── unit │ │ ├── __mocks__ │ │ │ ├── n8n-nodes-base.test.ts │ │ │ ├── n8n-nodes-base.ts │ │ │ └── README.md │ │ ├── database │ │ │ ├── __mocks__ │ │ │ │ └── better-sqlite3.ts │ │ │ ├── database-adapter-unit.test.ts │ │ │ ├── node-repository-core.test.ts │ │ │ ├── node-repository-operations.test.ts │ │ │ ├── node-repository-outputs.test.ts │ │ │ ├── README.md │ │ │ └── template-repository-core.test.ts │ │ ├── docker │ │ │ ├── config-security.test.ts │ │ │ ├── edge-cases.test.ts │ │ │ ├── parse-config.test.ts │ │ │ └── serve-command.test.ts │ │ ├── errors │ │ │ └── validation-service-error.test.ts │ │ ├── examples │ │ │ └── using-n8n-nodes-base-mock.test.ts │ │ ├── flexible-instance-security-advanced.test.ts │ │ ├── flexible-instance-security.test.ts │ │ ├── http-server │ │ │ └── multi-tenant-support.test.ts │ │ ├── http-server-n8n-mode.test.ts │ │ ├── http-server-n8n-reinit.test.ts │ │ ├── http-server-session-management.test.ts │ │ ├── loaders │ │ │ └── node-loader.test.ts │ │ ├── mappers │ │ │ └── docs-mapper.test.ts │ │ ├── mcp │ │ │ ├── get-node-essentials-examples.test.ts │ │ │ ├── handlers-n8n-manager-simple.test.ts │ │ │ ├── handlers-n8n-manager.test.ts │ │ │ ├── handlers-workflow-diff.test.ts │ │ │ ├── lru-cache-behavior.test.ts │ │ │ ├── multi-tenant-tool-listing.test.ts.disabled │ │ │ ├── parameter-validation.test.ts │ │ │ ├── search-nodes-examples.test.ts │ │ │ ├── tools-documentation.test.ts │ │ │ └── tools.test.ts │ │ ├── monitoring │ │ │ └── cache-metrics.test.ts │ │ ├── 
MULTI_TENANT_TEST_COVERAGE.md │ │ ├── multi-tenant-integration.test.ts │ │ ├── parsers │ │ │ ├── node-parser-outputs.test.ts │ │ │ ├── node-parser.test.ts │ │ │ ├── property-extractor.test.ts │ │ │ └── simple-parser.test.ts │ │ ├── scripts │ │ │ └── fetch-templates-extraction.test.ts │ │ ├── services │ │ │ ├── ai-node-validator.test.ts │ │ │ ├── ai-tool-validators.test.ts │ │ │ ├── confidence-scorer.test.ts │ │ │ ├── config-validator-basic.test.ts │ │ │ ├── config-validator-edge-cases.test.ts │ │ │ ├── config-validator-node-specific.test.ts │ │ │ ├── config-validator-security.test.ts │ │ │ ├── debug-validator.test.ts │ │ │ ├── enhanced-config-validator-integration.test.ts │ │ │ ├── enhanced-config-validator-operations.test.ts │ │ │ ├── enhanced-config-validator.test.ts │ │ │ ├── example-generator.test.ts │ │ │ ├── execution-processor.test.ts │ │ │ ├── expression-format-validator.test.ts │ │ │ ├── expression-validator-edge-cases.test.ts │ │ │ ├── expression-validator.test.ts │ │ │ ├── fixed-collection-validation.test.ts │ │ │ ├── loop-output-edge-cases.test.ts │ │ │ ├── n8n-api-client.test.ts │ │ │ ├── n8n-validation.test.ts │ │ │ ├── node-sanitizer.test.ts │ │ │ ├── node-similarity-service.test.ts │ │ │ ├── node-specific-validators.test.ts │ │ │ ├── operation-similarity-service-comprehensive.test.ts │ │ │ ├── operation-similarity-service.test.ts │ │ │ ├── property-dependencies.test.ts │ │ │ ├── property-filter-edge-cases.test.ts │ │ │ ├── property-filter.test.ts │ │ │ ├── resource-similarity-service-comprehensive.test.ts │ │ │ ├── resource-similarity-service.test.ts │ │ │ ├── task-templates.test.ts │ │ │ ├── template-service.test.ts │ │ │ ├── universal-expression-validator.test.ts │ │ │ ├── validation-fixes.test.ts │ │ │ ├── workflow-auto-fixer.test.ts │ │ │ ├── workflow-diff-engine.test.ts │ │ │ ├── workflow-fixed-collection-validation.test.ts │ │ │ ├── workflow-validator-comprehensive.test.ts │ │ │ ├── workflow-validator-edge-cases.test.ts │ │ │ ├── workflow-validator-error-outputs.test.ts │ │ │ ├── workflow-validator-expression-format.test.ts │ │ │ ├── workflow-validator-loops-simple.test.ts │ │ │ ├── workflow-validator-loops.test.ts │ │ │ ├── workflow-validator-mocks.test.ts │ │ │ ├── workflow-validator-performance.test.ts │ │ │ ├── workflow-validator-with-mocks.test.ts │ │ │ └── workflow-validator.test.ts │ │ ├── telemetry │ │ │ ├── batch-processor.test.ts │ │ │ ├── config-manager.test.ts │ │ │ ├── event-tracker.test.ts │ │ │ ├── event-validator.test.ts │ │ │ ├── rate-limiter.test.ts │ │ │ ├── telemetry-error.test.ts │ │ │ ├── telemetry-manager.test.ts │ │ │ ├── v2.18.3-fixes-verification.test.ts │ │ │ └── workflow-sanitizer.test.ts │ │ ├── templates │ │ │ ├── batch-processor.test.ts │ │ │ ├── metadata-generator.test.ts │ │ │ ├── template-repository-metadata.test.ts │ │ │ └── template-repository-security.test.ts │ │ ├── test-env-example.test.ts │ │ ├── test-infrastructure.test.ts │ │ ├── types │ │ │ ├── instance-context-coverage.test.ts │ │ │ └── instance-context-multi-tenant.test.ts │ │ ├── utils │ │ │ ├── auth-timing-safe.test.ts │ │ │ ├── cache-utils.test.ts │ │ │ ├── console-manager.test.ts │ │ │ ├── database-utils.test.ts │ │ │ ├── fixed-collection-validator.test.ts │ │ │ ├── n8n-errors.test.ts │ │ │ ├── node-type-normalizer.test.ts │ │ │ ├── node-type-utils.test.ts │ │ │ ├── node-utils.test.ts │ │ │ ├── simple-cache-memory-leak-fix.test.ts │ │ │ ├── ssrf-protection.test.ts │ │ │ └── template-node-resolver.test.ts │ │ └── validation-fixes.test.ts │ └── utils │ ├── assertions.ts │ 
├── builders │ │ └── workflow.builder.ts │ ├── data-generators.ts │ ├── database-utils.ts │ ├── README.md │ └── test-helpers.ts ├── thumbnail.png ├── tsconfig.build.json ├── tsconfig.json ├── types │ ├── mcp.d.ts │ └── test-env.d.ts ├── verify-telemetry-fix.js ├── versioned-nodes.md ├── vitest.config.benchmark.ts ├── vitest.config.integration.ts └── vitest.config.ts ``` # Files -------------------------------------------------------------------------------- /tests/integration/database/connection-management.test.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { describe, it, expect, beforeEach, afterEach } from 'vitest'; 2 | import Database from 'better-sqlite3'; 3 | import * as fs from 'fs'; 4 | import * as path from 'path'; 5 | import { TestDatabase, TestDataGenerator } from './test-utils'; 6 | 7 | describe('Database Connection Management', () => { 8 | let testDb: TestDatabase; 9 | 10 | afterEach(async () => { 11 | if (testDb) { 12 | await testDb.cleanup(); 13 | } 14 | }); 15 | 16 | describe('In-Memory Database', () => { 17 | it('should create and connect to in-memory database', async () => { 18 | testDb = new TestDatabase({ mode: 'memory' }); 19 | const db = await testDb.initialize(); 20 | 21 | expect(db).toBeDefined(); 22 | expect(db.open).toBe(true); 23 | expect(db.name).toBe(':memory:'); 24 | }); 25 | 26 | it('should execute queries on in-memory database', async () => { 27 | testDb = new TestDatabase({ mode: 'memory' }); 28 | const db = await testDb.initialize(); 29 | 30 | // Test basic query 31 | const result = db.prepare('SELECT 1 as value').get() as { value: number }; 32 | expect(result.value).toBe(1); 33 | 34 | // Test table exists 35 | const tables = db.prepare( 36 | "SELECT name FROM sqlite_master WHERE type='table' AND name='nodes'" 37 | ).all(); 38 | expect(tables.length).toBe(1); 39 | }); 40 | 41 | it('should handle multiple connections to same in-memory database', async () => { 42 | // Each in-memory database is isolated 43 | const db1 = new TestDatabase({ mode: 'memory' }); 44 | const db2 = new TestDatabase({ mode: 'memory' }); 45 | 46 | const conn1 = await db1.initialize(); 47 | const conn2 = await db2.initialize(); 48 | 49 | // Insert data in first connection 50 | const node = TestDataGenerator.generateNode(); 51 | conn1.prepare(` 52 | INSERT INTO nodes ( 53 | node_type, package_name, display_name, description, category, 54 | development_style, is_ai_tool, is_trigger, is_webhook, 55 | is_versioned, version, documentation, properties_schema, 56 | operations, credentials_required 57 | ) 58 | VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 59 | `).run( 60 | node.nodeType, 61 | node.packageName, 62 | node.displayName, 63 | node.description || '', 64 | node.category || 'Core Nodes', 65 | node.developmentStyle || 'programmatic', 66 | node.isAITool ? 1 : 0, 67 | node.isTrigger ? 1 : 0, 68 | node.isWebhook ? 1 : 0, 69 | node.isVersioned ? 
1 : 0, 70 | node.version, 71 | node.documentation, 72 | JSON.stringify(node.properties || []), 73 | JSON.stringify(node.operations || []), 74 | JSON.stringify(node.credentials || []) 75 | ); 76 | 77 | // Verify data is isolated 78 | const count1 = conn1.prepare('SELECT COUNT(*) as count FROM nodes').get() as { count: number }; 79 | const count2 = conn2.prepare('SELECT COUNT(*) as count FROM nodes').get() as { count: number }; 80 | 81 | expect(count1.count).toBe(1); 82 | expect(count2.count).toBe(0); 83 | 84 | await db1.cleanup(); 85 | await db2.cleanup(); 86 | }); 87 | }); 88 | 89 | describe('File-Based Database', () => { 90 | it('should create and connect to file database', async () => { 91 | testDb = new TestDatabase({ mode: 'file', name: 'test-connection.db' }); 92 | const db = await testDb.initialize(); 93 | 94 | expect(db).toBeDefined(); 95 | expect(db.open).toBe(true); 96 | expect(db.name).toContain('test-connection.db'); 97 | 98 | // Verify file exists 99 | const dbPath = path.join(__dirname, '../../../.test-dbs/test-connection.db'); 100 | expect(fs.existsSync(dbPath)).toBe(true); 101 | }); 102 | 103 | it('should enable WAL mode by default for file databases', async () => { 104 | testDb = new TestDatabase({ mode: 'file', name: 'test-wal.db' }); 105 | const db = await testDb.initialize(); 106 | 107 | const mode = db.prepare('PRAGMA journal_mode').get() as { journal_mode: string }; 108 | expect(mode.journal_mode).toBe('wal'); 109 | 110 | // Verify WAL files are created 111 | const dbPath = path.join(__dirname, '../../../.test-dbs/test-wal.db'); 112 | expect(fs.existsSync(`${dbPath}-wal`)).toBe(true); 113 | expect(fs.existsSync(`${dbPath}-shm`)).toBe(true); 114 | }); 115 | 116 | it('should allow disabling WAL mode', async () => { 117 | testDb = new TestDatabase({ 118 | mode: 'file', 119 | name: 'test-no-wal.db', 120 | enableWAL: false 121 | }); 122 | const db = await testDb.initialize(); 123 | 124 | const mode = db.prepare('PRAGMA journal_mode').get() as { journal_mode: string }; 125 | expect(mode.journal_mode).not.toBe('wal'); 126 | }); 127 | 128 | it('should handle connection pooling simulation', async () => { 129 | const dbPath = path.join(__dirname, '../../../.test-dbs/test-pool.db'); 130 | 131 | // Create initial database 132 | testDb = new TestDatabase({ mode: 'file', name: 'test-pool.db' }); 133 | const initialDb = await testDb.initialize(); 134 | 135 | // Close the initial connection but keep the file 136 | initialDb.close(); 137 | 138 | // Simulate multiple connections 139 | const connections: Database.Database[] = []; 140 | const connectionCount = 5; 141 | 142 | try { 143 | for (let i = 0; i < connectionCount; i++) { 144 | const conn = new Database(dbPath, { 145 | readonly: false, 146 | fileMustExist: true 147 | }); 148 | connections.push(conn); 149 | } 150 | 151 | // All connections should be open 152 | expect(connections.every(conn => conn.open)).toBe(true); 153 | 154 | // Test concurrent reads 155 | const promises = connections.map((conn, index) => { 156 | return new Promise((resolve, reject) => { 157 | try { 158 | const result = conn.prepare('SELECT ? 
as id').get(index); 159 | resolve(result); 160 | } catch (error) { 161 | reject(error); 162 | } 163 | }); 164 | }); 165 | 166 | const results = await Promise.all(promises); 167 | expect(results).toHaveLength(connectionCount); 168 | 169 | } finally { 170 | // Cleanup connections - ensure all are closed even if some fail 171 | await Promise.all( 172 | connections.map(async (conn) => { 173 | try { 174 | if (conn.open) { 175 | conn.close(); 176 | } 177 | } catch (error) { 178 | // Ignore close errors 179 | } 180 | }) 181 | ); 182 | 183 | // Clean up files with error handling 184 | try { 185 | if (fs.existsSync(dbPath)) { 186 | fs.unlinkSync(dbPath); 187 | } 188 | if (fs.existsSync(`${dbPath}-wal`)) { 189 | fs.unlinkSync(`${dbPath}-wal`); 190 | } 191 | if (fs.existsSync(`${dbPath}-shm`)) { 192 | fs.unlinkSync(`${dbPath}-shm`); 193 | } 194 | } catch (error) { 195 | // Ignore cleanup errors 196 | } 197 | 198 | // Mark testDb as cleaned up to avoid double cleanup 199 | testDb = null as any; 200 | } 201 | }); 202 | }); 203 | 204 | describe('Connection Error Handling', () => { 205 | it('should handle invalid file path gracefully', async () => { 206 | const invalidPath = '/invalid/path/that/does/not/exist/test.db'; 207 | 208 | expect(() => { 209 | new Database(invalidPath); 210 | }).toThrow(); 211 | }); 212 | 213 | it('should handle database file corruption', async () => { 214 | const corruptPath = path.join(__dirname, '../../../.test-dbs/corrupt.db'); 215 | 216 | // Create directory if it doesn't exist 217 | const dir = path.dirname(corruptPath); 218 | if (!fs.existsSync(dir)) { 219 | fs.mkdirSync(dir, { recursive: true }); 220 | } 221 | 222 | // Create a corrupt database file 223 | fs.writeFileSync(corruptPath, 'This is not a valid SQLite database'); 224 | 225 | try { 226 | // SQLite may not immediately throw on construction, but on first operation 227 | let db: Database.Database | null = null; 228 | let errorThrown = false; 229 | 230 | try { 231 | db = new Database(corruptPath); 232 | // Try to use the database - this should fail 233 | db.prepare('SELECT 1').get(); 234 | } catch (error) { 235 | errorThrown = true; 236 | expect(error).toBeDefined(); 237 | } finally { 238 | if (db && db.open) { 239 | db.close(); 240 | } 241 | } 242 | 243 | expect(errorThrown).toBe(true); 244 | } finally { 245 | if (fs.existsSync(corruptPath)) { 246 | fs.unlinkSync(corruptPath); 247 | } 248 | } 249 | }); 250 | 251 | it('should handle readonly database access', async () => { 252 | // Create a database first 253 | testDb = new TestDatabase({ mode: 'file', name: 'test-readonly.db' }); 254 | const db = await testDb.initialize(); 255 | 256 | // Insert test data using correct schema 257 | const node = TestDataGenerator.generateNode(); 258 | db.prepare(` 259 | INSERT INTO nodes ( 260 | node_type, package_name, display_name, description, category, 261 | development_style, is_ai_tool, is_trigger, is_webhook, 262 | is_versioned, version, documentation, properties_schema, 263 | operations, credentials_required 264 | ) 265 | VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 266 | `).run( 267 | node.nodeType, 268 | node.packageName, 269 | node.displayName, 270 | node.description || '', 271 | node.category || 'Core Nodes', 272 | node.developmentStyle || 'programmatic', 273 | node.isAITool ? 1 : 0, 274 | node.isTrigger ? 1 : 0, 275 | node.isWebhook ? 1 : 0, 276 | node.isVersioned ? 
1 : 0, 277 | node.version, 278 | node.documentation, 279 | JSON.stringify(node.properties || []), 280 | JSON.stringify(node.operations || []), 281 | JSON.stringify(node.credentials || []) 282 | ); 283 | 284 | // Close the write database first 285 | db.close(); 286 | 287 | // Get the actual path from the database name 288 | const dbPath = db.name; 289 | 290 | // Open as readonly 291 | const readonlyDb = new Database(dbPath, { readonly: true }); 292 | 293 | try { 294 | // Reading should work 295 | const count = readonlyDb.prepare('SELECT COUNT(*) as count FROM nodes').get() as { count: number }; 296 | expect(count.count).toBe(1); 297 | 298 | // Writing should fail 299 | expect(() => { 300 | readonlyDb.prepare('DELETE FROM nodes').run(); 301 | }).toThrow(/readonly/); 302 | 303 | } finally { 304 | readonlyDb.close(); 305 | } 306 | }); 307 | }); 308 | 309 | describe('Connection Lifecycle', () => { 310 | it('should properly close database connections', async () => { 311 | testDb = new TestDatabase({ mode: 'file', name: 'test-lifecycle.db' }); 312 | const db = await testDb.initialize(); 313 | 314 | expect(db.open).toBe(true); 315 | 316 | await testDb.cleanup(); 317 | 318 | expect(db.open).toBe(false); 319 | }); 320 | 321 | it('should handle multiple open/close cycles', async () => { 322 | const dbPath = path.join(__dirname, '../../../.test-dbs/test-cycles.db'); 323 | 324 | for (let i = 0; i < 3; i++) { 325 | const db = new TestDatabase({ mode: 'file', name: 'test-cycles.db' }); 326 | const conn = await db.initialize(); 327 | 328 | // Perform operation 329 | const result = conn.prepare('SELECT ? as cycle').get(i) as { cycle: number }; 330 | expect(result.cycle).toBe(i); 331 | 332 | await db.cleanup(); 333 | } 334 | 335 | // Ensure file is cleaned up 336 | expect(fs.existsSync(dbPath)).toBe(false); 337 | }); 338 | 339 | it('should handle connection timeout simulation', async () => { 340 | testDb = new TestDatabase({ mode: 'file', name: 'test-timeout.db' }); 341 | const db = await testDb.initialize(); 342 | 343 | // Set a busy timeout 344 | db.exec('PRAGMA busy_timeout = 100'); // 100ms timeout 345 | 346 | // Start a transaction to lock the database 347 | db.exec('BEGIN EXCLUSIVE'); 348 | 349 | // Try to access from another connection (should timeout) 350 | const dbPath = path.join(__dirname, '../../../.test-dbs/test-timeout.db'); 351 | const conn2 = new Database(dbPath); 352 | conn2.exec('PRAGMA busy_timeout = 100'); 353 | 354 | try { 355 | expect(() => { 356 | conn2.exec('BEGIN EXCLUSIVE'); 357 | }).toThrow(/database is locked/); 358 | } finally { 359 | db.exec('ROLLBACK'); 360 | conn2.close(); 361 | } 362 | }, { timeout: 5000 }); // Add explicit timeout 363 | }); 364 | 365 | describe('Database Configuration', () => { 366 | it('should apply optimal pragmas for performance', async () => { 367 | testDb = new TestDatabase({ mode: 'file', name: 'test-pragmas.db' }); 368 | const db = await testDb.initialize(); 369 | 370 | // Apply performance pragmas 371 | db.exec('PRAGMA synchronous = NORMAL'); 372 | db.exec('PRAGMA cache_size = -64000'); // 64MB cache 373 | db.exec('PRAGMA temp_store = MEMORY'); 374 | db.exec('PRAGMA mmap_size = 268435456'); // 256MB mmap 375 | 376 | // Verify pragmas 377 | const sync = db.prepare('PRAGMA synchronous').get() as { synchronous: number }; 378 | const cache = db.prepare('PRAGMA cache_size').get() as { cache_size: number }; 379 | const temp = db.prepare('PRAGMA temp_store').get() as { temp_store: number }; 380 | const mmap = db.prepare('PRAGMA mmap_size').get() as { 
mmap_size: number }; 381 | 382 | expect(sync.synchronous).toBe(1); // NORMAL = 1 383 | expect(cache.cache_size).toBe(-64000); 384 | expect(temp.temp_store).toBe(2); // MEMORY = 2 385 | expect(mmap.mmap_size).toBeGreaterThan(0); 386 | }); 387 | 388 | it('should have foreign key support enabled', async () => { 389 | testDb = new TestDatabase({ mode: 'memory' }); 390 | const db = await testDb.initialize(); 391 | 392 | // Foreign keys should be enabled by default 393 | const fkEnabled = db.prepare('PRAGMA foreign_keys').get() as { foreign_keys: number }; 394 | expect(fkEnabled.foreign_keys).toBe(1); 395 | 396 | // Note: The current schema doesn't define foreign key constraints, 397 | // but the setting is enabled for future use 398 | }); 399 | }); 400 | }); ``` -------------------------------------------------------------------------------- /src/services/execution-processor.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Execution Processor Service 3 | * 4 | * Intelligent processing and filtering of n8n execution data to enable 5 | * AI agents to inspect executions without exceeding token limits. 6 | * 7 | * Features: 8 | * - Preview mode: Show structure and counts without values 9 | * - Summary mode: Smart default with 2 sample items per node 10 | * - Filtered mode: Granular control (node filtering, item limits) 11 | * - Smart recommendations: Guide optimal retrieval strategy 12 | */ 13 | 14 | import { 15 | Execution, 16 | ExecutionMode, 17 | ExecutionPreview, 18 | NodePreview, 19 | ExecutionRecommendation, 20 | ExecutionFilterOptions, 21 | FilteredExecutionResponse, 22 | FilteredNodeData, 23 | ExecutionStatus, 24 | } from '../types/n8n-api'; 25 | import { logger } from '../utils/logger'; 26 | 27 | /** 28 | * Size estimation and threshold constants 29 | */ 30 | const THRESHOLDS = { 31 | CHAR_SIZE_BYTES: 2, // UTF-16 characters 32 | OVERHEAD_PER_OBJECT: 50, // Approximate JSON overhead 33 | MAX_RECOMMENDED_SIZE_KB: 100, // Threshold for "can fetch full" 34 | SMALL_DATASET_ITEMS: 20, // <= this is considered small 35 | MODERATE_DATASET_ITEMS: 50, // <= this is considered moderate 36 | MODERATE_DATASET_SIZE_KB: 200, // <= this is considered moderate 37 | MAX_DEPTH: 3, // Maximum depth for structure extraction 38 | MAX_ITEMS_LIMIT: 1000, // Maximum allowed itemsLimit value 39 | } as const; 40 | 41 | /** 42 | * Helper function to extract error message from various error formats 43 | */ 44 | function extractErrorMessage(error: unknown): string { 45 | if (typeof error === 'string') { 46 | return error; 47 | } 48 | if (error && typeof error === 'object') { 49 | if ('message' in error && typeof error.message === 'string') { 50 | return error.message; 51 | } 52 | if ('error' in error && typeof error.error === 'string') { 53 | return error.error; 54 | } 55 | } 56 | return 'Unknown error'; 57 | } 58 | 59 | /** 60 | * Extract data structure (JSON schema-like) from items 61 | */ 62 | function extractStructure(data: unknown, maxDepth = THRESHOLDS.MAX_DEPTH, currentDepth = 0): Record<string, unknown> | string | unknown[] { 63 | if (currentDepth >= maxDepth) { 64 | return typeof data; 65 | } 66 | 67 | if (data === null || data === undefined) { 68 | return 'null'; 69 | } 70 | 71 | if (Array.isArray(data)) { 72 | if (data.length === 0) { 73 | return []; 74 | } 75 | // Extract structure from first item 76 | return [extractStructure(data[0], maxDepth, currentDepth + 1)]; 77 | } 78 | 79 | if (typeof data === 'object') { 80 | const structure: 
Record<string, unknown> = {}; 81 | for (const key in data) { 82 | if (Object.prototype.hasOwnProperty.call(data, key)) { 83 | structure[key] = extractStructure((data as Record<string, unknown>)[key], maxDepth, currentDepth + 1); 84 | } 85 | } 86 | return structure; 87 | } 88 | 89 | return typeof data; 90 | } 91 | 92 | /** 93 | * Estimate size of data in KB 94 | */ 95 | function estimateDataSize(data: unknown): number { 96 | try { 97 | const jsonString = JSON.stringify(data); 98 | const sizeBytes = jsonString.length * THRESHOLDS.CHAR_SIZE_BYTES; 99 | return Math.ceil(sizeBytes / 1024); 100 | } catch (error) { 101 | logger.warn('Failed to estimate data size', { error }); 102 | return 0; 103 | } 104 | } 105 | 106 | /** 107 | * Count items in execution data 108 | */ 109 | function countItems(nodeData: unknown): { input: number; output: number } { 110 | const counts = { input: 0, output: 0 }; 111 | 112 | if (!nodeData || !Array.isArray(nodeData)) { 113 | return counts; 114 | } 115 | 116 | for (const run of nodeData) { 117 | if (run?.data?.main) { 118 | const mainData = run.data.main; 119 | if (Array.isArray(mainData)) { 120 | for (const output of mainData) { 121 | if (Array.isArray(output)) { 122 | counts.output += output.length; 123 | } 124 | } 125 | } 126 | } 127 | } 128 | 129 | return counts; 130 | } 131 | 132 | /** 133 | * Generate preview for an execution 134 | */ 135 | export function generatePreview(execution: Execution): { 136 | preview: ExecutionPreview; 137 | recommendation: ExecutionRecommendation; 138 | } { 139 | const preview: ExecutionPreview = { 140 | totalNodes: 0, 141 | executedNodes: 0, 142 | estimatedSizeKB: 0, 143 | nodes: {}, 144 | }; 145 | 146 | if (!execution.data?.resultData?.runData) { 147 | return { 148 | preview, 149 | recommendation: { 150 | canFetchFull: true, 151 | suggestedMode: 'summary', 152 | reason: 'No execution data available', 153 | }, 154 | }; 155 | } 156 | 157 | const runData = execution.data.resultData.runData; 158 | const nodeNames = Object.keys(runData); 159 | preview.totalNodes = nodeNames.length; 160 | 161 | let totalItemsOutput = 0; 162 | let largestNodeItems = 0; 163 | 164 | for (const nodeName of nodeNames) { 165 | const nodeData = runData[nodeName]; 166 | const itemCounts = countItems(nodeData); 167 | 168 | // Extract structure from first run's first output item 169 | let dataStructure: Record<string, unknown> = {}; 170 | if (Array.isArray(nodeData) && nodeData.length > 0) { 171 | const firstRun = nodeData[0]; 172 | const firstItem = firstRun?.data?.main?.[0]?.[0]; 173 | if (firstItem) { 174 | dataStructure = extractStructure(firstItem) as Record<string, unknown>; 175 | } 176 | } 177 | 178 | const nodeSize = estimateDataSize(nodeData); 179 | 180 | const nodePreview: NodePreview = { 181 | status: 'success', 182 | itemCounts, 183 | dataStructure, 184 | estimatedSizeKB: nodeSize, 185 | }; 186 | 187 | // Check for errors 188 | if (Array.isArray(nodeData)) { 189 | for (const run of nodeData) { 190 | if (run.error) { 191 | nodePreview.status = 'error'; 192 | nodePreview.error = extractErrorMessage(run.error); 193 | break; 194 | } 195 | } 196 | } 197 | 198 | preview.nodes[nodeName] = nodePreview; 199 | preview.estimatedSizeKB += nodeSize; 200 | preview.executedNodes++; 201 | totalItemsOutput += itemCounts.output; 202 | largestNodeItems = Math.max(largestNodeItems, itemCounts.output); 203 | } 204 | 205 | // Generate recommendation 206 | const recommendation = generateRecommendation( 207 | preview.estimatedSizeKB, 208 | totalItemsOutput, 209 | 
largestNodeItems 210 | ); 211 | 212 | return { preview, recommendation }; 213 | } 214 | 215 | /** 216 | * Generate smart recommendation based on data characteristics 217 | */ 218 | function generateRecommendation( 219 | totalSizeKB: number, 220 | totalItems: number, 221 | largestNodeItems: number 222 | ): ExecutionRecommendation { 223 | // Can safely fetch full data 224 | if (totalSizeKB <= THRESHOLDS.MAX_RECOMMENDED_SIZE_KB && totalItems <= THRESHOLDS.SMALL_DATASET_ITEMS) { 225 | return { 226 | canFetchFull: true, 227 | suggestedMode: 'full', 228 | reason: `Small dataset (${totalSizeKB}KB, ${totalItems} items). Safe to fetch full data.`, 229 | }; 230 | } 231 | 232 | // Moderate size - use summary 233 | if (totalSizeKB <= THRESHOLDS.MODERATE_DATASET_SIZE_KB && totalItems <= THRESHOLDS.MODERATE_DATASET_ITEMS) { 234 | return { 235 | canFetchFull: false, 236 | suggestedMode: 'summary', 237 | suggestedItemsLimit: 2, 238 | reason: `Moderate dataset (${totalSizeKB}KB, ${totalItems} items). Summary mode recommended.`, 239 | }; 240 | } 241 | 242 | // Large dataset - filter with limits 243 | const suggestedLimit = Math.max(1, Math.min(5, Math.floor(100 / largestNodeItems))); 244 | 245 | return { 246 | canFetchFull: false, 247 | suggestedMode: 'filtered', 248 | suggestedItemsLimit: suggestedLimit, 249 | reason: `Large dataset (${totalSizeKB}KB, ${totalItems} items). Use filtered mode with itemsLimit: ${suggestedLimit}.`, 250 | }; 251 | } 252 | 253 | /** 254 | * Truncate items array with metadata 255 | */ 256 | function truncateItems( 257 | items: unknown[][], 258 | limit: number 259 | ): { 260 | truncated: unknown[][]; 261 | metadata: { totalItems: number; itemsShown: number; truncated: boolean }; 262 | } { 263 | if (!Array.isArray(items) || items.length === 0) { 264 | return { 265 | truncated: items || [], 266 | metadata: { 267 | totalItems: 0, 268 | itemsShown: 0, 269 | truncated: false, 270 | }, 271 | }; 272 | } 273 | 274 | let totalItems = 0; 275 | for (const output of items) { 276 | if (Array.isArray(output)) { 277 | totalItems += output.length; 278 | } 279 | } 280 | 281 | // Special case: limit = 0 means structure only 282 | if (limit === 0) { 283 | const structureOnly = items.map(output => { 284 | if (!Array.isArray(output) || output.length === 0) { 285 | return []; 286 | } 287 | return [extractStructure(output[0])]; 288 | }); 289 | 290 | return { 291 | truncated: structureOnly, 292 | metadata: { 293 | totalItems, 294 | itemsShown: 0, 295 | truncated: true, 296 | }, 297 | }; 298 | } 299 | 300 | // Limit = -1 means unlimited 301 | if (limit < 0) { 302 | return { 303 | truncated: items, 304 | metadata: { 305 | totalItems, 306 | itemsShown: totalItems, 307 | truncated: false, 308 | }, 309 | }; 310 | } 311 | 312 | // Apply limit 313 | const result: unknown[][] = []; 314 | let itemsShown = 0; 315 | 316 | for (const output of items) { 317 | if (!Array.isArray(output)) { 318 | result.push(output); 319 | continue; 320 | } 321 | 322 | if (itemsShown >= limit) { 323 | break; 324 | } 325 | 326 | const remaining = limit - itemsShown; 327 | const toTake = Math.min(remaining, output.length); 328 | result.push(output.slice(0, toTake)); 329 | itemsShown += toTake; 330 | } 331 | 332 | return { 333 | truncated: result, 334 | metadata: { 335 | totalItems, 336 | itemsShown, 337 | truncated: itemsShown < totalItems, 338 | }, 339 | }; 340 | } 341 | 342 | /** 343 | * Filter execution data based on options 344 | */ 345 | export function filterExecutionData( 346 | execution: Execution, 347 | options: 
ExecutionFilterOptions 348 | ): FilteredExecutionResponse { 349 | const mode = options.mode || 'summary'; 350 | 351 | // Validate and bound itemsLimit 352 | let itemsLimit = options.itemsLimit !== undefined ? options.itemsLimit : 2; 353 | if (itemsLimit !== -1) { // -1 means unlimited 354 | if (itemsLimit < 0) { 355 | logger.warn('Invalid itemsLimit, defaulting to 2', { provided: itemsLimit }); 356 | itemsLimit = 2; 357 | } 358 | if (itemsLimit > THRESHOLDS.MAX_ITEMS_LIMIT) { 359 | logger.warn(`itemsLimit capped at ${THRESHOLDS.MAX_ITEMS_LIMIT}`, { provided: itemsLimit }); 360 | itemsLimit = THRESHOLDS.MAX_ITEMS_LIMIT; 361 | } 362 | } 363 | 364 | const includeInputData = options.includeInputData || false; 365 | const nodeNamesFilter = options.nodeNames; 366 | 367 | // Calculate duration 368 | const duration = execution.stoppedAt && execution.startedAt 369 | ? new Date(execution.stoppedAt).getTime() - new Date(execution.startedAt).getTime() 370 | : undefined; 371 | 372 | const response: FilteredExecutionResponse = { 373 | id: execution.id, 374 | workflowId: execution.workflowId, 375 | status: execution.status, 376 | mode, 377 | startedAt: execution.startedAt, 378 | stoppedAt: execution.stoppedAt, 379 | duration, 380 | finished: execution.finished, 381 | }; 382 | 383 | // Handle preview mode 384 | if (mode === 'preview') { 385 | const { preview, recommendation } = generatePreview(execution); 386 | response.preview = preview; 387 | response.recommendation = recommendation; 388 | return response; 389 | } 390 | 391 | // Handle no data case 392 | if (!execution.data?.resultData?.runData) { 393 | response.summary = { 394 | totalNodes: 0, 395 | executedNodes: 0, 396 | totalItems: 0, 397 | hasMoreData: false, 398 | }; 399 | response.nodes = {}; 400 | 401 | if (execution.data?.resultData?.error) { 402 | response.error = execution.data.resultData.error; 403 | } 404 | 405 | return response; 406 | } 407 | 408 | const runData = execution.data.resultData.runData; 409 | let nodeNames = Object.keys(runData); 410 | 411 | // Apply node name filter 412 | if (nodeNamesFilter && nodeNamesFilter.length > 0) { 413 | nodeNames = nodeNames.filter(name => nodeNamesFilter.includes(name)); 414 | } 415 | 416 | // Process nodes 417 | const processedNodes: Record<string, FilteredNodeData> = {}; 418 | let totalItems = 0; 419 | let hasMoreData = false; 420 | 421 | for (const nodeName of nodeNames) { 422 | const nodeData = runData[nodeName]; 423 | 424 | if (!Array.isArray(nodeData) || nodeData.length === 0) { 425 | processedNodes[nodeName] = { 426 | itemsInput: 0, 427 | itemsOutput: 0, 428 | status: 'success', 429 | }; 430 | continue; 431 | } 432 | 433 | // Get first run data 434 | const firstRun = nodeData[0]; 435 | const itemCounts = countItems(nodeData); 436 | totalItems += itemCounts.output; 437 | 438 | const nodeResult: FilteredNodeData = { 439 | executionTime: firstRun.executionTime, 440 | itemsInput: itemCounts.input, 441 | itemsOutput: itemCounts.output, 442 | status: 'success', 443 | }; 444 | 445 | // Check for errors 446 | if (firstRun.error) { 447 | nodeResult.status = 'error'; 448 | nodeResult.error = extractErrorMessage(firstRun.error); 449 | } 450 | 451 | // Handle full mode - include all data 452 | if (mode === 'full') { 453 | nodeResult.data = { 454 | output: firstRun.data?.main || [], 455 | metadata: { 456 | totalItems: itemCounts.output, 457 | itemsShown: itemCounts.output, 458 | truncated: false, 459 | }, 460 | }; 461 | 462 | if (includeInputData && firstRun.inputData) { 463 | nodeResult.data.input = 
firstRun.inputData; 464 | } 465 | } else { 466 | // Summary or filtered mode - apply limits 467 | const outputData = firstRun.data?.main || []; 468 | const { truncated, metadata } = truncateItems(outputData, itemsLimit); 469 | 470 | if (metadata.truncated) { 471 | hasMoreData = true; 472 | } 473 | 474 | nodeResult.data = { 475 | output: truncated, 476 | metadata, 477 | }; 478 | 479 | if (includeInputData && firstRun.inputData) { 480 | nodeResult.data.input = firstRun.inputData; 481 | } 482 | } 483 | 484 | processedNodes[nodeName] = nodeResult; 485 | } 486 | 487 | // Add summary 488 | response.summary = { 489 | totalNodes: Object.keys(runData).length, 490 | executedNodes: nodeNames.length, 491 | totalItems, 492 | hasMoreData, 493 | }; 494 | 495 | response.nodes = processedNodes; 496 | 497 | // Include error if present 498 | if (execution.data?.resultData?.error) { 499 | response.error = execution.data.resultData.error; 500 | } 501 | 502 | return response; 503 | } 504 | 505 | /** 506 | * Process execution based on mode and options 507 | * Main entry point for the service 508 | */ 509 | export function processExecution( 510 | execution: Execution, 511 | options: ExecutionFilterOptions = {} 512 | ): FilteredExecutionResponse | Execution { 513 | // Legacy behavior: if no mode specified and no filtering options, return original 514 | if (!options.mode && !options.nodeNames && options.itemsLimit === undefined) { 515 | return execution; 516 | } 517 | 518 | return filterExecutionData(execution, options); 519 | } 520 | ``` -------------------------------------------------------------------------------- /tests/integration/n8n-api/workflows/validate-workflow.test.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Integration Tests: handleValidateWorkflow 3 | * 4 | * Tests workflow validation against a real n8n instance. 5 | * Covers validation profiles, validation types, and error detection. 
6 | */ 7 | 8 | import { describe, it, expect, beforeEach, afterEach, afterAll } from 'vitest'; 9 | import { createTestContext, TestContext, createTestWorkflowName } from '../utils/test-context'; 10 | import { getTestN8nClient } from '../utils/n8n-client'; 11 | import { N8nApiClient } from '../../../../src/services/n8n-api-client'; 12 | import { SIMPLE_WEBHOOK_WORKFLOW } from '../utils/fixtures'; 13 | import { cleanupOrphanedWorkflows } from '../utils/cleanup-helpers'; 14 | import { createMcpContext } from '../utils/mcp-context'; 15 | import { InstanceContext } from '../../../../src/types/instance-context'; 16 | import { handleValidateWorkflow } from '../../../../src/mcp/handlers-n8n-manager'; 17 | import { getNodeRepository, closeNodeRepository } from '../utils/node-repository'; 18 | import { NodeRepository } from '../../../../src/database/node-repository'; 19 | import { ValidationResponse } from '../types/mcp-responses'; 20 | 21 | describe('Integration: handleValidateWorkflow', () => { 22 | let context: TestContext; 23 | let client: N8nApiClient; 24 | let mcpContext: InstanceContext; 25 | let repository: NodeRepository; 26 | 27 | beforeEach(async () => { 28 | context = createTestContext(); 29 | client = getTestN8nClient(); 30 | mcpContext = createMcpContext(); 31 | repository = await getNodeRepository(); 32 | }); 33 | 34 | afterEach(async () => { 35 | await context.cleanup(); 36 | }); 37 | 38 | afterAll(async () => { 39 | await closeNodeRepository(); 40 | if (!process.env.CI) { 41 | await cleanupOrphanedWorkflows(); 42 | } 43 | }); 44 | 45 | // ====================================================================== 46 | // Valid Workflow - All Profiles 47 | // ====================================================================== 48 | 49 | describe('Valid Workflow', () => { 50 | it('should validate valid workflow with default profile (runtime)', async () => { 51 | // Create valid workflow 52 | const workflow = { 53 | ...SIMPLE_WEBHOOK_WORKFLOW, 54 | name: createTestWorkflowName('Validate - Valid Default'), 55 | tags: ['mcp-integration-test'] 56 | }; 57 | 58 | const created = await client.createWorkflow(workflow); 59 | context.trackWorkflow(created.id!); 60 | 61 | // Validate with default profile 62 | const response = await handleValidateWorkflow( 63 | { id: created.id }, 64 | repository, 65 | mcpContext 66 | ); 67 | 68 | expect(response.success).toBe(true); 69 | const data = response.data as ValidationResponse; 70 | 71 | // Verify response structure 72 | expect(data.valid).toBe(true); 73 | expect(data.errors).toBeUndefined(); // Only present if errors exist 74 | expect(data.summary).toBeDefined(); 75 | expect(data.summary.errorCount).toBe(0); 76 | }); 77 | 78 | it('should validate with strict profile', async () => { 79 | const workflow = { 80 | ...SIMPLE_WEBHOOK_WORKFLOW, 81 | name: createTestWorkflowName('Validate - Valid Strict'), 82 | tags: ['mcp-integration-test'] 83 | }; 84 | 85 | const created = await client.createWorkflow(workflow); 86 | context.trackWorkflow(created.id!); 87 | 88 | const response = await handleValidateWorkflow( 89 | { 90 | id: created.id, 91 | options: { profile: 'strict' } 92 | }, 93 | repository, 94 | mcpContext 95 | ); 96 | 97 | expect(response.success).toBe(true); 98 | const data = response.data as any; 99 | expect(data.valid).toBe(true); 100 | }); 101 | 102 | it('should validate with ai-friendly profile', async () => { 103 | const workflow = { 104 | ...SIMPLE_WEBHOOK_WORKFLOW, 105 | name: createTestWorkflowName('Validate - Valid AI Friendly'), 106 | tags: 
['mcp-integration-test'] 107 | }; 108 | 109 | const created = await client.createWorkflow(workflow); 110 | context.trackWorkflow(created.id!); 111 | 112 | const response = await handleValidateWorkflow( 113 | { 114 | id: created.id, 115 | options: { profile: 'ai-friendly' } 116 | }, 117 | repository, 118 | mcpContext 119 | ); 120 | 121 | expect(response.success).toBe(true); 122 | const data = response.data as any; 123 | expect(data.valid).toBe(true); 124 | }); 125 | 126 | it('should validate with minimal profile', async () => { 127 | const workflow = { 128 | ...SIMPLE_WEBHOOK_WORKFLOW, 129 | name: createTestWorkflowName('Validate - Valid Minimal'), 130 | tags: ['mcp-integration-test'] 131 | }; 132 | 133 | const created = await client.createWorkflow(workflow); 134 | context.trackWorkflow(created.id!); 135 | 136 | const response = await handleValidateWorkflow( 137 | { 138 | id: created.id, 139 | options: { profile: 'minimal' } 140 | }, 141 | repository, 142 | mcpContext 143 | ); 144 | 145 | expect(response.success).toBe(true); 146 | const data = response.data as any; 147 | expect(data.valid).toBe(true); 148 | }); 149 | }); 150 | 151 | // ====================================================================== 152 | // Invalid Workflow - Error Detection 153 | // ====================================================================== 154 | 155 | describe('Invalid Workflow Detection', () => { 156 | it('should detect invalid node type', async () => { 157 | // Create workflow with invalid node type 158 | const workflow = { 159 | name: createTestWorkflowName('Validate - Invalid Node Type'), 160 | nodes: [ 161 | { 162 | id: 'invalid-1', 163 | name: 'Invalid Node', 164 | type: 'invalid-node-type', 165 | typeVersion: 1, 166 | position: [250, 300] as [number, number], 167 | parameters: {} 168 | } 169 | ], 170 | connections: {}, 171 | settings: {}, 172 | tags: ['mcp-integration-test'] 173 | }; 174 | 175 | const created = await client.createWorkflow(workflow); 176 | context.trackWorkflow(created.id!); 177 | 178 | const response = await handleValidateWorkflow( 179 | { id: created.id }, 180 | repository, 181 | mcpContext 182 | ); 183 | 184 | expect(response.success).toBe(true); 185 | const data = response.data as any; 186 | 187 | // Should detect error 188 | expect(data.valid).toBe(false); 189 | expect(data.errors).toBeDefined(); 190 | expect(data.errors.length).toBeGreaterThan(0); 191 | expect(data.summary.errorCount).toBeGreaterThan(0); 192 | 193 | // Error should mention invalid node type 194 | const errorMessages = data.errors.map((e: any) => e.message).join(' '); 195 | expect(errorMessages).toMatch(/invalid-node-type|not found|unknown/i); 196 | }); 197 | 198 | it('should detect missing required connections', async () => { 199 | // Create workflow with 2 nodes but no connections 200 | const workflow = { 201 | name: createTestWorkflowName('Validate - Missing Connections'), 202 | nodes: [ 203 | { 204 | id: 'webhook-1', 205 | name: 'Webhook', 206 | type: 'n8n-nodes-base.webhook', 207 | typeVersion: 2, 208 | position: [250, 300] as [number, number], 209 | parameters: { 210 | httpMethod: 'GET', 211 | path: 'test' 212 | } 213 | }, 214 | { 215 | id: 'set-1', 216 | name: 'Set', 217 | type: 'n8n-nodes-base.set', 218 | typeVersion: 3.4, 219 | position: [450, 300] as [number, number], 220 | parameters: { 221 | assignments: { 222 | assignments: [] 223 | } 224 | } 225 | } 226 | ], 227 | connections: {}, // Empty connections - Set node is unreachable 228 | settings: {}, 229 | tags: ['mcp-integration-test'] 230 | }; 231 
| 232 | const created = await client.createWorkflow(workflow); 233 | context.trackWorkflow(created.id!); 234 | 235 | const response = await handleValidateWorkflow( 236 | { id: created.id }, 237 | repository, 238 | mcpContext 239 | ); 240 | 241 | expect(response.success).toBe(true); 242 | const data = response.data as any; 243 | 244 | // Multi-node workflow with empty connections should produce warning/error 245 | // (depending on validation profile) 246 | expect(data.valid).toBe(false); 247 | }); 248 | }); 249 | 250 | // ====================================================================== 251 | // Selective Validation 252 | // ====================================================================== 253 | 254 | describe('Selective Validation', () => { 255 | it('should validate nodes only (skip connections)', async () => { 256 | const workflow = { 257 | ...SIMPLE_WEBHOOK_WORKFLOW, 258 | name: createTestWorkflowName('Validate - Nodes Only'), 259 | tags: ['mcp-integration-test'] 260 | }; 261 | 262 | const created = await client.createWorkflow(workflow); 263 | context.trackWorkflow(created.id!); 264 | 265 | const response = await handleValidateWorkflow( 266 | { 267 | id: created.id, 268 | options: { 269 | validateNodes: true, 270 | validateConnections: false, 271 | validateExpressions: false 272 | } 273 | }, 274 | repository, 275 | mcpContext 276 | ); 277 | 278 | expect(response.success).toBe(true); 279 | const data = response.data as any; 280 | expect(data.valid).toBe(true); 281 | }); 282 | 283 | it('should validate connections only (skip nodes)', async () => { 284 | const workflow = { 285 | ...SIMPLE_WEBHOOK_WORKFLOW, 286 | name: createTestWorkflowName('Validate - Connections Only'), 287 | tags: ['mcp-integration-test'] 288 | }; 289 | 290 | const created = await client.createWorkflow(workflow); 291 | context.trackWorkflow(created.id!); 292 | 293 | const response = await handleValidateWorkflow( 294 | { 295 | id: created.id, 296 | options: { 297 | validateNodes: false, 298 | validateConnections: true, 299 | validateExpressions: false 300 | } 301 | }, 302 | repository, 303 | mcpContext 304 | ); 305 | 306 | expect(response.success).toBe(true); 307 | const data = response.data as any; 308 | expect(data.valid).toBe(true); 309 | }); 310 | 311 | it('should validate expressions only', async () => { 312 | const workflow = { 313 | ...SIMPLE_WEBHOOK_WORKFLOW, 314 | name: createTestWorkflowName('Validate - Expressions Only'), 315 | tags: ['mcp-integration-test'] 316 | }; 317 | 318 | const created = await client.createWorkflow(workflow); 319 | context.trackWorkflow(created.id!); 320 | 321 | const response = await handleValidateWorkflow( 322 | { 323 | id: created.id, 324 | options: { 325 | validateNodes: false, 326 | validateConnections: false, 327 | validateExpressions: true 328 | } 329 | }, 330 | repository, 331 | mcpContext 332 | ); 333 | 334 | expect(response.success).toBe(true); 335 | // Expression validation may pass even if workflow has other issues 336 | expect(response.data).toBeDefined(); 337 | }); 338 | }); 339 | 340 | // ====================================================================== 341 | // Error Handling 342 | // ====================================================================== 343 | 344 | describe('Error Handling', () => { 345 | it('should handle non-existent workflow ID', async () => { 346 | const response = await handleValidateWorkflow( 347 | { id: '99999999' }, 348 | repository, 349 | mcpContext 350 | ); 351 | 352 | expect(response.success).toBe(false); 353 | 
expect(response.error).toBeDefined(); 354 | }); 355 | 356 | it('should handle invalid profile parameter', async () => { 357 | const workflow = { 358 | ...SIMPLE_WEBHOOK_WORKFLOW, 359 | name: createTestWorkflowName('Validate - Invalid Profile'), 360 | tags: ['mcp-integration-test'] 361 | }; 362 | 363 | const created = await client.createWorkflow(workflow); 364 | context.trackWorkflow(created.id!); 365 | 366 | const response = await handleValidateWorkflow( 367 | { 368 | id: created.id, 369 | options: { profile: 'invalid-profile' as any } 370 | }, 371 | repository, 372 | mcpContext 373 | ); 374 | 375 | // Should either fail validation or use default profile 376 | expect(response.success).toBe(false); 377 | }); 378 | }); 379 | 380 | // ====================================================================== 381 | // Response Format Verification 382 | // ====================================================================== 383 | 384 | describe('Response Format', () => { 385 | it('should return complete validation response structure', async () => { 386 | const workflow = { 387 | ...SIMPLE_WEBHOOK_WORKFLOW, 388 | name: createTestWorkflowName('Validate - Response Format'), 389 | tags: ['mcp-integration-test'] 390 | }; 391 | 392 | const created = await client.createWorkflow(workflow); 393 | context.trackWorkflow(created.id!); 394 | 395 | const response = await handleValidateWorkflow( 396 | { id: created.id }, 397 | repository, 398 | mcpContext 399 | ); 400 | 401 | expect(response.success).toBe(true); 402 | const data = response.data as any; 403 | 404 | // Verify required fields 405 | expect(data).toHaveProperty('workflowId'); 406 | expect(data).toHaveProperty('workflowName'); 407 | expect(data).toHaveProperty('valid'); 408 | expect(data).toHaveProperty('summary'); 409 | 410 | // errors and warnings only present if they exist 411 | // For valid workflow, they should be undefined 412 | if (data.errors) { 413 | expect(Array.isArray(data.errors)).toBe(true); 414 | } 415 | if (data.warnings) { 416 | expect(Array.isArray(data.warnings)).toBe(true); 417 | } 418 | 419 | // Verify summary structure 420 | expect(data.summary).toHaveProperty('errorCount'); 421 | expect(data.summary).toHaveProperty('warningCount'); 422 | expect(data.summary).toHaveProperty('totalNodes'); 423 | expect(data.summary).toHaveProperty('enabledNodes'); 424 | expect(data.summary).toHaveProperty('triggerNodes'); 425 | 426 | // Verify types 427 | expect(typeof data.valid).toBe('boolean'); 428 | expect(typeof data.summary.errorCount).toBe('number'); 429 | expect(typeof data.summary.warningCount).toBe('number'); 430 | }); 431 | }); 432 | }); 433 | ``` -------------------------------------------------------------------------------- /tests/comprehensive-extraction-test.js: -------------------------------------------------------------------------------- ```javascript 1 | #!/usr/bin/env node 2 | 3 | /** 4 | * Comprehensive test suite for n8n node extraction functionality 5 | * Tests all aspects of node extraction for database storage 6 | */ 7 | 8 | const fs = require('fs').promises; 9 | const path = require('path'); 10 | const crypto = require('crypto'); 11 | 12 | // Import our components 13 | const { NodeSourceExtractor } = require('../dist/utils/node-source-extractor'); 14 | const { N8NMCPServer } = require('../dist/mcp/server'); 15 | 16 | // Test configuration 17 | const TEST_RESULTS_DIR = path.join(__dirname, 'test-results'); 18 | const EXTRACTED_NODES_FILE = path.join(TEST_RESULTS_DIR, 'extracted-nodes.json'); 19 | const TEST_SUMMARY_FILE 
= path.join(TEST_RESULTS_DIR, 'test-summary.json'); 20 | 21 | // Create results directory 22 | async function ensureTestDir() { 23 | try { 24 | await fs.mkdir(TEST_RESULTS_DIR, { recursive: true }); 25 | } catch (error) { 26 | console.error('Failed to create test directory:', error); 27 | } 28 | } 29 | 30 | // Test results tracking 31 | const testResults = { 32 | totalTests: 0, 33 | passed: 0, 34 | failed: 0, 35 | startTime: new Date(), 36 | endTime: null, 37 | tests: [], 38 | extractedNodes: [], 39 | databaseSchema: null 40 | }; 41 | 42 | // Helper function to run a test 43 | async function runTest(name, testFn) { 44 | console.log(`\n📋 Running: ${name}`); 45 | testResults.totalTests++; 46 | 47 | const testResult = { 48 | name, 49 | status: 'pending', 50 | startTime: new Date(), 51 | endTime: null, 52 | error: null, 53 | details: {} 54 | }; 55 | 56 | try { 57 | const result = await testFn(); 58 | testResult.status = 'passed'; 59 | testResult.details = result; 60 | testResults.passed++; 61 | console.log(`✅ PASSED: ${name}`); 62 | } catch (error) { 63 | testResult.status = 'failed'; 64 | testResult.error = error.message; 65 | testResults.failed++; 66 | console.error(`❌ FAILED: ${name}`); 67 | console.error(` Error: ${error.message}`); 68 | if (process.env.DEBUG) { 69 | console.error(error.stack); 70 | } 71 | } 72 | 73 | testResult.endTime = new Date(); 74 | testResults.tests.push(testResult); 75 | return testResult; 76 | } 77 | 78 | // Test 1: Basic extraction functionality 79 | async function testBasicExtraction() { 80 | const extractor = new NodeSourceExtractor(); 81 | 82 | // Test a known node 83 | const testNodes = [ 84 | '@n8n/n8n-nodes-langchain.Agent', 85 | 'n8n-nodes-base.Function', 86 | 'n8n-nodes-base.Webhook' 87 | ]; 88 | 89 | const results = []; 90 | 91 | for (const nodeType of testNodes) { 92 | try { 93 | console.log(` - Extracting ${nodeType}...`); 94 | const nodeInfo = await extractor.extractNodeSource(nodeType); 95 | 96 | results.push({ 97 | nodeType, 98 | extracted: true, 99 | codeLength: nodeInfo.sourceCode.length, 100 | hasCredentials: !!nodeInfo.credentialCode, 101 | hasPackageInfo: !!nodeInfo.packageInfo, 102 | location: nodeInfo.location 103 | }); 104 | 105 | console.log(` ✓ Extracted: ${nodeInfo.sourceCode.length} bytes`); 106 | } catch (error) { 107 | results.push({ 108 | nodeType, 109 | extracted: false, 110 | error: error.message 111 | }); 112 | console.log(` ✗ Failed: ${error.message}`); 113 | } 114 | } 115 | 116 | // At least one should succeed 117 | const successCount = results.filter(r => r.extracted).length; 118 | if (successCount === 0) { 119 | throw new Error('No nodes could be extracted'); 120 | } 121 | 122 | return { results, successCount, totalTested: testNodes.length }; 123 | } 124 | 125 | // Test 2: List available nodes 126 | async function testListAvailableNodes() { 127 | const extractor = new NodeSourceExtractor(); 128 | 129 | console.log(' - Listing all available nodes...'); 130 | const nodes = await extractor.listAvailableNodes(); 131 | 132 | console.log(` - Found ${nodes.length} nodes`); 133 | 134 | // Group by package 135 | const nodesByPackage = {}; 136 | nodes.forEach(node => { 137 | const pkg = node.packageName || 'unknown'; 138 | if (!nodesByPackage[pkg]) { 139 | nodesByPackage[pkg] = []; 140 | } 141 | nodesByPackage[pkg].push(node.name); 142 | }); 143 | 144 | // Show summary 145 | console.log(' - Node distribution by package:'); 146 | Object.entries(nodesByPackage).forEach(([pkg, nodeList]) => { 147 | console.log(` ${pkg}: ${nodeList.length} 
nodes`); 148 | }); 149 | 150 | if (nodes.length === 0) { 151 | throw new Error('No nodes found'); 152 | } 153 | 154 | return { 155 | totalNodes: nodes.length, 156 | packages: Object.keys(nodesByPackage), 157 | nodesByPackage, 158 | sampleNodes: nodes.slice(0, 5) 159 | }; 160 | } 161 | 162 | // Test 3: Bulk extraction simulation 163 | async function testBulkExtraction() { 164 | const extractor = new NodeSourceExtractor(); 165 | 166 | // First get list of nodes 167 | const allNodes = await extractor.listAvailableNodes(); 168 | 169 | // Limit to a reasonable number for testing 170 | const nodesToExtract = allNodes.slice(0, 10); 171 | console.log(` - Testing bulk extraction of ${nodesToExtract.length} nodes...`); 172 | 173 | const extractionResults = []; 174 | const startTime = Date.now(); 175 | 176 | for (const node of nodesToExtract) { 177 | const nodeType = node.packageName ? `${node.packageName}.${node.name}` : node.name; 178 | 179 | try { 180 | const nodeInfo = await extractor.extractNodeSource(nodeType); 181 | 182 | // Calculate hash for deduplication 183 | const codeHash = crypto.createHash('sha256').update(nodeInfo.sourceCode).digest('hex'); 184 | 185 | const extractedData = { 186 | nodeType, 187 | name: node.name, 188 | packageName: node.packageName, 189 | codeLength: nodeInfo.sourceCode.length, 190 | codeHash, 191 | hasCredentials: !!nodeInfo.credentialCode, 192 | hasPackageInfo: !!nodeInfo.packageInfo, 193 | location: nodeInfo.location, 194 | extractedAt: new Date().toISOString() 195 | }; 196 | 197 | extractionResults.push({ 198 | success: true, 199 | data: extractedData 200 | }); 201 | 202 | // Store for database simulation 203 | testResults.extractedNodes.push({ 204 | ...extractedData, 205 | sourceCode: nodeInfo.sourceCode, 206 | credentialCode: nodeInfo.credentialCode, 207 | packageInfo: nodeInfo.packageInfo 208 | }); 209 | 210 | } catch (error) { 211 | extractionResults.push({ 212 | success: false, 213 | nodeType, 214 | error: error.message 215 | }); 216 | } 217 | } 218 | 219 | const endTime = Date.now(); 220 | const successCount = extractionResults.filter(r => r.success).length; 221 | 222 | console.log(` - Extraction completed in ${endTime - startTime}ms`); 223 | console.log(` - Success rate: ${successCount}/${nodesToExtract.length} (${(successCount/nodesToExtract.length*100).toFixed(1)}%)`); 224 | 225 | return { 226 | totalAttempted: nodesToExtract.length, 227 | successCount, 228 | failureCount: nodesToExtract.length - successCount, 229 | timeElapsed: endTime - startTime, 230 | results: extractionResults 231 | }; 232 | } 233 | 234 | // Test 4: Database schema simulation 235 | async function testDatabaseSchema() { 236 | console.log(' - Simulating database schema for extracted nodes...'); 237 | 238 | // Define a schema that would work for storing extracted nodes 239 | const schema = { 240 | tables: { 241 | nodes: { 242 | columns: { 243 | id: 'UUID PRIMARY KEY', 244 | node_type: 'VARCHAR(255) UNIQUE NOT NULL', 245 | name: 'VARCHAR(255) NOT NULL', 246 | package_name: 'VARCHAR(255)', 247 | display_name: 'VARCHAR(255)', 248 | description: 'TEXT', 249 | version: 'VARCHAR(50)', 250 | code_hash: 'VARCHAR(64) NOT NULL', 251 | code_length: 'INTEGER NOT NULL', 252 | source_location: 'TEXT', 253 | extracted_at: 'TIMESTAMP NOT NULL', 254 | updated_at: 'TIMESTAMP' 255 | }, 256 | indexes: ['node_type', 'package_name', 'code_hash'] 257 | }, 258 | node_source_code: { 259 | columns: { 260 | id: 'UUID PRIMARY KEY', 261 | node_id: 'UUID REFERENCES nodes(id)', 262 | source_code: 'TEXT NOT NULL', 
263 | compiled_code: 'TEXT', 264 | source_map: 'TEXT' 265 | } 266 | }, 267 | node_credentials: { 268 | columns: { 269 | id: 'UUID PRIMARY KEY', 270 | node_id: 'UUID REFERENCES nodes(id)', 271 | credential_type: 'VARCHAR(255) NOT NULL', 272 | credential_code: 'TEXT NOT NULL', 273 | required_fields: 'JSONB' 274 | } 275 | }, 276 | node_metadata: { 277 | columns: { 278 | id: 'UUID PRIMARY KEY', 279 | node_id: 'UUID REFERENCES nodes(id)', 280 | package_info: 'JSONB', 281 | dependencies: 'JSONB', 282 | icon: 'TEXT', 283 | categories: 'TEXT[]', 284 | documentation_url: 'TEXT' 285 | } 286 | } 287 | } 288 | }; 289 | 290 | // Validate that our extracted data fits the schema 291 | const sampleNode = testResults.extractedNodes[0]; 292 | if (sampleNode) { 293 | console.log(' - Validating extracted data against schema...'); 294 | 295 | // Simulate database record 296 | const dbRecord = { 297 | nodes: { 298 | id: crypto.randomUUID(), 299 | node_type: sampleNode.nodeType, 300 | name: sampleNode.name, 301 | package_name: sampleNode.packageName, 302 | code_hash: sampleNode.codeHash, 303 | code_length: sampleNode.codeLength, 304 | source_location: sampleNode.location, 305 | extracted_at: new Date() 306 | }, 307 | node_source_code: { 308 | source_code: sampleNode.sourceCode 309 | }, 310 | node_credentials: sampleNode.credentialCode ? { 311 | credential_code: sampleNode.credentialCode 312 | } : null, 313 | node_metadata: { 314 | package_info: sampleNode.packageInfo 315 | } 316 | }; 317 | 318 | console.log(' - Sample database record created successfully'); 319 | } 320 | 321 | testResults.databaseSchema = schema; 322 | 323 | return { 324 | schemaValid: true, 325 | tablesCount: Object.keys(schema.tables).length, 326 | estimatedStoragePerNode: sampleNode ? sampleNode.codeLength + 1024 : 0 // code + metadata overhead 327 | }; 328 | } 329 | 330 | // Test 5: Error handling 331 | async function testErrorHandling() { 332 | const extractor = new NodeSourceExtractor(); 333 | 334 | const errorTests = [ 335 | { 336 | name: 'Non-existent node', 337 | nodeType: 'non-existent-package.FakeNode', 338 | expectedError: 'not found' 339 | }, 340 | { 341 | name: 'Invalid node type format', 342 | nodeType: '', 343 | expectedError: 'invalid' 344 | }, 345 | { 346 | name: 'Malformed package name', 347 | nodeType: '@[email protected]', 348 | expectedError: 'not found' 349 | } 350 | ]; 351 | 352 | const results = []; 353 | 354 | for (const test of errorTests) { 355 | try { 356 | console.log(` - Testing: ${test.name}`); 357 | await extractor.extractNodeSource(test.nodeType); 358 | results.push({ 359 | ...test, 360 | passed: false, 361 | error: 'Expected error but extraction succeeded' 362 | }); 363 | } catch (error) { 364 | const passed = error.message.toLowerCase().includes(test.expectedError); 365 | results.push({ 366 | ...test, 367 | passed, 368 | actualError: error.message 369 | }); 370 | console.log(` ${passed ? 
'✓' : '✗'} Got expected error type`); 371 | } 372 | } 373 | 374 | const passedCount = results.filter(r => r.passed).length; 375 | return { 376 | totalTests: errorTests.length, 377 | passed: passedCount, 378 | results 379 | }; 380 | } 381 | 382 | // Test 6: MCP server integration 383 | async function testMCPServerIntegration() { 384 | console.log(' - Testing MCP server tool handlers...'); 385 | 386 | const config = { 387 | port: 3000, 388 | host: '0.0.0.0', 389 | authToken: 'test-token' 390 | }; 391 | 392 | const n8nConfig = { 393 | apiUrl: 'http://localhost:5678', 394 | apiKey: 'test-key' 395 | }; 396 | 397 | // Note: We can't fully test the server without running it, 398 | // but we can verify the handlers are set up correctly 399 | const server = new N8NMCPServer(config, n8nConfig); 400 | 401 | // Verify the server instance is created 402 | if (!server) { 403 | throw new Error('Failed to create MCP server instance'); 404 | } 405 | 406 | console.log(' - MCP server instance created successfully'); 407 | 408 | return { 409 | serverCreated: true, 410 | config 411 | }; 412 | } 413 | 414 | // Main test runner 415 | async function runAllTests() { 416 | console.log('=== Comprehensive n8n Node Extraction Test Suite ===\n'); 417 | console.log('This test suite validates the extraction of n8n nodes for database storage.\n'); 418 | 419 | await ensureTestDir(); 420 | 421 | // Update todo status 422 | console.log('Starting test execution...\n'); 423 | 424 | // Run all tests 425 | await runTest('Basic Node Extraction', testBasicExtraction); 426 | await runTest('List Available Nodes', testListAvailableNodes); 427 | await runTest('Bulk Node Extraction', testBulkExtraction); 428 | await runTest('Database Schema Validation', testDatabaseSchema); 429 | await runTest('Error Handling', testErrorHandling); 430 | await runTest('MCP Server Integration', testMCPServerIntegration); 431 | 432 | // Calculate final results 433 | testResults.endTime = new Date(); 434 | const duration = (testResults.endTime - testResults.startTime) / 1000; 435 | 436 | // Save extracted nodes data 437 | if (testResults.extractedNodes.length > 0) { 438 | await fs.writeFile( 439 | EXTRACTED_NODES_FILE, 440 | JSON.stringify(testResults.extractedNodes, null, 2) 441 | ); 442 | console.log(`\n📁 Extracted nodes saved to: ${EXTRACTED_NODES_FILE}`); 443 | } 444 | 445 | // Save test summary 446 | const summary = { 447 | ...testResults, 448 | extractedNodes: testResults.extractedNodes.length // Just count, not full data 449 | }; 450 | await fs.writeFile( 451 | TEST_SUMMARY_FILE, 452 | JSON.stringify(summary, null, 2) 453 | ); 454 | 455 | // Print summary 456 | console.log('\n' + '='.repeat(60)); 457 | console.log('TEST SUMMARY'); 458 | console.log('='.repeat(60)); 459 | console.log(`Total Tests: ${testResults.totalTests}`); 460 | console.log(`Passed: ${testResults.passed} ✅`); 461 | console.log(`Failed: ${testResults.failed} ❌`); 462 | console.log(`Duration: ${duration.toFixed(2)}s`); 463 | console.log(`Nodes Extracted: ${testResults.extractedNodes.length}`); 464 | 465 | if (testResults.databaseSchema) { 466 | console.log('\nDatabase Schema:'); 467 | console.log(`- Tables: ${Object.keys(testResults.databaseSchema.tables).join(', ')}`); 468 | console.log(`- Ready for bulk storage: YES`); 469 | } 470 | 471 | console.log('\n' + '='.repeat(60)); 472 | 473 | // Exit with appropriate code 474 | process.exit(testResults.failed > 0 ? 
1 : 0); 475 | } 476 | 477 | // Handle errors 478 | process.on('unhandledRejection', (error) => { 479 | console.error('\n💥 Unhandled error:', error); 480 | process.exit(1); 481 | }); 482 | 483 | // Run tests 484 | runAllTests(); ``` -------------------------------------------------------------------------------- /tests/unit/utils/database-utils.test.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest'; 2 | import * as fs from 'fs'; 3 | import * as path from 'path'; 4 | import { 5 | createTestDatabase, 6 | seedTestNodes, 7 | seedTestTemplates, 8 | createTestNode, 9 | createTestTemplate, 10 | resetDatabase, 11 | createDatabaseSnapshot, 12 | restoreDatabaseSnapshot, 13 | loadFixtures, 14 | dbHelpers, 15 | createMockDatabaseAdapter, 16 | withTransaction, 17 | measureDatabaseOperation, 18 | TestDatabase 19 | } from '../../utils/database-utils'; 20 | 21 | describe('Database Utils', () => { 22 | let testDb: TestDatabase; 23 | 24 | afterEach(async () => { 25 | if (testDb) { 26 | await testDb.cleanup(); 27 | } 28 | }); 29 | 30 | describe('createTestDatabase', () => { 31 | it('should create an in-memory database by default', async () => { 32 | testDb = await createTestDatabase(); 33 | 34 | expect(testDb.adapter).toBeDefined(); 35 | expect(testDb.nodeRepository).toBeDefined(); 36 | expect(testDb.templateRepository).toBeDefined(); 37 | expect(testDb.path).toBe(':memory:'); 38 | }); 39 | 40 | it('should create a file-based database when requested', async () => { 41 | const dbPath = path.join(__dirname, '../../temp/test-file.db'); 42 | testDb = await createTestDatabase({ inMemory: false, dbPath }); 43 | 44 | expect(testDb.path).toBe(dbPath); 45 | expect(fs.existsSync(dbPath)).toBe(true); 46 | }); 47 | 48 | it('should initialize schema when requested', async () => { 49 | testDb = await createTestDatabase({ initSchema: true }); 50 | 51 | // Verify tables exist 52 | const tables = testDb.adapter 53 | .prepare("SELECT name FROM sqlite_master WHERE type='table'") 54 | .all() as { name: string }[]; 55 | 56 | const tableNames = tables.map(t => t.name); 57 | expect(tableNames).toContain('nodes'); 58 | expect(tableNames).toContain('templates'); 59 | }); 60 | 61 | it('should skip schema initialization when requested', async () => { 62 | testDb = await createTestDatabase({ initSchema: false }); 63 | 64 | // Verify tables don't exist (SQLite has internal tables, so check for our specific tables) 65 | const tables = testDb.adapter 66 | .prepare("SELECT name FROM sqlite_master WHERE type='table' AND name IN ('nodes', 'templates')") 67 | .all() as { name: string }[]; 68 | 69 | expect(tables.length).toBe(0); 70 | }); 71 | }); 72 | 73 | describe('seedTestNodes', () => { 74 | beforeEach(async () => { 75 | testDb = await createTestDatabase(); 76 | }); 77 | 78 | it('should seed default test nodes', async () => { 79 | const nodes = await seedTestNodes(testDb.nodeRepository); 80 | 81 | expect(nodes).toHaveLength(3); 82 | expect(nodes[0].nodeType).toBe('nodes-base.httpRequest'); 83 | expect(nodes[1].nodeType).toBe('nodes-base.webhook'); 84 | expect(nodes[2].nodeType).toBe('nodes-base.slack'); 85 | }); 86 | 87 | it('should seed custom nodes along with defaults', async () => { 88 | const customNodes = [ 89 | { nodeType: 'nodes-base.custom1', displayName: 'Custom 1' }, 90 | { nodeType: 'nodes-base.custom2', displayName: 'Custom 2' } 91 | ]; 92 | 93 | const nodes = await seedTestNodes(testDb.nodeRepository, 
customNodes); 94 | 95 | expect(nodes).toHaveLength(5); // 3 default + 2 custom 96 | expect(nodes[3].nodeType).toBe('nodes-base.custom1'); 97 | expect(nodes[4].nodeType).toBe('nodes-base.custom2'); 98 | }); 99 | 100 | it('should save nodes to database', async () => { 101 | await seedTestNodes(testDb.nodeRepository); 102 | 103 | const count = dbHelpers.countRows(testDb.adapter, 'nodes'); 104 | expect(count).toBe(3); 105 | 106 | const httpNode = testDb.nodeRepository.getNode('nodes-base.httpRequest'); 107 | expect(httpNode).toBeDefined(); 108 | expect(httpNode.displayName).toBe('HTTP Request'); 109 | }); 110 | }); 111 | 112 | describe('seedTestTemplates', () => { 113 | beforeEach(async () => { 114 | testDb = await createTestDatabase(); 115 | }); 116 | 117 | it('should seed default test templates', async () => { 118 | const templates = await seedTestTemplates(testDb.templateRepository); 119 | 120 | expect(templates).toHaveLength(2); 121 | expect(templates[0].name).toBe('Simple HTTP Workflow'); 122 | expect(templates[1].name).toBe('Webhook to Slack'); 123 | }); 124 | 125 | it('should seed custom templates', async () => { 126 | const customTemplates = [ 127 | { id: 100, name: 'Custom Template' } 128 | ]; 129 | 130 | const templates = await seedTestTemplates(testDb.templateRepository, customTemplates); 131 | 132 | expect(templates).toHaveLength(3); 133 | expect(templates[2].id).toBe(100); 134 | expect(templates[2].name).toBe('Custom Template'); 135 | }); 136 | }); 137 | 138 | describe('createTestNode', () => { 139 | it('should create a node with defaults', () => { 140 | const node = createTestNode(); 141 | 142 | expect(node.nodeType).toBe('nodes-base.test'); 143 | expect(node.displayName).toBe('Test Node'); 144 | expect(node.style).toBe('programmatic'); 145 | expect(node.isAITool).toBe(false); 146 | }); 147 | 148 | it('should override defaults', () => { 149 | const node = createTestNode({ 150 | nodeType: 'nodes-base.custom', 151 | displayName: 'Custom Node', 152 | isAITool: true 153 | }); 154 | 155 | expect(node.nodeType).toBe('nodes-base.custom'); 156 | expect(node.displayName).toBe('Custom Node'); 157 | expect(node.isAITool).toBe(true); 158 | }); 159 | }); 160 | 161 | describe('resetDatabase', () => { 162 | beforeEach(async () => { 163 | testDb = await createTestDatabase(); 164 | }); 165 | 166 | it('should clear all data and reinitialize schema', async () => { 167 | // Add some data 168 | await seedTestNodes(testDb.nodeRepository); 169 | await seedTestTemplates(testDb.templateRepository); 170 | 171 | // Verify data exists 172 | expect(dbHelpers.countRows(testDb.adapter, 'nodes')).toBe(3); 173 | expect(dbHelpers.countRows(testDb.adapter, 'templates')).toBe(2); 174 | 175 | // Reset database 176 | await resetDatabase(testDb.adapter); 177 | 178 | // Verify data is cleared 179 | expect(dbHelpers.countRows(testDb.adapter, 'nodes')).toBe(0); 180 | expect(dbHelpers.countRows(testDb.adapter, 'templates')).toBe(0); 181 | 182 | // Verify tables still exist 183 | const tables = testDb.adapter 184 | .prepare("SELECT name FROM sqlite_master WHERE type='table'") 185 | .all() as { name: string }[]; 186 | 187 | const tableNames = tables.map(t => t.name); 188 | expect(tableNames).toContain('nodes'); 189 | expect(tableNames).toContain('templates'); 190 | }); 191 | }); 192 | 193 | describe('Database Snapshots', () => { 194 | beforeEach(async () => { 195 | testDb = await createTestDatabase(); 196 | }); 197 | 198 | it('should create and restore database snapshot', async () => { 199 | // Seed initial data 200 | await 
seedTestNodes(testDb.nodeRepository); 201 | await seedTestTemplates(testDb.templateRepository); 202 | 203 | // Create snapshot 204 | const snapshot = await createDatabaseSnapshot(testDb.adapter); 205 | 206 | expect(snapshot.metadata.nodeCount).toBe(3); 207 | expect(snapshot.metadata.templateCount).toBe(2); 208 | expect(snapshot.nodes).toHaveLength(3); 209 | expect(snapshot.templates).toHaveLength(2); 210 | 211 | // Clear database 212 | await resetDatabase(testDb.adapter); 213 | expect(dbHelpers.countRows(testDb.adapter, 'nodes')).toBe(0); 214 | 215 | // Restore from snapshot 216 | await restoreDatabaseSnapshot(testDb.adapter, snapshot); 217 | 218 | // Verify data is restored 219 | expect(dbHelpers.countRows(testDb.adapter, 'nodes')).toBe(3); 220 | expect(dbHelpers.countRows(testDb.adapter, 'templates')).toBe(2); 221 | 222 | const httpNode = testDb.nodeRepository.getNode('nodes-base.httpRequest'); 223 | expect(httpNode).toBeDefined(); 224 | expect(httpNode.displayName).toBe('HTTP Request'); 225 | }); 226 | }); 227 | 228 | describe('loadFixtures', () => { 229 | beforeEach(async () => { 230 | testDb = await createTestDatabase(); 231 | }); 232 | 233 | it('should load fixtures from JSON file', async () => { 234 | // Create a temporary fixture file 235 | const fixturePath = path.join(__dirname, '../../temp/test-fixtures.json'); 236 | const fixtures = { 237 | nodes: [ 238 | createTestNode({ nodeType: 'nodes-base.fixture1' }), 239 | createTestNode({ nodeType: 'nodes-base.fixture2' }) 240 | ], 241 | templates: [ 242 | createTestTemplate({ id: 1000, name: 'Fixture Template' }) 243 | ] 244 | }; 245 | 246 | // Ensure directory exists 247 | const dir = path.dirname(fixturePath); 248 | if (!fs.existsSync(dir)) { 249 | fs.mkdirSync(dir, { recursive: true }); 250 | } 251 | 252 | fs.writeFileSync(fixturePath, JSON.stringify(fixtures, null, 2)); 253 | 254 | // Load fixtures 255 | await loadFixtures(testDb.adapter, fixturePath); 256 | 257 | // Verify data was loaded 258 | expect(dbHelpers.countRows(testDb.adapter, 'nodes')).toBe(2); 259 | expect(dbHelpers.countRows(testDb.adapter, 'templates')).toBe(1); 260 | 261 | expect(dbHelpers.nodeExists(testDb.adapter, 'nodes-base.fixture1')).toBe(true); 262 | expect(dbHelpers.nodeExists(testDb.adapter, 'nodes-base.fixture2')).toBe(true); 263 | 264 | // Cleanup 265 | fs.unlinkSync(fixturePath); 266 | }); 267 | }); 268 | 269 | describe('dbHelpers', () => { 270 | beforeEach(async () => { 271 | testDb = await createTestDatabase(); 272 | await seedTestNodes(testDb.nodeRepository); 273 | }); 274 | 275 | it('should count rows correctly', () => { 276 | const count = dbHelpers.countRows(testDb.adapter, 'nodes'); 277 | expect(count).toBe(3); 278 | }); 279 | 280 | it('should check if node exists', () => { 281 | expect(dbHelpers.nodeExists(testDb.adapter, 'nodes-base.httpRequest')).toBe(true); 282 | expect(dbHelpers.nodeExists(testDb.adapter, 'nodes-base.nonexistent')).toBe(false); 283 | }); 284 | 285 | it('should get all node types', () => { 286 | const nodeTypes = dbHelpers.getAllNodeTypes(testDb.adapter); 287 | expect(nodeTypes).toHaveLength(3); 288 | expect(nodeTypes).toContain('nodes-base.httpRequest'); 289 | expect(nodeTypes).toContain('nodes-base.webhook'); 290 | expect(nodeTypes).toContain('nodes-base.slack'); 291 | }); 292 | 293 | it('should clear table', () => { 294 | expect(dbHelpers.countRows(testDb.adapter, 'nodes')).toBe(3); 295 | 296 | dbHelpers.clearTable(testDb.adapter, 'nodes'); 297 | 298 | expect(dbHelpers.countRows(testDb.adapter, 'nodes')).toBe(0); 299 | }); 
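// Editor's sketch (not part of the original suite): the dbHelpers assertions above
// exercise each helper in isolation. Using only the helpers already imported in this
// file, combining them with the snapshot utilities in one test could look roughly like:
//
//   it('should restore seeded state after a destructive helper call (sketch)', async () => {
//     const before = await createDatabaseSnapshot(testDb.adapter);
//     dbHelpers.clearTable(testDb.adapter, 'nodes');
//     expect(dbHelpers.countRows(testDb.adapter, 'nodes')).toBe(0);
//     await restoreDatabaseSnapshot(testDb.adapter, before);
//     expect(dbHelpers.nodeExists(testDb.adapter, 'nodes-base.httpRequest')).toBe(true);
//   });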
300 | }); 301 | 302 | describe('createMockDatabaseAdapter', () => { 303 | it('should create a mock adapter with all required methods', () => { 304 | const mockAdapter = createMockDatabaseAdapter(); 305 | 306 | expect(mockAdapter.prepare).toBeDefined(); 307 | expect(mockAdapter.exec).toBeDefined(); 308 | expect(mockAdapter.close).toBeDefined(); 309 | expect(mockAdapter.pragma).toBeDefined(); 310 | expect(mockAdapter.transaction).toBeDefined(); 311 | expect(mockAdapter.checkFTS5Support).toBeDefined(); 312 | 313 | // Test that methods are mocked 314 | expect(vi.isMockFunction(mockAdapter.prepare)).toBe(true); 315 | expect(vi.isMockFunction(mockAdapter.exec)).toBe(true); 316 | }); 317 | }); 318 | 319 | describe('withTransaction', () => { 320 | beforeEach(async () => { 321 | testDb = await createTestDatabase(); 322 | }); 323 | 324 | it('should rollback transaction for testing', async () => { 325 | // Insert a node 326 | await seedTestNodes(testDb.nodeRepository, [ 327 | { nodeType: 'nodes-base.transaction-test' } 328 | ]); 329 | 330 | const initialCount = dbHelpers.countRows(testDb.adapter, 'nodes'); 331 | 332 | // Try to insert in a transaction that will rollback 333 | const result = await withTransaction(testDb.adapter, async () => { 334 | testDb.nodeRepository.saveNode(createTestNode({ 335 | nodeType: 'nodes-base.should-rollback' 336 | })); 337 | 338 | // Verify it was inserted within transaction 339 | const midCount = dbHelpers.countRows(testDb.adapter, 'nodes'); 340 | expect(midCount).toBe(initialCount + 1); 341 | 342 | return 'test-result'; 343 | }); 344 | 345 | // Transaction should have rolled back 346 | expect(result).toBeNull(); 347 | const finalCount = dbHelpers.countRows(testDb.adapter, 'nodes'); 348 | expect(finalCount).toBe(initialCount); 349 | }); 350 | }); 351 | 352 | describe('measureDatabaseOperation', () => { 353 | beforeEach(async () => { 354 | testDb = await createTestDatabase(); 355 | }); 356 | 357 | it('should measure operation duration', async () => { 358 | const duration = await measureDatabaseOperation('test operation', async () => { 359 | await seedTestNodes(testDb.nodeRepository); 360 | // Add a small delay to ensure measurable time passes 361 | await new Promise(resolve => setTimeout(resolve, 1)); 362 | }); 363 | 364 | expect(duration).toBeGreaterThanOrEqual(0); 365 | expect(duration).toBeLessThan(1000); // Should be fast 366 | }); 367 | }); 368 | 369 | describe('Integration Tests', () => { 370 | it('should handle complex database operations', async () => { 371 | testDb = await createTestDatabase({ enableFTS5: true }); 372 | 373 | // Seed initial data 374 | const nodes = await seedTestNodes(testDb.nodeRepository); 375 | const templates = await seedTestTemplates(testDb.templateRepository); 376 | 377 | // Create snapshot 378 | const snapshot = await createDatabaseSnapshot(testDb.adapter); 379 | 380 | // Add more data 381 | await seedTestNodes(testDb.nodeRepository, [ 382 | { nodeType: 'nodes-base.extra1' }, 383 | { nodeType: 'nodes-base.extra2' } 384 | ]); 385 | 386 | expect(dbHelpers.countRows(testDb.adapter, 'nodes')).toBe(5); 387 | 388 | // Restore snapshot 389 | await restoreDatabaseSnapshot(testDb.adapter, snapshot); 390 | 391 | // Should be back to original state 392 | expect(dbHelpers.countRows(testDb.adapter, 'nodes')).toBe(3); 393 | 394 | // Test FTS5 if supported 395 | if (testDb.adapter.checkFTS5Support()) { 396 | // FTS5 operations would go here 397 | expect(true).toBe(true); 398 | } 399 | }); 400 | }); 401 | }); ``` 
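// Editor's note (illustrative, not part of the original file): the final integration
// test above leaves "FTS5 operations would go here" as a placeholder. A hedged sketch
// of one such check — the virtual-table name `nodes_fts` and its `node_type` column
// are assumptions, not confirmed anywhere in this file:
//
//   if (testDb.adapter.checkFTS5Support()) {
//     const hits = testDb.adapter
//       .prepare('SELECT node_type FROM nodes_fts WHERE nodes_fts MATCH ?')
//       .all('webhook') as { node_type: string }[];
//     expect(hits.some(h => h.node_type === 'nodes-base.webhook')).toBe(true);
//   }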
-------------------------------------------------------------------------------- /tests/unit/services/config-validator-security.test.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { describe, it, expect, vi, beforeEach } from 'vitest'; 2 | import { ConfigValidator } from '@/services/config-validator'; 3 | import type { ValidationResult, ValidationError, ValidationWarning } from '@/services/config-validator'; 4 | 5 | // Mock the database 6 | vi.mock('better-sqlite3'); 7 | 8 | describe('ConfigValidator - Security Validation', () => { 9 | beforeEach(() => { 10 | vi.clearAllMocks(); 11 | }); 12 | 13 | describe('Credential security', () => { 14 | it('should perform security checks for hardcoded credentials', () => { 15 | const nodeType = 'nodes-base.test'; 16 | const config = { 17 | api_key: 'sk-1234567890abcdef', 18 | password: 'my-secret-password', 19 | token: 'hardcoded-token' 20 | }; 21 | const properties = [ 22 | { name: 'api_key', type: 'string' }, 23 | { name: 'password', type: 'string' }, 24 | { name: 'token', type: 'string' } 25 | ]; 26 | 27 | const result = ConfigValidator.validate(nodeType, config, properties); 28 | 29 | expect(result.warnings.filter(w => w.type === 'security')).toHaveLength(3); 30 | expect(result.warnings.some(w => w.property === 'api_key')).toBe(true); 31 | expect(result.warnings.some(w => w.property === 'password')).toBe(true); 32 | expect(result.warnings.some(w => w.property === 'token')).toBe(true); 33 | }); 34 | 35 | it('should validate HTTP Request with authentication in API URLs', () => { 36 | const nodeType = 'nodes-base.httpRequest'; 37 | const config = { 38 | method: 'GET', 39 | url: 'https://api.github.com/user/repos', 40 | authentication: 'none' 41 | }; 42 | const properties = [ 43 | { name: 'method', type: 'options' }, 44 | { name: 'url', type: 'string' }, 45 | { name: 'authentication', type: 'options' } 46 | ]; 47 | 48 | const result = ConfigValidator.validate(nodeType, config, properties); 49 | 50 | expect(result.warnings.some(w => 51 | w.type === 'security' && 52 | w.message.includes('API endpoints typically require authentication') 53 | )).toBe(true); 54 | }); 55 | }); 56 | 57 | describe('Code execution security', () => { 58 | it('should warn about security issues with eval/exec', () => { 59 | const nodeType = 'nodes-base.code'; 60 | const config = { 61 | language: 'javascript', 62 | jsCode: ` 63 | const userInput = items[0].json.code; 64 | const result = eval(userInput); 65 | return [{json: {result}}]; 66 | ` 67 | }; 68 | const properties = [ 69 | { name: 'language', type: 'options' }, 70 | { name: 'jsCode', type: 'string' } 71 | ]; 72 | 73 | const result = ConfigValidator.validate(nodeType, config, properties); 74 | 75 | expect(result.warnings.some(w => 76 | w.type === 'security' && 77 | w.message.includes('eval/exec which can be a security risk') 78 | )).toBe(true); 79 | }); 80 | 81 | it('should detect infinite loops', () => { 82 | const nodeType = 'nodes-base.code'; 83 | const config = { 84 | language: 'javascript', 85 | jsCode: ` 86 | while (true) { 87 | console.log('infinite loop'); 88 | } 89 | return items; 90 | ` 91 | }; 92 | const properties = [ 93 | { name: 'language', type: 'options' }, 94 | { name: 'jsCode', type: 'string' } 95 | ]; 96 | 97 | const result = ConfigValidator.validate(nodeType, config, properties); 98 | 99 | expect(result.warnings.some(w => 100 | w.type === 'security' && 101 | w.message.includes('Infinite loop detected') 102 | )).toBe(true); 103 | }); 104 | }); 
105 | 106 | describe('Database security', () => { 107 | it('should validate database query security', () => { 108 | const nodeType = 'nodes-base.postgres'; 109 | const config = { 110 | query: 'DELETE FROM users;' // Missing WHERE clause 111 | }; 112 | const properties = [ 113 | { name: 'query', type: 'string' } 114 | ]; 115 | 116 | const result = ConfigValidator.validate(nodeType, config, properties); 117 | 118 | expect(result.warnings.some(w => 119 | w.type === 'security' && 120 | w.message.includes('DELETE query without WHERE clause') 121 | )).toBe(true); 122 | }); 123 | 124 | it('should check for SQL injection vulnerabilities', () => { 125 | const nodeType = 'nodes-base.mysql'; 126 | const config = { 127 | query: 'SELECT * FROM users WHERE id = ${userId}' 128 | }; 129 | const properties = [ 130 | { name: 'query', type: 'string' } 131 | ]; 132 | 133 | const result = ConfigValidator.validate(nodeType, config, properties); 134 | 135 | expect(result.warnings.some(w => 136 | w.type === 'security' && 137 | w.message.includes('SQL injection') 138 | )).toBe(true); 139 | }); 140 | 141 | // DROP TABLE warning not implemented in current validator 142 | it.skip('should warn about DROP TABLE operations', () => { 143 | const nodeType = 'nodes-base.postgres'; 144 | const config = { 145 | query: 'DROP TABLE IF EXISTS user_sessions;' 146 | }; 147 | const properties = [ 148 | { name: 'query', type: 'string' } 149 | ]; 150 | 151 | const result = ConfigValidator.validate(nodeType, config, properties); 152 | 153 | expect(result.warnings.some(w => 154 | w.type === 'security' && 155 | w.message.includes('DROP TABLE is a destructive operation') 156 | )).toBe(true); 157 | }); 158 | 159 | // TRUNCATE warning not implemented in current validator 160 | it.skip('should warn about TRUNCATE operations', () => { 161 | const nodeType = 'nodes-base.mysql'; 162 | const config = { 163 | query: 'TRUNCATE TABLE audit_logs;' 164 | }; 165 | const properties = [ 166 | { name: 'query', type: 'string' } 167 | ]; 168 | 169 | const result = ConfigValidator.validate(nodeType, config, properties); 170 | 171 | expect(result.warnings.some(w => 172 | w.type === 'security' && 173 | w.message.includes('TRUNCATE is a destructive operation') 174 | )).toBe(true); 175 | }); 176 | 177 | it('should check for unescaped user input in queries', () => { 178 | const nodeType = 'nodes-base.postgres'; 179 | const config = { 180 | query: `SELECT * FROM users WHERE name = '{{ $json.userName }}'` 181 | }; 182 | const properties = [ 183 | { name: 'query', type: 'string' } 184 | ]; 185 | 186 | const result = ConfigValidator.validate(nodeType, config, properties); 187 | 188 | expect(result.warnings.some(w => 189 | w.type === 'security' && 190 | w.message.includes('vulnerable to SQL injection') 191 | )).toBe(true); 192 | }); 193 | }); 194 | 195 | describe('Network security', () => { 196 | // HTTP vs HTTPS warning not implemented in current validator 197 | it.skip('should warn about HTTP (non-HTTPS) API calls', () => { 198 | const nodeType = 'nodes-base.httpRequest'; 199 | const config = { 200 | method: 'POST', 201 | url: 'http://api.example.com/sensitive-data', 202 | sendBody: true 203 | }; 204 | const properties = [ 205 | { name: 'method', type: 'options' }, 206 | { name: 'url', type: 'string' }, 207 | { name: 'sendBody', type: 'boolean' } 208 | ]; 209 | 210 | const result = ConfigValidator.validate(nodeType, config, properties); 211 | 212 | expect(result.warnings.some(w => 213 | w.type === 'security' && 214 | w.message.includes('Consider using HTTPS') 215 
| )).toBe(true); 216 | }); 217 | 218 | // Localhost URL warning not implemented in current validator 219 | it.skip('should validate localhost/internal URLs', () => { 220 | const nodeType = 'nodes-base.httpRequest'; 221 | const config = { 222 | method: 'GET', 223 | url: 'http://localhost:8080/admin' 224 | }; 225 | const properties = [ 226 | { name: 'method', type: 'options' }, 227 | { name: 'url', type: 'string' } 228 | ]; 229 | 230 | const result = ConfigValidator.validate(nodeType, config, properties); 231 | 232 | expect(result.warnings.some(w => 233 | w.type === 'security' && 234 | w.message.includes('Accessing localhost/internal URLs') 235 | )).toBe(true); 236 | }); 237 | 238 | // Sensitive data in URL warning not implemented in current validator 239 | it.skip('should check for sensitive data in URLs', () => { 240 | const nodeType = 'nodes-base.httpRequest'; 241 | const config = { 242 | method: 'GET', 243 | url: 'https://api.example.com/users?api_key=secret123&token=abc' 244 | }; 245 | const properties = [ 246 | { name: 'method', type: 'options' }, 247 | { name: 'url', type: 'string' } 248 | ]; 249 | 250 | const result = ConfigValidator.validate(nodeType, config, properties); 251 | 252 | expect(result.warnings.some(w => 253 | w.type === 'security' && 254 | w.message.includes('Sensitive data in URL') 255 | )).toBe(true); 256 | }); 257 | }); 258 | 259 | describe('File system security', () => { 260 | // File system operations warning not implemented in current validator 261 | it.skip('should warn about dangerous file operations', () => { 262 | const nodeType = 'nodes-base.code'; 263 | const config = { 264 | language: 'javascript', 265 | jsCode: ` 266 | const fs = require('fs'); 267 | fs.unlinkSync('/etc/passwd'); 268 | return items; 269 | ` 270 | }; 271 | const properties = [ 272 | { name: 'language', type: 'options' }, 273 | { name: 'jsCode', type: 'string' } 274 | ]; 275 | 276 | const result = ConfigValidator.validate(nodeType, config, properties); 277 | 278 | expect(result.warnings.some(w => 279 | w.type === 'security' && 280 | w.message.includes('File system operations') 281 | )).toBe(true); 282 | }); 283 | 284 | // Path traversal warning not implemented in current validator 285 | it.skip('should check for path traversal vulnerabilities', () => { 286 | const nodeType = 'nodes-base.code'; 287 | const config = { 288 | language: 'javascript', 289 | jsCode: ` 290 | const path = items[0].json.userPath; 291 | const file = fs.readFileSync('../../../' + path); 292 | return [{json: {content: file.toString()}}]; 293 | ` 294 | }; 295 | const properties = [ 296 | { name: 'language', type: 'options' }, 297 | { name: 'jsCode', type: 'string' } 298 | ]; 299 | 300 | const result = ConfigValidator.validate(nodeType, config, properties); 301 | 302 | expect(result.warnings.some(w => 303 | w.type === 'security' && 304 | w.message.includes('Path traversal') 305 | )).toBe(true); 306 | }); 307 | }); 308 | 309 | describe('Crypto and sensitive operations', () => { 310 | it('should validate crypto module usage', () => { 311 | const nodeType = 'nodes-base.code'; 312 | const config = { 313 | language: 'javascript', 314 | jsCode: ` 315 | const uuid = crypto.randomUUID(); 316 | return [{json: {id: uuid}}]; 317 | ` 318 | }; 319 | const properties = [ 320 | { name: 'language', type: 'options' }, 321 | { name: 'jsCode', type: 'string' } 322 | ]; 323 | 324 | const result = ConfigValidator.validate(nodeType, config, properties); 325 | 326 | expect(result.warnings.some(w => 327 | w.type === 'invalid_value' && 328 | 
w.message.includes('Using crypto without require') 329 | )).toBe(true); 330 | }); 331 | 332 | // Weak crypto algorithm warning not implemented in current validator 333 | it.skip('should warn about weak crypto algorithms', () => { 334 | const nodeType = 'nodes-base.code'; 335 | const config = { 336 | language: 'javascript', 337 | jsCode: ` 338 | const crypto = require('crypto'); 339 | const hash = crypto.createHash('md5'); 340 | hash.update(data); 341 | return [{json: {hash: hash.digest('hex')}}]; 342 | ` 343 | }; 344 | const properties = [ 345 | { name: 'language', type: 'options' }, 346 | { name: 'jsCode', type: 'string' } 347 | ]; 348 | 349 | const result = ConfigValidator.validate(nodeType, config, properties); 350 | 351 | expect(result.warnings.some(w => 352 | w.type === 'security' && 353 | w.message.includes('MD5 is cryptographically weak') 354 | )).toBe(true); 355 | }); 356 | 357 | // Environment variable access warning not implemented in current validator 358 | it.skip('should check for environment variable access', () => { 359 | const nodeType = 'nodes-base.code'; 360 | const config = { 361 | language: 'javascript', 362 | jsCode: ` 363 | const apiKey = process.env.SECRET_API_KEY; 364 | const dbPassword = process.env.DATABASE_PASSWORD; 365 | return [{json: {configured: !!apiKey}}]; 366 | ` 367 | }; 368 | const properties = [ 369 | { name: 'language', type: 'options' }, 370 | { name: 'jsCode', type: 'string' } 371 | ]; 372 | 373 | const result = ConfigValidator.validate(nodeType, config, properties); 374 | 375 | expect(result.warnings.some(w => 376 | w.type === 'security' && 377 | w.message.includes('Accessing environment variables') 378 | )).toBe(true); 379 | }); 380 | }); 381 | 382 | describe('Python security', () => { 383 | it('should warn about exec/eval in Python', () => { 384 | const nodeType = 'nodes-base.code'; 385 | const config = { 386 | language: 'python', 387 | pythonCode: ` 388 | user_code = items[0]['json']['code'] 389 | result = exec(user_code) 390 | return [{"json": {"result": result}}] 391 | ` 392 | }; 393 | const properties = [ 394 | { name: 'language', type: 'options' }, 395 | { name: 'pythonCode', type: 'string' } 396 | ]; 397 | 398 | const result = ConfigValidator.validate(nodeType, config, properties); 399 | 400 | expect(result.warnings.some(w => 401 | w.type === 'security' && 402 | w.message.includes('eval/exec which can be a security risk') 403 | )).toBe(true); 404 | }); 405 | 406 | // os.system usage warning not implemented in current validator 407 | it.skip('should check for subprocess/os.system usage', () => { 408 | const nodeType = 'nodes-base.code'; 409 | const config = { 410 | language: 'python', 411 | pythonCode: ` 412 | import os 413 | command = items[0]['json']['command'] 414 | os.system(command) 415 | return [{"json": {"executed": True}}] 416 | ` 417 | }; 418 | const properties = [ 419 | { name: 'language', type: 'options' }, 420 | { name: 'pythonCode', type: 'string' } 421 | ]; 422 | 423 | const result = ConfigValidator.validate(nodeType, config, properties); 424 | 425 | expect(result.warnings.some(w => 426 | w.type === 'security' && 427 | w.message.includes('os.system() can execute arbitrary commands') 428 | )).toBe(true); 429 | }); 430 | }); 431 | }); ``` -------------------------------------------------------------------------------- /tests/integration/ai-validation/e2e-validation.test.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Integration Tests: End-to-End AI Workflow 
Validation 3 | * 4 | * Tests complete AI workflow validation and creation flow. 5 | * Validates multi-error detection and workflow creation after validation. 6 | */ 7 | 8 | import { describe, it, expect, beforeEach, afterEach, afterAll } from 'vitest'; 9 | import { createTestContext, TestContext, createTestWorkflowName } from '../n8n-api/utils/test-context'; 10 | import { getTestN8nClient } from '../n8n-api/utils/n8n-client'; 11 | import { N8nApiClient } from '../../../src/services/n8n-api-client'; 12 | import { cleanupOrphanedWorkflows } from '../n8n-api/utils/cleanup-helpers'; 13 | import { createMcpContext } from '../n8n-api/utils/mcp-context'; 14 | import { InstanceContext } from '../../../src/types/instance-context'; 15 | import { handleValidateWorkflow, handleCreateWorkflow } from '../../../src/mcp/handlers-n8n-manager'; 16 | import { getNodeRepository, closeNodeRepository } from '../n8n-api/utils/node-repository'; 17 | import { NodeRepository } from '../../../src/database/node-repository'; 18 | import { ValidationResponse } from '../n8n-api/types/mcp-responses'; 19 | import { 20 | createChatTriggerNode, 21 | createAIAgentNode, 22 | createLanguageModelNode, 23 | createHTTPRequestToolNode, 24 | createCodeToolNode, 25 | createMemoryNode, 26 | createRespondNode, 27 | createAIConnection, 28 | createMainConnection, 29 | mergeConnections, 30 | createAIWorkflow 31 | } from './helpers'; 32 | 33 | describe('Integration: End-to-End AI Workflow Validation', () => { 34 | let context: TestContext; 35 | let client: N8nApiClient; 36 | let mcpContext: InstanceContext; 37 | let repository: NodeRepository; 38 | 39 | beforeEach(async () => { 40 | context = createTestContext(); 41 | client = getTestN8nClient(); 42 | mcpContext = createMcpContext(); 43 | repository = await getNodeRepository(); 44 | }); 45 | 46 | afterEach(async () => { 47 | await context.cleanup(); 48 | }); 49 | 50 | afterAll(async () => { 51 | await closeNodeRepository(); 52 | if (!process.env.CI) { 53 | await cleanupOrphanedWorkflows(); 54 | } 55 | }); 56 | 57 | // ====================================================================== 58 | // TEST 1: Validate and Create Complex AI Workflow 59 | // ====================================================================== 60 | 61 | it('should validate and create complex AI workflow', async () => { 62 | const chatTrigger = createChatTriggerNode({ 63 | name: 'Chat Trigger', 64 | responseMode: 'lastNode' 65 | }); 66 | 67 | const languageModel = createLanguageModelNode('openai', { 68 | name: 'OpenAI Chat Model' 69 | }); 70 | 71 | const httpTool = createHTTPRequestToolNode({ 72 | name: 'Weather API', 73 | toolDescription: 'Fetches current weather data from weather API', 74 | url: 'https://api.weather.com/current', 75 | method: 'GET' 76 | }); 77 | 78 | const codeTool = createCodeToolNode({ 79 | name: 'Data Processor', 80 | toolDescription: 'Processes and formats weather data', 81 | code: 'return { formatted: JSON.stringify($input.all()) };' 82 | }); 83 | 84 | const memory = createMemoryNode({ 85 | name: 'Conversation Memory', 86 | contextWindowLength: 10 87 | }); 88 | 89 | const agent = createAIAgentNode({ 90 | name: 'Weather Assistant', 91 | promptType: 'define', 92 | text: 'You are a weather assistant. Help users understand weather data.', 93 | systemMessage: 'You are an AI assistant specialized in weather information. You have access to weather APIs and can process data. Always provide clear, helpful responses.' 
94 | }); 95 | 96 | const respond = createRespondNode({ 97 | name: 'Respond to User' 98 | }); 99 | 100 | const workflow = createAIWorkflow( 101 | [chatTrigger, languageModel, httpTool, codeTool, memory, agent, respond], 102 | mergeConnections( 103 | createMainConnection('Chat Trigger', 'Weather Assistant'), 104 | createAIConnection('OpenAI Chat Model', 'Weather Assistant', 'ai_languageModel'), 105 | createAIConnection('Weather API', 'Weather Assistant', 'ai_tool'), 106 | createAIConnection('Data Processor', 'Weather Assistant', 'ai_tool'), 107 | createAIConnection('Conversation Memory', 'Weather Assistant', 'ai_memory'), 108 | createMainConnection('Weather Assistant', 'Respond to User') 109 | ), 110 | { 111 | name: createTestWorkflowName('E2E - Complex AI Workflow'), 112 | tags: ['mcp-integration-test', 'ai-validation', 'e2e'] 113 | } 114 | ); 115 | 116 | // Step 1: Create workflow 117 | const created = await client.createWorkflow(workflow); 118 | context.trackWorkflow(created.id!); 119 | 120 | // Step 2: Validate workflow 121 | const validationResponse = await handleValidateWorkflow( 122 | { id: created.id }, 123 | repository, 124 | mcpContext 125 | ); 126 | 127 | expect(validationResponse.success).toBe(true); 128 | const validationData = validationResponse.data as ValidationResponse; 129 | 130 | // Workflow should be valid 131 | expect(validationData.valid).toBe(true); 132 | expect(validationData.errors).toBeUndefined(); 133 | expect(validationData.summary.errorCount).toBe(0); 134 | 135 | // Verify all nodes detected 136 | expect(validationData.summary.totalNodes).toBe(7); 137 | expect(validationData.summary.triggerNodes).toBe(1); 138 | 139 | // Step 3: Since it's valid, it's already created and ready to use 140 | // Just verify it exists 141 | const retrieved = await client.getWorkflow(created.id!); 142 | expect(retrieved.id).toBe(created.id); 143 | expect(retrieved.nodes.length).toBe(7); 144 | }); 145 | 146 | // ====================================================================== 147 | // TEST 2: Detect Multiple Validation Errors 148 | // ====================================================================== 149 | 150 | it('should detect multiple validation errors', async () => { 151 | const chatTrigger = createChatTriggerNode({ 152 | name: 'Chat Trigger', 153 | responseMode: 'streaming' 154 | }); 155 | 156 | const httpTool = createHTTPRequestToolNode({ 157 | name: 'HTTP Tool', 158 | toolDescription: '', // ERROR: missing description 159 | url: '', // ERROR: missing URL 160 | method: 'GET' 161 | }); 162 | 163 | const codeTool = createCodeToolNode({ 164 | name: 'Code Tool', 165 | toolDescription: 'Short', // WARNING: too short 166 | code: '' // ERROR: missing code 167 | }); 168 | 169 | const agent = createAIAgentNode({ 170 | name: 'AI Agent', 171 | promptType: 'define', 172 | text: '', // ERROR: missing prompt text 173 | // ERROR: missing language model connection 174 | // ERROR: has main output in streaming mode 175 | }); 176 | 177 | const respond = createRespondNode({ 178 | name: 'Respond' 179 | }); 180 | 181 | const workflow = createAIWorkflow( 182 | [chatTrigger, httpTool, codeTool, agent, respond], 183 | mergeConnections( 184 | createMainConnection('Chat Trigger', 'AI Agent'), 185 | createAIConnection('HTTP Tool', 'AI Agent', 'ai_tool'), 186 | createAIConnection('Code Tool', 'AI Agent', 'ai_tool'), 187 | createMainConnection('AI Agent', 'Respond') // ERROR in streaming mode 188 | ), 189 | { 190 | name: createTestWorkflowName('E2E - Multiple Errors'), 191 | tags: 
['mcp-integration-test', 'ai-validation', 'e2e'] 192 | } 193 | ); 194 | 195 | const created = await client.createWorkflow(workflow); 196 | context.trackWorkflow(created.id!); 197 | 198 | const validationResponse = await handleValidateWorkflow( 199 | { id: created.id }, 200 | repository, 201 | mcpContext 202 | ); 203 | 204 | expect(validationResponse.success).toBe(true); 205 | const validationData = validationResponse.data as ValidationResponse; 206 | 207 | // Should be invalid with multiple errors 208 | expect(validationData.valid).toBe(false); 209 | expect(validationData.errors).toBeDefined(); 210 | expect(validationData.errors!.length).toBeGreaterThan(3); 211 | 212 | // Verify specific errors are detected 213 | const errorCodes = validationData.errors!.map(e => e.details?.code || e.code); 214 | 215 | expect(errorCodes).toContain('MISSING_LANGUAGE_MODEL'); // AI Agent 216 | expect(errorCodes).toContain('MISSING_PROMPT_TEXT'); // AI Agent 217 | expect(errorCodes).toContain('MISSING_TOOL_DESCRIPTION'); // HTTP Tool 218 | expect(errorCodes).toContain('MISSING_URL'); // HTTP Tool 219 | expect(errorCodes).toContain('MISSING_CODE'); // Code Tool 220 | 221 | // Should also have streaming error 222 | const streamingErrors = validationData.errors!.filter(e => { 223 | const code = e.details?.code || e.code; 224 | return code === 'STREAMING_WITH_MAIN_OUTPUT' || 225 | code === 'STREAMING_AGENT_HAS_OUTPUT'; 226 | }); 227 | expect(streamingErrors.length).toBeGreaterThan(0); 228 | 229 | // Verify error messages are actionable 230 | for (const error of validationData.errors!) { 231 | expect(error.message).toBeDefined(); 232 | expect(error.message.length).toBeGreaterThan(10); 233 | expect(error.nodeName).toBeDefined(); 234 | } 235 | }); 236 | 237 | // ====================================================================== 238 | // TEST 3: Validate Streaming Workflow (No Main Output) 239 | // ====================================================================== 240 | 241 | it('should validate streaming workflow without main output', async () => { 242 | const chatTrigger = createChatTriggerNode({ 243 | name: 'Chat Trigger', 244 | responseMode: 'streaming' 245 | }); 246 | 247 | const languageModel = createLanguageModelNode('anthropic', { 248 | name: 'Claude Model' 249 | }); 250 | 251 | const agent = createAIAgentNode({ 252 | name: 'Streaming Agent', 253 | text: 'You are a helpful assistant', 254 | systemMessage: 'Provide helpful, streaming responses to user queries' 255 | }); 256 | 257 | const workflow = createAIWorkflow( 258 | [chatTrigger, languageModel, agent], 259 | mergeConnections( 260 | createMainConnection('Chat Trigger', 'Streaming Agent'), 261 | createAIConnection('Claude Model', 'Streaming Agent', 'ai_languageModel') 262 | // No main output from agent - streaming mode 263 | ), 264 | { 265 | name: createTestWorkflowName('E2E - Streaming Workflow'), 266 | tags: ['mcp-integration-test', 'ai-validation', 'e2e'] 267 | } 268 | ); 269 | 270 | const created = await client.createWorkflow(workflow); 271 | context.trackWorkflow(created.id!); 272 | 273 | const validationResponse = await handleValidateWorkflow( 274 | { id: created.id }, 275 | repository, 276 | mcpContext 277 | ); 278 | 279 | expect(validationResponse.success).toBe(true); 280 | const validationData = validationResponse.data as ValidationResponse; 281 | 282 | expect(validationData.valid).toBe(true); 283 | expect(validationData.errors).toBeUndefined(); 284 | expect(validationData.summary.errorCount).toBe(0); 285 | }); 286 | 287 | // 
====================================================================== 288 | // TEST 4: Validate Non-Streaming Workflow (With Main Output) 289 | // ====================================================================== 290 | 291 | it('should validate non-streaming workflow with main output', async () => { 292 | const chatTrigger = createChatTriggerNode({ 293 | name: 'Chat Trigger', 294 | responseMode: 'lastNode' 295 | }); 296 | 297 | const languageModel = createLanguageModelNode('openai', { 298 | name: 'GPT Model' 299 | }); 300 | 301 | const agent = createAIAgentNode({ 302 | name: 'Non-Streaming Agent', 303 | text: 'You are a helpful assistant' 304 | }); 305 | 306 | const respond = createRespondNode({ 307 | name: 'Final Response' 308 | }); 309 | 310 | const workflow = createAIWorkflow( 311 | [chatTrigger, languageModel, agent, respond], 312 | mergeConnections( 313 | createMainConnection('Chat Trigger', 'Non-Streaming Agent'), 314 | createAIConnection('GPT Model', 'Non-Streaming Agent', 'ai_languageModel'), 315 | createMainConnection('Non-Streaming Agent', 'Final Response') 316 | ), 317 | { 318 | name: createTestWorkflowName('E2E - Non-Streaming Workflow'), 319 | tags: ['mcp-integration-test', 'ai-validation', 'e2e'] 320 | } 321 | ); 322 | 323 | const created = await client.createWorkflow(workflow); 324 | context.trackWorkflow(created.id!); 325 | 326 | const validationResponse = await handleValidateWorkflow( 327 | { id: created.id }, 328 | repository, 329 | mcpContext 330 | ); 331 | 332 | expect(validationResponse.success).toBe(true); 333 | const validationData = validationResponse.data as ValidationResponse; 334 | 335 | expect(validationData.valid).toBe(true); 336 | expect(validationData.errors).toBeUndefined(); 337 | }); 338 | 339 | // ====================================================================== 340 | // TEST 5: Test Node Type Normalization (Bug Fix Validation) 341 | // ====================================================================== 342 | 343 | it('should correctly normalize node types during validation', async () => { 344 | // This test validates the v2.17.0 fix for node type normalization 345 | const languageModel = createLanguageModelNode('openai', { 346 | name: 'OpenAI Model' 347 | }); 348 | 349 | const agent = createAIAgentNode({ 350 | name: 'AI Agent', 351 | text: 'Test agent' 352 | }); 353 | 354 | const httpTool = createHTTPRequestToolNode({ 355 | name: 'API Tool', 356 | toolDescription: 'Calls external API', 357 | url: 'https://api.example.com/test' 358 | }); 359 | 360 | const workflow = createAIWorkflow( 361 | [languageModel, agent, httpTool], 362 | mergeConnections( 363 | createAIConnection('OpenAI Model', 'AI Agent', 'ai_languageModel'), 364 | createAIConnection('API Tool', 'AI Agent', 'ai_tool') 365 | ), 366 | { 367 | name: createTestWorkflowName('E2E - Type Normalization'), 368 | tags: ['mcp-integration-test', 'ai-validation', 'e2e'] 369 | } 370 | ); 371 | 372 | const created = await client.createWorkflow(workflow); 373 | context.trackWorkflow(created.id!); 374 | 375 | const validationResponse = await handleValidateWorkflow( 376 | { id: created.id }, 377 | repository, 378 | mcpContext 379 | ); 380 | 381 | expect(validationResponse.success).toBe(true); 382 | const validationData = validationResponse.data as ValidationResponse; 383 | 384 | // Should be valid - no false "no tools connected" warning 385 | expect(validationData.valid).toBe(true); 386 | 387 | // Should NOT have false warnings about tools 388 | if (validationData.warnings) { 389 | const 
falseToolWarnings = validationData.warnings.filter(w => 390 | w.message.toLowerCase().includes('no ai_tool') && 391 | w.nodeName === 'AI Agent' 392 | ); 393 | expect(falseToolWarnings.length).toBe(0); 394 | } 395 | }); 396 | }); 397 | ``` -------------------------------------------------------------------------------- /tests/unit/services/property-dependencies.test.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { describe, it, expect, vi, beforeEach } from 'vitest'; 2 | import { PropertyDependencies } from '@/services/property-dependencies'; 3 | import type { DependencyAnalysis, PropertyDependency } from '@/services/property-dependencies'; 4 | 5 | // Mock the database 6 | vi.mock('better-sqlite3'); 7 | 8 | describe('PropertyDependencies', () => { 9 | beforeEach(() => { 10 | vi.clearAllMocks(); 11 | }); 12 | 13 | describe('analyze', () => { 14 | it('should analyze simple property dependencies', () => { 15 | const properties = [ 16 | { 17 | name: 'method', 18 | displayName: 'HTTP Method', 19 | type: 'options' 20 | }, 21 | { 22 | name: 'sendBody', 23 | displayName: 'Send Body', 24 | type: 'boolean', 25 | displayOptions: { 26 | show: { 27 | method: ['POST', 'PUT', 'PATCH'] 28 | } 29 | } 30 | } 31 | ]; 32 | 33 | const analysis = PropertyDependencies.analyze(properties); 34 | 35 | expect(analysis.totalProperties).toBe(2); 36 | expect(analysis.propertiesWithDependencies).toBe(1); 37 | expect(analysis.dependencies).toHaveLength(1); 38 | 39 | const sendBodyDep = analysis.dependencies[0]; 40 | expect(sendBodyDep.property).toBe('sendBody'); 41 | expect(sendBodyDep.dependsOn).toHaveLength(1); 42 | expect(sendBodyDep.dependsOn[0]).toMatchObject({ 43 | property: 'method', 44 | values: ['POST', 'PUT', 'PATCH'], 45 | condition: 'equals' 46 | }); 47 | }); 48 | 49 | it('should handle hide conditions', () => { 50 | const properties = [ 51 | { 52 | name: 'mode', 53 | type: 'options' 54 | }, 55 | { 56 | name: 'manualField', 57 | type: 'string', 58 | displayOptions: { 59 | hide: { 60 | mode: ['automatic'] 61 | } 62 | } 63 | } 64 | ]; 65 | 66 | const analysis = PropertyDependencies.analyze(properties); 67 | 68 | const manualFieldDep = analysis.dependencies[0]; 69 | expect(manualFieldDep.hideWhen).toEqual({ mode: ['automatic'] }); 70 | expect(manualFieldDep.dependsOn[0].condition).toBe('not_equals'); 71 | }); 72 | 73 | it('should handle multiple dependencies', () => { 74 | const properties = [ 75 | { 76 | name: 'resource', 77 | type: 'options' 78 | }, 79 | { 80 | name: 'operation', 81 | type: 'options' 82 | }, 83 | { 84 | name: 'channel', 85 | type: 'string', 86 | displayOptions: { 87 | show: { 88 | resource: ['message'], 89 | operation: ['post'] 90 | } 91 | } 92 | } 93 | ]; 94 | 95 | const analysis = PropertyDependencies.analyze(properties); 96 | 97 | const channelDep = analysis.dependencies[0]; 98 | expect(channelDep.dependsOn).toHaveLength(2); 99 | expect(channelDep.notes).toContain('Multiple conditions must be met for this property to be visible'); 100 | }); 101 | 102 | it('should build dependency graph', () => { 103 | const properties = [ 104 | { 105 | name: 'method', 106 | type: 'options' 107 | }, 108 | { 109 | name: 'sendBody', 110 | type: 'boolean', 111 | displayOptions: { 112 | show: { method: ['POST'] } 113 | } 114 | }, 115 | { 116 | name: 'contentType', 117 | type: 'options', 118 | displayOptions: { 119 | show: { method: ['POST'], sendBody: [true] } 120 | } 121 | } 122 | ]; 123 | 124 | const analysis = 
PropertyDependencies.analyze(properties); 125 | 126 | expect(analysis.dependencyGraph).toMatchObject({ 127 | method: ['sendBody', 'contentType'], 128 | sendBody: ['contentType'] 129 | }); 130 | }); 131 | 132 | it('should identify properties that enable others', () => { 133 | const properties = [ 134 | { 135 | name: 'sendHeaders', 136 | type: 'boolean' 137 | }, 138 | { 139 | name: 'headerParameters', 140 | type: 'collection', 141 | displayOptions: { 142 | show: { sendHeaders: [true] } 143 | } 144 | }, 145 | { 146 | name: 'headerCount', 147 | type: 'number', 148 | displayOptions: { 149 | show: { sendHeaders: [true] } 150 | } 151 | } 152 | ]; 153 | 154 | const analysis = PropertyDependencies.analyze(properties); 155 | 156 | const sendHeadersDeps = analysis.dependencies.filter(d => 157 | d.dependsOn.some(c => c.property === 'sendHeaders') 158 | ); 159 | 160 | expect(sendHeadersDeps).toHaveLength(2); 161 | expect(analysis.dependencyGraph.sendHeaders).toContain('headerParameters'); 162 | expect(analysis.dependencyGraph.sendHeaders).toContain('headerCount'); 163 | }); 164 | 165 | it('should add notes for collection types', () => { 166 | const properties = [ 167 | { 168 | name: 'showCollection', 169 | type: 'boolean' 170 | }, 171 | { 172 | name: 'items', 173 | type: 'collection', 174 | displayOptions: { 175 | show: { showCollection: [true] } 176 | } 177 | } 178 | ]; 179 | 180 | const analysis = PropertyDependencies.analyze(properties); 181 | 182 | const itemsDep = analysis.dependencies[0]; 183 | expect(itemsDep.notes).toContain('This property contains nested properties that may have their own dependencies'); 184 | }); 185 | 186 | it('should generate helpful descriptions', () => { 187 | const properties = [ 188 | { 189 | name: 'method', 190 | displayName: 'HTTP Method', 191 | type: 'options' 192 | }, 193 | { 194 | name: 'sendBody', 195 | type: 'boolean', 196 | displayOptions: { 197 | show: { method: ['POST', 'PUT'] } 198 | } 199 | } 200 | ]; 201 | 202 | const analysis = PropertyDependencies.analyze(properties); 203 | 204 | const sendBodyDep = analysis.dependencies[0]; 205 | expect(sendBodyDep.dependsOn[0].description).toBe( 206 | 'Visible when HTTP Method is one of: "POST", "PUT"' 207 | ); 208 | }); 209 | 210 | it('should handle empty properties', () => { 211 | const analysis = PropertyDependencies.analyze([]); 212 | 213 | expect(analysis.totalProperties).toBe(0); 214 | expect(analysis.propertiesWithDependencies).toBe(0); 215 | expect(analysis.dependencies).toHaveLength(0); 216 | expect(analysis.dependencyGraph).toEqual({}); 217 | }); 218 | }); 219 | 220 | describe('suggestions', () => { 221 | it('should suggest key properties to configure first', () => { 222 | const properties = [ 223 | { 224 | name: 'resource', 225 | type: 'options' 226 | }, 227 | { 228 | name: 'operation', 229 | type: 'options', 230 | displayOptions: { 231 | show: { resource: ['message'] } 232 | } 233 | }, 234 | { 235 | name: 'channel', 236 | type: 'string', 237 | displayOptions: { 238 | show: { resource: ['message'], operation: ['post'] } 239 | } 240 | }, 241 | { 242 | name: 'text', 243 | type: 'string', 244 | displayOptions: { 245 | show: { resource: ['message'], operation: ['post'] } 246 | } 247 | } 248 | ]; 249 | 250 | const analysis = PropertyDependencies.analyze(properties); 251 | 252 | expect(analysis.suggestions[0]).toContain('Key properties to configure first'); 253 | expect(analysis.suggestions[0]).toContain('resource'); 254 | }); 255 | 256 | it('should detect circular dependencies', () => { 257 | const properties = [ 
258 | { 259 | name: 'fieldA', 260 | type: 'string', 261 | displayOptions: { 262 | show: { fieldB: ['value'] } 263 | } 264 | }, 265 | { 266 | name: 'fieldB', 267 | type: 'string', 268 | displayOptions: { 269 | show: { fieldA: ['value'] } 270 | } 271 | } 272 | ]; 273 | 274 | const analysis = PropertyDependencies.analyze(properties); 275 | 276 | expect(analysis.suggestions.some(s => s.includes('Circular dependency'))).toBe(true); 277 | }); 278 | 279 | it('should note complex dependencies', () => { 280 | const properties = [ 281 | { 282 | name: 'a', 283 | type: 'string' 284 | }, 285 | { 286 | name: 'b', 287 | type: 'string' 288 | }, 289 | { 290 | name: 'c', 291 | type: 'string' 292 | }, 293 | { 294 | name: 'complex', 295 | type: 'string', 296 | displayOptions: { 297 | show: { a: ['1'], b: ['2'], c: ['3'] } 298 | } 299 | } 300 | ]; 301 | 302 | const analysis = PropertyDependencies.analyze(properties); 303 | 304 | expect(analysis.suggestions.some(s => s.includes('multiple dependencies'))).toBe(true); 305 | }); 306 | }); 307 | 308 | describe('getVisibilityImpact', () => { 309 | const properties = [ 310 | { 311 | name: 'method', 312 | type: 'options' 313 | }, 314 | { 315 | name: 'sendBody', 316 | type: 'boolean', 317 | displayOptions: { 318 | show: { method: ['POST', 'PUT'] } 319 | } 320 | }, 321 | { 322 | name: 'contentType', 323 | type: 'options', 324 | displayOptions: { 325 | show: { 326 | method: ['POST', 'PUT'], 327 | sendBody: [true] 328 | } 329 | } 330 | }, 331 | { 332 | name: 'debugMode', 333 | type: 'boolean', 334 | displayOptions: { 335 | hide: { method: ['GET'] } 336 | } 337 | } 338 | ]; 339 | 340 | it('should determine visible properties for POST method', () => { 341 | const config = { method: 'POST', sendBody: true }; 342 | const impact = PropertyDependencies.getVisibilityImpact(properties, config); 343 | 344 | expect(impact.visible).toContain('method'); 345 | expect(impact.visible).toContain('sendBody'); 346 | expect(impact.visible).toContain('contentType'); 347 | expect(impact.visible).toContain('debugMode'); 348 | expect(impact.hidden).toHaveLength(0); 349 | }); 350 | 351 | it('should determine hidden properties for GET method', () => { 352 | const config = { method: 'GET' }; 353 | const impact = PropertyDependencies.getVisibilityImpact(properties, config); 354 | 355 | expect(impact.visible).toContain('method'); 356 | expect(impact.hidden).toContain('sendBody'); 357 | expect(impact.hidden).toContain('contentType'); 358 | expect(impact.hidden).toContain('debugMode'); // Hidden by hide condition 359 | }); 360 | 361 | it('should provide reasons for visibility', () => { 362 | const config = { method: 'GET' }; 363 | const impact = PropertyDependencies.getVisibilityImpact(properties, config); 364 | 365 | expect(impact.reasons.sendBody).toContain('needs to be POST or PUT'); 366 | expect(impact.reasons.debugMode).toContain('Hidden because method is "GET"'); 367 | }); 368 | 369 | it('should handle partial dependencies', () => { 370 | const config = { method: 'POST', sendBody: false }; 371 | const impact = PropertyDependencies.getVisibilityImpact(properties, config); 372 | 373 | expect(impact.visible).toContain('sendBody'); 374 | expect(impact.hidden).toContain('contentType'); 375 | expect(impact.reasons.contentType).toContain('needs to be true'); 376 | }); 377 | 378 | it('should handle properties without display options', () => { 379 | const simpleProps = [ 380 | { name: 'field1', type: 'string' }, 381 | { name: 'field2', type: 'number' } 382 | ]; 383 | 384 | const impact = 
PropertyDependencies.getVisibilityImpact(simpleProps, {}); 385 | 386 | expect(impact.visible).toEqual(['field1', 'field2']); 387 | expect(impact.hidden).toHaveLength(0); 388 | }); 389 | 390 | it('should handle empty configuration', () => { 391 | const impact = PropertyDependencies.getVisibilityImpact(properties, {}); 392 | 393 | expect(impact.visible).toContain('method'); 394 | expect(impact.hidden).toContain('sendBody'); // No method value provided 395 | expect(impact.hidden).toContain('contentType'); 396 | }); 397 | 398 | it('should handle array values in conditions', () => { 399 | const props = [ 400 | { 401 | name: 'status', 402 | type: 'options' 403 | }, 404 | { 405 | name: 'errorMessage', 406 | type: 'string', 407 | displayOptions: { 408 | show: { status: ['error', 'failed'] } 409 | } 410 | } 411 | ]; 412 | 413 | const config1 = { status: 'error' }; 414 | const impact1 = PropertyDependencies.getVisibilityImpact(props, config1); 415 | expect(impact1.visible).toContain('errorMessage'); 416 | 417 | const config2 = { status: 'success' }; 418 | const impact2 = PropertyDependencies.getVisibilityImpact(props, config2); 419 | expect(impact2.hidden).toContain('errorMessage'); 420 | }); 421 | }); 422 | 423 | describe('edge cases', () => { 424 | it('should handle properties with both show and hide conditions', () => { 425 | const properties = [ 426 | { 427 | name: 'mode', 428 | type: 'options' 429 | }, 430 | { 431 | name: 'special', 432 | type: 'string', 433 | displayOptions: { 434 | show: { mode: ['custom'] }, 435 | hide: { debug: [true] } 436 | } 437 | } 438 | ]; 439 | 440 | const analysis = PropertyDependencies.analyze(properties); 441 | 442 | const specialDep = analysis.dependencies[0]; 443 | expect(specialDep.showWhen).toEqual({ mode: ['custom'] }); 444 | expect(specialDep.hideWhen).toEqual({ debug: [true] }); 445 | expect(specialDep.dependsOn).toHaveLength(2); 446 | }); 447 | 448 | it('should handle non-array values in display conditions', () => { 449 | const properties = [ 450 | { 451 | name: 'enabled', 452 | type: 'boolean' 453 | }, 454 | { 455 | name: 'config', 456 | type: 'string', 457 | displayOptions: { 458 | show: { enabled: true } // Not an array 459 | } 460 | } 461 | ]; 462 | 463 | const analysis = PropertyDependencies.analyze(properties); 464 | 465 | const configDep = analysis.dependencies[0]; 466 | expect(configDep.dependsOn[0].values).toEqual([true]); 467 | }); 468 | 469 | it('should handle deeply nested property references', () => { 470 | const properties = [ 471 | { 472 | name: 'level1', 473 | type: 'options' 474 | }, 475 | { 476 | name: 'level2', 477 | type: 'options', 478 | displayOptions: { 479 | show: { level1: ['A'] } 480 | } 481 | }, 482 | { 483 | name: 'level3', 484 | type: 'string', 485 | displayOptions: { 486 | show: { level1: ['A'], level2: ['B'] } 487 | } 488 | } 489 | ]; 490 | 491 | const analysis = PropertyDependencies.analyze(properties); 492 | 493 | expect(analysis.dependencyGraph).toMatchObject({ 494 | level1: ['level2', 'level3'], 495 | level2: ['level3'] 496 | }); 497 | }); 498 | }); 499 | }); ``` -------------------------------------------------------------------------------- /tests/utils/database-utils.ts: -------------------------------------------------------------------------------- ```typescript 1 | import { DatabaseAdapter, createDatabaseAdapter } from '../../src/database/database-adapter'; 2 | import { NodeRepository } from '../../src/database/node-repository'; 3 | import { TemplateRepository } from '../../src/templates/template-repository'; 4 | 
import { ParsedNode } from '../../src/parsers/node-parser'; 5 | import { TemplateWorkflow, TemplateNode, TemplateUser, TemplateDetail } from '../../src/templates/template-fetcher'; 6 | import * as fs from 'fs'; 7 | import * as path from 'path'; 8 | import { vi } from 'vitest'; 9 | 10 | /** 11 | * Database test utilities for n8n-mcp 12 | * Provides helpers for creating, seeding, and managing test databases 13 | */ 14 | 15 | export interface TestDatabaseOptions { 16 | /** 17 | * Use in-memory database (default: true) 18 | * When false, creates a temporary file database 19 | */ 20 | inMemory?: boolean; 21 | 22 | /** 23 | * Custom database path (only used when inMemory is false) 24 | */ 25 | dbPath?: string; 26 | 27 | /** 28 | * Initialize with schema (default: true) 29 | */ 30 | initSchema?: boolean; 31 | 32 | /** 33 | * Enable FTS5 support if available (default: false) 34 | */ 35 | enableFTS5?: boolean; 36 | } 37 | 38 | export interface TestDatabase { 39 | adapter: DatabaseAdapter; 40 | nodeRepository: NodeRepository; 41 | templateRepository: TemplateRepository; 42 | path: string; 43 | cleanup: () => Promise<void>; 44 | } 45 | 46 | export interface DatabaseSnapshot { 47 | nodes: any[]; 48 | templates: any[]; 49 | metadata: { 50 | createdAt: string; 51 | nodeCount: number; 52 | templateCount: number; 53 | }; 54 | } 55 | 56 | /** 57 | * Creates a test database with repositories 58 | */ 59 | export async function createTestDatabase(options: TestDatabaseOptions = {}): Promise<TestDatabase> { 60 | const { 61 | inMemory = true, 62 | dbPath, 63 | initSchema = true, 64 | enableFTS5 = false 65 | } = options; 66 | 67 | // Determine database path 68 | const finalPath = inMemory 69 | ? ':memory:' 70 | : dbPath || path.join(__dirname, `../temp/test-${Date.now()}.db`); 71 | 72 | // Ensure directory exists for file-based databases 73 | if (!inMemory) { 74 | const dir = path.dirname(finalPath); 75 | if (!fs.existsSync(dir)) { 76 | fs.mkdirSync(dir, { recursive: true }); 77 | } 78 | } 79 | 80 | // Create database adapter 81 | const adapter = await createDatabaseAdapter(finalPath); 82 | 83 | // Initialize schema if requested 84 | if (initSchema) { 85 | await initializeDatabaseSchema(adapter, enableFTS5); 86 | } 87 | 88 | // Create repositories 89 | const nodeRepository = new NodeRepository(adapter); 90 | const templateRepository = new TemplateRepository(adapter); 91 | 92 | // Cleanup function 93 | const cleanup = async () => { 94 | adapter.close(); 95 | if (!inMemory && fs.existsSync(finalPath)) { 96 | fs.unlinkSync(finalPath); 97 | } 98 | }; 99 | 100 | return { 101 | adapter, 102 | nodeRepository, 103 | templateRepository, 104 | path: finalPath, 105 | cleanup 106 | }; 107 | } 108 | 109 | /** 110 | * Initializes database schema from SQL file 111 | */ 112 | export async function initializeDatabaseSchema(adapter: DatabaseAdapter, enableFTS5 = false): Promise<void> { 113 | const schemaPath = path.join(__dirname, '../../src/database/schema.sql'); 114 | const schema = fs.readFileSync(schemaPath, 'utf-8'); 115 | 116 | // Execute main schema 117 | adapter.exec(schema); 118 | 119 | // Optionally initialize FTS5 tables 120 | if (enableFTS5 && adapter.checkFTS5Support()) { 121 | adapter.exec(` 122 | CREATE VIRTUAL TABLE IF NOT EXISTS templates_fts USING fts5( 123 | name, 124 | description, 125 | content='templates', 126 | content_rowid='id' 127 | ); 128 | 129 | -- Trigger to keep FTS index in sync 130 | CREATE TRIGGER IF NOT EXISTS templates_ai AFTER INSERT ON templates BEGIN 131 | INSERT INTO templates_fts(rowid, 
name, description) 132 | VALUES (new.id, new.name, new.description); 133 | END; 134 | 135 | CREATE TRIGGER IF NOT EXISTS templates_au AFTER UPDATE ON templates BEGIN 136 | UPDATE templates_fts 137 | SET name = new.name, description = new.description 138 | WHERE rowid = new.id; 139 | END; 140 | 141 | CREATE TRIGGER IF NOT EXISTS templates_ad AFTER DELETE ON templates BEGIN 142 | DELETE FROM templates_fts WHERE rowid = old.id; 143 | END; 144 | `); 145 | } 146 | } 147 | 148 | /** 149 | * Seeds test nodes into the database 150 | */ 151 | export async function seedTestNodes( 152 | nodeRepository: NodeRepository, 153 | nodes: Partial<ParsedNode>[] = [] 154 | ): Promise<ParsedNode[]> { 155 | const defaultNodes: ParsedNode[] = [ 156 | createTestNode({ 157 | nodeType: 'nodes-base.httpRequest', 158 | displayName: 'HTTP Request', 159 | description: 'Makes HTTP requests', 160 | category: 'Core Nodes', 161 | isAITool: true 162 | }), 163 | createTestNode({ 164 | nodeType: 'nodes-base.webhook', 165 | displayName: 'Webhook', 166 | description: 'Receives webhook calls', 167 | category: 'Core Nodes', 168 | isTrigger: true, 169 | isWebhook: true 170 | }), 171 | createTestNode({ 172 | nodeType: 'nodes-base.slack', 173 | displayName: 'Slack', 174 | description: 'Send messages to Slack', 175 | category: 'Communication', 176 | isAITool: true 177 | }) 178 | ]; 179 | 180 | const allNodes = [...defaultNodes, ...nodes.map(n => createTestNode(n))]; 181 | 182 | for (const node of allNodes) { 183 | nodeRepository.saveNode(node); 184 | } 185 | 186 | return allNodes; 187 | } 188 | 189 | /** 190 | * Seeds test templates into the database 191 | */ 192 | export async function seedTestTemplates( 193 | templateRepository: TemplateRepository, 194 | templates: Partial<TemplateWorkflow>[] = [] 195 | ): Promise<TemplateWorkflow[]> { 196 | const defaultTemplates: TemplateWorkflow[] = [ 197 | createTestTemplate({ 198 | id: 1, 199 | name: 'Simple HTTP Workflow', 200 | description: 'Basic HTTP request workflow', 201 | nodes: [{ id: 1, name: 'HTTP Request', icon: 'http' }] 202 | }), 203 | createTestTemplate({ 204 | id: 2, 205 | name: 'Webhook to Slack', 206 | description: 'Webhook that sends to Slack', 207 | nodes: [ 208 | { id: 1, name: 'Webhook', icon: 'webhook' }, 209 | { id: 2, name: 'Slack', icon: 'slack' } 210 | ] 211 | }) 212 | ]; 213 | 214 | const allTemplates = [...defaultTemplates, ...templates.map(t => createTestTemplate(t))]; 215 | 216 | for (const template of allTemplates) { 217 | // Convert to TemplateDetail format for saving 218 | const detail: TemplateDetail = { 219 | id: template.id, 220 | name: template.name, 221 | description: template.description, 222 | views: template.totalViews, 223 | createdAt: template.createdAt, 224 | workflow: { 225 | nodes: template.nodes?.map((n, i) => ({ 226 | id: `node_${i}`, 227 | name: n.name, 228 | type: `n8n-nodes-base.${n.name.toLowerCase()}`, 229 | position: [250 + i * 200, 300], 230 | parameters: {} 231 | })) || [], 232 | connections: {}, 233 | settings: {} 234 | } 235 | }; 236 | await templateRepository.saveTemplate(template, detail); 237 | } 238 | 239 | return allTemplates; 240 | } 241 | 242 | /** 243 | * Creates a test node with defaults 244 | */ 245 | export function createTestNode(overrides: Partial<ParsedNode> = {}): ParsedNode { 246 | return { 247 | style: 'programmatic', 248 | nodeType: 'nodes-base.test', 249 | displayName: 'Test Node', 250 | description: 'A test node', 251 | category: 'Test', 252 | properties: [], 253 | credentials: [], 254 | isAITool: false, 255 | 
isTrigger: false, 256 | isWebhook: false, 257 | operations: [], 258 | version: '1', 259 | isVersioned: false, 260 | packageName: 'n8n-nodes-base', 261 | documentation: undefined, 262 | ...overrides 263 | }; 264 | } 265 | 266 | /** 267 | * Creates a test template with defaults 268 | */ 269 | export function createTestTemplate(overrides: Partial<TemplateWorkflow> = {}): TemplateWorkflow { 270 | const id = overrides.id || Math.floor(Math.random() * 10000); 271 | return { 272 | id, 273 | name: `Test Template ${id}`, 274 | description: 'A test template', 275 | nodes: overrides.nodes || [], 276 | user: overrides.user || { 277 | id: 1, 278 | name: 'Test User', 279 | username: 'testuser', 280 | verified: false 281 | }, 282 | createdAt: overrides.createdAt || new Date().toISOString(), 283 | totalViews: overrides.totalViews || 100, 284 | ...overrides 285 | }; 286 | } 287 | 288 | /** 289 | * Resets database to clean state 290 | */ 291 | export async function resetDatabase(adapter: DatabaseAdapter): Promise<void> { 292 | // Drop all tables 293 | adapter.exec(` 294 | DROP TABLE IF EXISTS templates_fts; 295 | DROP TABLE IF EXISTS templates; 296 | DROP TABLE IF EXISTS nodes; 297 | `); 298 | 299 | // Reinitialize schema 300 | await initializeDatabaseSchema(adapter); 301 | } 302 | 303 | /** 304 | * Creates a database snapshot 305 | */ 306 | export async function createDatabaseSnapshot(adapter: DatabaseAdapter): Promise<DatabaseSnapshot> { 307 | const nodes = adapter.prepare('SELECT * FROM nodes').all(); 308 | const templates = adapter.prepare('SELECT * FROM templates').all(); 309 | 310 | return { 311 | nodes, 312 | templates, 313 | metadata: { 314 | createdAt: new Date().toISOString(), 315 | nodeCount: nodes.length, 316 | templateCount: templates.length 317 | } 318 | }; 319 | } 320 | 321 | /** 322 | * Restores database from snapshot 323 | */ 324 | export async function restoreDatabaseSnapshot( 325 | adapter: DatabaseAdapter, 326 | snapshot: DatabaseSnapshot 327 | ): Promise<void> { 328 | // Reset database first 329 | await resetDatabase(adapter); 330 | 331 | // Restore nodes 332 | const nodeStmt = adapter.prepare(` 333 | INSERT INTO nodes ( 334 | node_type, package_name, display_name, description, 335 | category, development_style, is_ai_tool, is_trigger, 336 | is_webhook, is_versioned, version, documentation, 337 | properties_schema, operations, credentials_required 338 | ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 339 | `); 340 | 341 | for (const node of snapshot.nodes) { 342 | nodeStmt.run( 343 | node.node_type, 344 | node.package_name, 345 | node.display_name, 346 | node.description, 347 | node.category, 348 | node.development_style, 349 | node.is_ai_tool, 350 | node.is_trigger, 351 | node.is_webhook, 352 | node.is_versioned, 353 | node.version, 354 | node.documentation, 355 | node.properties_schema, 356 | node.operations, 357 | node.credentials_required 358 | ); 359 | } 360 | 361 | // Restore templates 362 | const templateStmt = adapter.prepare(` 363 | INSERT INTO templates ( 364 | id, workflow_id, name, description, 365 | author_name, author_username, author_verified, 366 | nodes_used, workflow_json, categories, 367 | views, created_at, updated_at, url 368 | ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
369 | `); 370 | 371 | for (const template of snapshot.templates) { 372 | templateStmt.run( 373 | template.id, 374 | template.workflow_id, 375 | template.name, 376 | template.description, 377 | template.author_name, 378 | template.author_username, 379 | template.author_verified, 380 | template.nodes_used, 381 | template.workflow_json, 382 | template.categories, 383 | template.views, 384 | template.created_at, 385 | template.updated_at, 386 | template.url 387 | ); 388 | } 389 | } 390 | 391 | /** 392 | * Loads JSON fixtures into database 393 | */ 394 | export async function loadFixtures( 395 | adapter: DatabaseAdapter, 396 | fixturePath: string 397 | ): Promise<void> { 398 | const fixtures = JSON.parse(fs.readFileSync(fixturePath, 'utf-8')); 399 | 400 | if (fixtures.nodes) { 401 | const nodeRepo = new NodeRepository(adapter); 402 | for (const node of fixtures.nodes) { 403 | nodeRepo.saveNode(node); 404 | } 405 | } 406 | 407 | if (fixtures.templates) { 408 | const templateRepo = new TemplateRepository(adapter); 409 | for (const template of fixtures.templates) { 410 | // Convert to proper format 411 | const detail: TemplateDetail = { 412 | id: template.id, 413 | name: template.name, 414 | description: template.description, 415 | views: template.views || template.totalViews || 0, 416 | createdAt: template.createdAt, 417 | workflow: template.workflow || { 418 | nodes: template.nodes?.map((n: any, i: number) => ({ 419 | id: `node_${i}`, 420 | name: n.name, 421 | type: `n8n-nodes-base.${n.name.toLowerCase()}`, 422 | position: [250 + i * 200, 300], 423 | parameters: {} 424 | })) || [], 425 | connections: {}, 426 | settings: {} 427 | } 428 | }; 429 | await templateRepo.saveTemplate(template, detail); 430 | } 431 | } 432 | } 433 | 434 | /** 435 | * Database test helpers for common operations 436 | */ 437 | export const dbHelpers = { 438 | /** 439 | * Counts rows in a table 440 | */ 441 | countRows(adapter: DatabaseAdapter, table: string): number { 442 | const result = adapter.prepare(`SELECT COUNT(*) as count FROM ${table}`).get() as { count: number }; 443 | return result.count; 444 | }, 445 | 446 | /** 447 | * Checks if a node exists 448 | */ 449 | nodeExists(adapter: DatabaseAdapter, nodeType: string): boolean { 450 | const result = adapter.prepare('SELECT 1 FROM nodes WHERE node_type = ?').get(nodeType); 451 | return !!result; 452 | }, 453 | 454 | /** 455 | * Gets all node types 456 | */ 457 | getAllNodeTypes(adapter: DatabaseAdapter): string[] { 458 | const rows = adapter.prepare('SELECT node_type FROM nodes').all() as { node_type: string }[]; 459 | return rows.map(r => r.node_type); 460 | }, 461 | 462 | /** 463 | * Clears a specific table 464 | */ 465 | clearTable(adapter: DatabaseAdapter, table: string): void { 466 | adapter.exec(`DELETE FROM ${table}`); 467 | }, 468 | 469 | /** 470 | * Executes raw SQL 471 | */ 472 | executeSql(adapter: DatabaseAdapter, sql: string): void { 473 | adapter.exec(sql); 474 | } 475 | }; 476 | 477 | /** 478 | * Creates a mock database adapter for unit tests 479 | */ 480 | export function createMockDatabaseAdapter(): DatabaseAdapter { 481 | const mockDb = { 482 | prepare: vi.fn(), 483 | exec: vi.fn(), 484 | close: vi.fn(), 485 | pragma: vi.fn(), 486 | inTransaction: false, 487 | transaction: vi.fn((fn) => fn()), 488 | checkFTS5Support: vi.fn(() => false) 489 | }; 490 | 491 | return mockDb as unknown as DatabaseAdapter; 492 | } 493 | 494 | /** 495 | * Transaction test helper 496 | * Note: better-sqlite3 transactions are synchronous 497 | */ 498 | export async function 
withTransaction<T>( 499 | adapter: DatabaseAdapter, 500 | fn: () => Promise<T> 501 | ): Promise<T | null> { 502 | try { 503 | adapter.exec('BEGIN'); 504 | const result = await fn(); 505 | // Always rollback for testing 506 | adapter.exec('ROLLBACK'); 507 | return null; // Indicate rollback happened 508 | } catch (error) { 509 | adapter.exec('ROLLBACK'); 510 | throw error; 511 | } 512 | } 513 | 514 | /** 515 | * Performance test helper 516 | */ 517 | export async function measureDatabaseOperation( 518 | name: string, 519 | operation: () => Promise<void> 520 | ): Promise<number> { 521 | const start = performance.now(); 522 | await operation(); 523 | const duration = performance.now() - start; 524 | console.log(`[DB Performance] ${name}: ${duration.toFixed(2)}ms`); 525 | return duration; 526 | } ``` -------------------------------------------------------------------------------- /tests/integration/ai-validation/ai-agent-validation.test.ts: -------------------------------------------------------------------------------- ```typescript 1 | /** 2 | * Integration Tests: AI Agent Validation 3 | * 4 | * Tests AI Agent validation against real n8n instance. 5 | * These tests validate the fixes from v2.17.0 including node type normalization. 6 | */ 7 | 8 | import { describe, it, expect, beforeEach, afterEach, afterAll } from 'vitest'; 9 | import { createTestContext, TestContext, createTestWorkflowName } from '../n8n-api/utils/test-context'; 10 | import { getTestN8nClient } from '../n8n-api/utils/n8n-client'; 11 | import { N8nApiClient } from '../../../src/services/n8n-api-client'; 12 | import { cleanupOrphanedWorkflows } from '../n8n-api/utils/cleanup-helpers'; 13 | import { createMcpContext } from '../n8n-api/utils/mcp-context'; 14 | import { InstanceContext } from '../../../src/types/instance-context'; 15 | import { handleValidateWorkflow } from '../../../src/mcp/handlers-n8n-manager'; 16 | import { getNodeRepository, closeNodeRepository } from '../n8n-api/utils/node-repository'; 17 | import { NodeRepository } from '../../../src/database/node-repository'; 18 | import { ValidationResponse } from '../n8n-api/types/mcp-responses'; 19 | import { 20 | createAIAgentNode, 21 | createChatTriggerNode, 22 | createLanguageModelNode, 23 | createHTTPRequestToolNode, 24 | createCodeToolNode, 25 | createMemoryNode, 26 | createRespondNode, 27 | createAIConnection, 28 | createMainConnection, 29 | mergeConnections, 30 | createAIWorkflow 31 | } from './helpers'; 32 | 33 | describe('Integration: AI Agent Validation', () => { 34 | let context: TestContext; 35 | let client: N8nApiClient; 36 | let mcpContext: InstanceContext; 37 | let repository: NodeRepository; 38 | 39 | beforeEach(async () => { 40 | context = createTestContext(); 41 | client = getTestN8nClient(); 42 | mcpContext = createMcpContext(); 43 | repository = await getNodeRepository(); 44 | }); 45 | 46 | afterEach(async () => { 47 | await context.cleanup(); 48 | }); 49 | 50 | afterAll(async () => { 51 | await closeNodeRepository(); 52 | if (!process.env.CI) { 53 | await cleanupOrphanedWorkflows(); 54 | } 55 | }); 56 | 57 | // ====================================================================== 58 | // TEST 1: Missing Language Model 59 | // ====================================================================== 60 | 61 | it('should detect missing language model in real workflow', async () => { 62 | const agent = createAIAgentNode({ 63 | name: 'AI Agent', 64 | text: 'Test prompt' 65 | }); 66 | 67 | const workflow = createAIWorkflow( 68 | [agent], 69 | {}, 70 | { 
71 | name: createTestWorkflowName('AI Agent - Missing Model'), 72 | tags: ['mcp-integration-test', 'ai-validation'] 73 | } 74 | ); 75 | 76 | const created = await client.createWorkflow(workflow); 77 | context.trackWorkflow(created.id!); 78 | 79 | const response = await handleValidateWorkflow( 80 | { id: created.id }, 81 | repository, 82 | mcpContext 83 | ); 84 | 85 | expect(response.success).toBe(true); 86 | const data = response.data as ValidationResponse; 87 | 88 | expect(data.valid).toBe(false); 89 | expect(data.errors).toBeDefined(); 90 | expect(data.errors!.length).toBeGreaterThan(0); 91 | 92 | const errorCodes = data.errors!.map(e => e.details?.code || e.code); 93 | expect(errorCodes).toContain('MISSING_LANGUAGE_MODEL'); 94 | 95 | const errorMessages = data.errors!.map(e => e.message).join(' '); 96 | expect(errorMessages).toMatch(/language model|ai_languageModel/i); 97 | }); 98 | 99 | // ====================================================================== 100 | // TEST 2: Valid AI Agent with Language Model 101 | // ====================================================================== 102 | 103 | it('should validate AI Agent with language model', async () => { 104 | const languageModel = createLanguageModelNode('openai', { 105 | name: 'OpenAI Chat Model' 106 | }); 107 | 108 | const agent = createAIAgentNode({ 109 | name: 'AI Agent', 110 | text: 'You are a helpful assistant' 111 | }); 112 | 113 | const workflow = createAIWorkflow( 114 | [languageModel, agent], 115 | mergeConnections( 116 | createAIConnection('OpenAI Chat Model', 'AI Agent', 'ai_languageModel') 117 | ), 118 | { 119 | name: createTestWorkflowName('AI Agent - Valid'), 120 | tags: ['mcp-integration-test', 'ai-validation'] 121 | } 122 | ); 123 | 124 | const created = await client.createWorkflow(workflow); 125 | context.trackWorkflow(created.id!); 126 | 127 | const response = await handleValidateWorkflow( 128 | { id: created.id }, 129 | repository, 130 | mcpContext 131 | ); 132 | 133 | expect(response.success).toBe(true); 134 | const data = response.data as ValidationResponse; 135 | 136 | expect(data.valid).toBe(true); 137 | expect(data.errors).toBeUndefined(); 138 | expect(data.summary.errorCount).toBe(0); 139 | }); 140 | 141 | // ====================================================================== 142 | // TEST 3: Tool Connections Detection 143 | // ====================================================================== 144 | 145 | it('should detect tool connections correctly', async () => { 146 | const languageModel = createLanguageModelNode('openai', { 147 | name: 'OpenAI Chat Model' 148 | }); 149 | 150 | const httpTool = createHTTPRequestToolNode({ 151 | name: 'HTTP Request Tool', 152 | toolDescription: 'Fetches weather data from API', 153 | url: 'https://api.weather.com/current', 154 | method: 'GET' 155 | }); 156 | 157 | const agent = createAIAgentNode({ 158 | name: 'AI Agent', 159 | text: 'You are a weather assistant' 160 | }); 161 | 162 | const workflow = createAIWorkflow( 163 | [languageModel, httpTool, agent], 164 | mergeConnections( 165 | createAIConnection('OpenAI Chat Model', 'AI Agent', 'ai_languageModel'), 166 | createAIConnection('HTTP Request Tool', 'AI Agent', 'ai_tool') 167 | ), 168 | { 169 | name: createTestWorkflowName('AI Agent - With Tool'), 170 | tags: ['mcp-integration-test', 'ai-validation'] 171 | } 172 | ); 173 | 174 | const created = await client.createWorkflow(workflow); 175 | context.trackWorkflow(created.id!); 176 | 177 | const response = await handleValidateWorkflow( 178 | { id: created.id 
}, 179 | repository, 180 | mcpContext 181 | ); 182 | 183 | expect(response.success).toBe(true); 184 | const data = response.data as ValidationResponse; 185 | 186 | expect(data.valid).toBe(true); 187 | 188 | // Should NOT have false "no tools" warning 189 | if (data.warnings) { 190 | const toolWarnings = data.warnings.filter(w => 191 | w.message.toLowerCase().includes('no ai_tool') 192 | ); 193 | expect(toolWarnings.length).toBe(0); 194 | } 195 | }); 196 | 197 | // ====================================================================== 198 | // TEST 4: Streaming Mode Constraints (Chat Trigger) 199 | // ====================================================================== 200 | 201 | it('should validate streaming mode constraints', async () => { 202 | const chatTrigger = createChatTriggerNode({ 203 | name: 'Chat Trigger', 204 | responseMode: 'streaming' 205 | }); 206 | 207 | const languageModel = createLanguageModelNode('openai', { 208 | name: 'OpenAI Chat Model' 209 | }); 210 | 211 | const agent = createAIAgentNode({ 212 | name: 'AI Agent', 213 | text: 'You are a helpful assistant' 214 | }); 215 | 216 | const respond = createRespondNode({ 217 | name: 'Respond to Webhook' 218 | }); 219 | 220 | const workflow = createAIWorkflow( 221 | [chatTrigger, languageModel, agent, respond], 222 | mergeConnections( 223 | createMainConnection('Chat Trigger', 'AI Agent'), 224 | createAIConnection('OpenAI Chat Model', 'AI Agent', 'ai_languageModel'), 225 | createMainConnection('AI Agent', 'Respond to Webhook') // ERROR: streaming with main output 226 | ), 227 | { 228 | name: createTestWorkflowName('AI Agent - Streaming Error'), 229 | tags: ['mcp-integration-test', 'ai-validation'] 230 | } 231 | ); 232 | 233 | const created = await client.createWorkflow(workflow); 234 | context.trackWorkflow(created.id!); 235 | 236 | const response = await handleValidateWorkflow( 237 | { id: created.id }, 238 | repository, 239 | mcpContext 240 | ); 241 | 242 | expect(response.success).toBe(true); 243 | const data = response.data as ValidationResponse; 244 | 245 | expect(data.valid).toBe(false); 246 | expect(data.errors).toBeDefined(); 247 | 248 | const streamingErrors = data.errors!.filter(e => { 249 | const code = e.details?.code || e.code; 250 | return code === 'STREAMING_WITH_MAIN_OUTPUT' || 251 | code === 'STREAMING_AGENT_HAS_OUTPUT'; 252 | }); 253 | expect(streamingErrors.length).toBeGreaterThan(0); 254 | }); 255 | 256 | // ====================================================================== 257 | // TEST 5: AI Agent Own streamResponse Setting 258 | // ====================================================================== 259 | 260 | it('should validate AI Agent own streamResponse setting', async () => { 261 | const languageModel = createLanguageModelNode('openai', { 262 | name: 'OpenAI Chat Model' 263 | }); 264 | 265 | const agent = createAIAgentNode({ 266 | name: 'AI Agent', 267 | text: 'You are a helpful assistant', 268 | streamResponse: true // Agent has its own streaming enabled 269 | }); 270 | 271 | const respond = createRespondNode({ 272 | name: 'Respond to Webhook' 273 | }); 274 | 275 | const workflow = createAIWorkflow( 276 | [languageModel, agent, respond], 277 | mergeConnections( 278 | createAIConnection('OpenAI Chat Model', 'AI Agent', 'ai_languageModel'), 279 | createMainConnection('AI Agent', 'Respond to Webhook') // ERROR: streaming with main output 280 | ), 281 | { 282 | name: createTestWorkflowName('AI Agent - Own Streaming'), 283 | tags: ['mcp-integration-test', 'ai-validation'] 284 | } 285 | ); 286 
| 287 | const created = await client.createWorkflow(workflow); 288 | context.trackWorkflow(created.id!); 289 | 290 | const response = await handleValidateWorkflow( 291 | { id: created.id }, 292 | repository, 293 | mcpContext 294 | ); 295 | 296 | expect(response.success).toBe(true); 297 | const data = response.data as ValidationResponse; 298 | 299 | expect(data.valid).toBe(false); 300 | expect(data.errors).toBeDefined(); 301 | 302 | const errorCodes = data.errors!.map(e => e.details?.code || e.code); 303 | expect(errorCodes).toContain('STREAMING_WITH_MAIN_OUTPUT'); 304 | }); 305 | 306 | // ====================================================================== 307 | // TEST 6: Multiple Memory Connections 308 | // ====================================================================== 309 | 310 | it('should validate memory connections', async () => { 311 | const languageModel = createLanguageModelNode('openai', { 312 | name: 'OpenAI Chat Model' 313 | }); 314 | 315 | const memory1 = createMemoryNode({ 316 | name: 'Memory 1' 317 | }); 318 | 319 | const memory2 = createMemoryNode({ 320 | name: 'Memory 2' 321 | }); 322 | 323 | const agent = createAIAgentNode({ 324 | name: 'AI Agent', 325 | text: 'You are a helpful assistant' 326 | }); 327 | 328 | const workflow = createAIWorkflow( 329 | [languageModel, memory1, memory2, agent], 330 | mergeConnections( 331 | createAIConnection('OpenAI Chat Model', 'AI Agent', 'ai_languageModel'), 332 | createAIConnection('Memory 1', 'AI Agent', 'ai_memory'), 333 | createAIConnection('Memory 2', 'AI Agent', 'ai_memory') // ERROR: multiple memory 334 | ), 335 | { 336 | name: createTestWorkflowName('AI Agent - Multiple Memory'), 337 | tags: ['mcp-integration-test', 'ai-validation'] 338 | } 339 | ); 340 | 341 | const created = await client.createWorkflow(workflow); 342 | context.trackWorkflow(created.id!); 343 | 344 | const response = await handleValidateWorkflow( 345 | { id: created.id }, 346 | repository, 347 | mcpContext 348 | ); 349 | 350 | expect(response.success).toBe(true); 351 | const data = response.data as ValidationResponse; 352 | 353 | expect(data.valid).toBe(false); 354 | expect(data.errors).toBeDefined(); 355 | 356 | const errorCodes = data.errors!.map(e => e.details?.code || e.code); 357 | expect(errorCodes).toContain('MULTIPLE_MEMORY_CONNECTIONS'); 358 | }); 359 | 360 | // ====================================================================== 361 | // TEST 7: Complete AI Workflow (All Components) 362 | // ====================================================================== 363 | 364 | it('should validate complete AI workflow', async () => { 365 | const chatTrigger = createChatTriggerNode({ 366 | name: 'Chat Trigger', 367 | responseMode: 'lastNode' // Not streaming 368 | }); 369 | 370 | const languageModel = createLanguageModelNode('openai', { 371 | name: 'OpenAI Chat Model' 372 | }); 373 | 374 | const httpTool = createHTTPRequestToolNode({ 375 | name: 'HTTP Request Tool', 376 | toolDescription: 'Fetches data from external API', 377 | url: 'https://api.example.com/data', 378 | method: 'GET' 379 | }); 380 | 381 | const codeTool = createCodeToolNode({ 382 | name: 'Code Tool', 383 | toolDescription: 'Processes data with custom logic', 384 | code: 'return { result: "processed" };' 385 | }); 386 | 387 | const memory = createMemoryNode({ 388 | name: 'Window Buffer Memory', 389 | contextWindowLength: 5 390 | }); 391 | 392 | const agent = createAIAgentNode({ 393 | name: 'AI Agent', 394 | promptType: 'define', 395 | text: 'You are a helpful assistant with 
access to tools', 396 | systemMessage: 'You are an AI assistant that helps users with data processing and external API calls.' 397 | }); 398 | 399 | const respond = createRespondNode({ 400 | name: 'Respond to Webhook' 401 | }); 402 | 403 | const workflow = createAIWorkflow( 404 | [chatTrigger, languageModel, httpTool, codeTool, memory, agent, respond], 405 | mergeConnections( 406 | createMainConnection('Chat Trigger', 'AI Agent'), 407 | createAIConnection('OpenAI Chat Model', 'AI Agent', 'ai_languageModel'), 408 | createAIConnection('HTTP Request Tool', 'AI Agent', 'ai_tool'), 409 | createAIConnection('Code Tool', 'AI Agent', 'ai_tool'), 410 | createAIConnection('Window Buffer Memory', 'AI Agent', 'ai_memory'), 411 | createMainConnection('AI Agent', 'Respond to Webhook') 412 | ), 413 | { 414 | name: createTestWorkflowName('AI Agent - Complete Workflow'), 415 | tags: ['mcp-integration-test', 'ai-validation'] 416 | } 417 | ); 418 | 419 | const created = await client.createWorkflow(workflow); 420 | context.trackWorkflow(created.id!); 421 | 422 | const response = await handleValidateWorkflow( 423 | { id: created.id }, 424 | repository, 425 | mcpContext 426 | ); 427 | 428 | expect(response.success).toBe(true); 429 | const data = response.data as ValidationResponse; 430 | 431 | expect(data.valid).toBe(true); 432 | expect(data.errors).toBeUndefined(); 433 | expect(data.summary.errorCount).toBe(0); 434 | }); 435 | }); 436 | ```
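The integration tests above build their workflow graphs with a small set of helpers imported from `./helpers` (`createAIConnection`, `createMainConnection`, `mergeConnections`, and the node factories). That module is not shown on this page, so the sketch below is only an illustration of the connection objects those helpers appear to produce, inferred from how the tests call them and from n8n's workflow JSON layout (source node name, then connection type, then an array of output slots). The type aliases and exact signatures here are assumptions for readability, not the repository's implementation.

```typescript
// Sketch only: plausible shapes for the ./helpers connection builders used in the tests above.
// The real helpers live in tests/integration/ai-validation/helpers and may differ in detail.

type ConnectionTarget = { node: string; type: string; index: number };
// n8n keys connections by the *source* node name, then by connection type.
type WorkflowConnections = Record<string, Record<string, ConnectionTarget[][]>>;

/** Connect a source node's "main" output to a target node. */
function createMainConnection(source: string, target: string): WorkflowConnections {
  return { [source]: { main: [[{ node: target, type: 'main', index: 0 }]] } };
}

/** Connect an AI sub-node (model, tool, memory) to an agent via an ai_* port. */
function createAIConnection(
  source: string,
  target: string,
  type: 'ai_languageModel' | 'ai_tool' | 'ai_memory'
): WorkflowConnections {
  return { [source]: { [type]: [[{ node: target, type, index: 0 }]] } };
}

/** Merge connection fragments, concatenating outputs when a source/type repeats. */
function mergeConnections(...fragments: WorkflowConnections[]): WorkflowConnections {
  const merged: WorkflowConnections = {};
  for (const fragment of fragments) {
    for (const [source, byType] of Object.entries(fragment)) {
      merged[source] ??= {};
      for (const [type, outputs] of Object.entries(byType)) {
        merged[source][type] = [...(merged[source][type] ?? []), ...outputs];
      }
    }
  }
  return merged;
}

// Example: the connections object a workflow like "AI Agent - Complete Workflow" would carry.
const connections = mergeConnections(
  createMainConnection('Chat Trigger', 'AI Agent'),
  createAIConnection('OpenAI Chat Model', 'AI Agent', 'ai_languageModel'),
  createAIConnection('HTTP Request Tool', 'AI Agent', 'ai_tool'),
  createAIConnection('Window Buffer Memory', 'AI Agent', 'ai_memory')
);
console.log(JSON.stringify(connections, null, 2));
```

The split the tests rely on follows from this shape: `main` connections carry execution flow between regular nodes, while `ai_languageModel`, `ai_tool`, and `ai_memory` connections attach sub-nodes to the agent, which is why the streaming tests can assert on the presence or absence of a `main` output from the agent independently of its AI ports.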