#
tokens: 49462/50000 25/614 files (page 7/45)
lines: off (toggle) GitHub
raw markdown copy
This is page 7 of 45. Use http://codebase.md/czlonkowski/n8n-mcp?page={x} to view the full context.

# Directory Structure

```
├── _config.yml
├── .claude
│   └── agents
│       ├── code-reviewer.md
│       ├── context-manager.md
│       ├── debugger.md
│       ├── deployment-engineer.md
│       ├── mcp-backend-engineer.md
│       ├── n8n-mcp-tester.md
│       ├── technical-researcher.md
│       └── test-automator.md
├── .dockerignore
├── .env.docker
├── .env.example
├── .env.n8n.example
├── .env.test
├── .env.test.example
├── .github
│   ├── ABOUT.md
│   ├── BENCHMARK_THRESHOLDS.md
│   ├── FUNDING.yml
│   ├── gh-pages.yml
│   ├── secret_scanning.yml
│   └── workflows
│       ├── benchmark-pr.yml
│       ├── benchmark.yml
│       ├── docker-build-fast.yml
│       ├── docker-build-n8n.yml
│       ├── docker-build.yml
│       ├── release.yml
│       ├── test.yml
│       └── update-n8n-deps.yml
├── .gitignore
├── .npmignore
├── ATTRIBUTION.md
├── CHANGELOG.md
├── CLAUDE.md
├── codecov.yml
├── coverage.json
├── data
│   ├── .gitkeep
│   ├── nodes.db
│   ├── nodes.db-shm
│   ├── nodes.db-wal
│   └── templates.db
├── deploy
│   └── quick-deploy-n8n.sh
├── docker
│   ├── docker-entrypoint.sh
│   ├── n8n-mcp
│   ├── parse-config.js
│   └── README.md
├── docker-compose.buildkit.yml
├── docker-compose.extract.yml
├── docker-compose.n8n.yml
├── docker-compose.override.yml.example
├── docker-compose.test-n8n.yml
├── docker-compose.yml
├── Dockerfile
├── Dockerfile.railway
├── Dockerfile.test
├── docs
│   ├── AUTOMATED_RELEASES.md
│   ├── BENCHMARKS.md
│   ├── CHANGELOG.md
│   ├── CLAUDE_CODE_SETUP.md
│   ├── CLAUDE_INTERVIEW.md
│   ├── CODECOV_SETUP.md
│   ├── CODEX_SETUP.md
│   ├── CURSOR_SETUP.md
│   ├── DEPENDENCY_UPDATES.md
│   ├── DOCKER_README.md
│   ├── DOCKER_TROUBLESHOOTING.md
│   ├── FINAL_AI_VALIDATION_SPEC.md
│   ├── FLEXIBLE_INSTANCE_CONFIGURATION.md
│   ├── HTTP_DEPLOYMENT.md
│   ├── img
│   │   ├── cc_command.png
│   │   ├── cc_connected.png
│   │   ├── codex_connected.png
│   │   ├── cursor_tut.png
│   │   ├── Railway_api.png
│   │   ├── Railway_server_address.png
│   │   ├── vsc_ghcp_chat_agent_mode.png
│   │   ├── vsc_ghcp_chat_instruction_files.png
│   │   ├── vsc_ghcp_chat_thinking_tool.png
│   │   └── windsurf_tut.png
│   ├── INSTALLATION.md
│   ├── LIBRARY_USAGE.md
│   ├── local
│   │   ├── DEEP_DIVE_ANALYSIS_2025-10-02.md
│   │   ├── DEEP_DIVE_ANALYSIS_README.md
│   │   ├── Deep_dive_p1_p2.md
│   │   ├── integration-testing-plan.md
│   │   ├── integration-tests-phase1-summary.md
│   │   ├── N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md
│   │   ├── P0_IMPLEMENTATION_PLAN.md
│   │   └── TEMPLATE_MINING_ANALYSIS.md
│   ├── MCP_ESSENTIALS_README.md
│   ├── MCP_QUICK_START_GUIDE.md
│   ├── N8N_DEPLOYMENT.md
│   ├── RAILWAY_DEPLOYMENT.md
│   ├── README_CLAUDE_SETUP.md
│   ├── README.md
│   ├── tools-documentation-usage.md
│   ├── VS_CODE_PROJECT_SETUP.md
│   ├── WINDSURF_SETUP.md
│   └── workflow-diff-examples.md
├── examples
│   └── enhanced-documentation-demo.js
├── fetch_log.txt
├── LICENSE
├── MEMORY_N8N_UPDATE.md
├── MEMORY_TEMPLATE_UPDATE.md
├── monitor_fetch.sh
├── N8N_HTTP_STREAMABLE_SETUP.md
├── n8n-nodes.db
├── P0-R3-TEST-PLAN.md
├── package-lock.json
├── package.json
├── package.runtime.json
├── PRIVACY.md
├── railway.json
├── README.md
├── renovate.json
├── scripts
│   ├── analyze-optimization.sh
│   ├── audit-schema-coverage.ts
│   ├── build-optimized.sh
│   ├── compare-benchmarks.js
│   ├── demo-optimization.sh
│   ├── deploy-http.sh
│   ├── deploy-to-vm.sh
│   ├── export-webhook-workflows.ts
│   ├── extract-changelog.js
│   ├── extract-from-docker.js
│   ├── extract-nodes-docker.sh
│   ├── extract-nodes-simple.sh
│   ├── format-benchmark-results.js
│   ├── generate-benchmark-stub.js
│   ├── generate-detailed-reports.js
│   ├── generate-test-summary.js
│   ├── http-bridge.js
│   ├── mcp-http-client.js
│   ├── migrate-nodes-fts.ts
│   ├── migrate-tool-docs.ts
│   ├── n8n-docs-mcp.service
│   ├── nginx-n8n-mcp.conf
│   ├── prebuild-fts5.ts
│   ├── prepare-release.js
│   ├── publish-npm-quick.sh
│   ├── publish-npm.sh
│   ├── quick-test.ts
│   ├── run-benchmarks-ci.js
│   ├── sync-runtime-version.js
│   ├── test-ai-validation-debug.ts
│   ├── test-code-node-enhancements.ts
│   ├── test-code-node-fixes.ts
│   ├── test-docker-config.sh
│   ├── test-docker-fingerprint.ts
│   ├── test-docker-optimization.sh
│   ├── test-docker.sh
│   ├── test-empty-connection-validation.ts
│   ├── test-error-message-tracking.ts
│   ├── test-error-output-validation.ts
│   ├── test-error-validation.js
│   ├── test-essentials.ts
│   ├── test-expression-code-validation.ts
│   ├── test-expression-format-validation.js
│   ├── test-fts5-search.ts
│   ├── test-fuzzy-fix.ts
│   ├── test-fuzzy-simple.ts
│   ├── test-helpers-validation.ts
│   ├── test-http-search.ts
│   ├── test-http.sh
│   ├── test-jmespath-validation.ts
│   ├── test-multi-tenant-simple.ts
│   ├── test-multi-tenant.ts
│   ├── test-n8n-integration.sh
│   ├── test-node-info.js
│   ├── test-node-type-validation.ts
│   ├── test-nodes-base-prefix.ts
│   ├── test-operation-validation.ts
│   ├── test-optimized-docker.sh
│   ├── test-release-automation.js
│   ├── test-search-improvements.ts
│   ├── test-security.ts
│   ├── test-single-session.sh
│   ├── test-sqljs-triggers.ts
│   ├── test-telemetry-debug.ts
│   ├── test-telemetry-direct.ts
│   ├── test-telemetry-env.ts
│   ├── test-telemetry-integration.ts
│   ├── test-telemetry-no-select.ts
│   ├── test-telemetry-security.ts
│   ├── test-telemetry-simple.ts
│   ├── test-typeversion-validation.ts
│   ├── test-url-configuration.ts
│   ├── test-user-id-persistence.ts
│   ├── test-webhook-validation.ts
│   ├── test-workflow-insert.ts
│   ├── test-workflow-sanitizer.ts
│   ├── test-workflow-tracking-debug.ts
│   ├── update-and-publish-prep.sh
│   ├── update-n8n-deps.js
│   ├── update-readme-version.js
│   ├── vitest-benchmark-json-reporter.js
│   └── vitest-benchmark-reporter.ts
├── SECURITY.md
├── src
│   ├── config
│   │   └── n8n-api.ts
│   ├── data
│   │   └── canonical-ai-tool-examples.json
│   ├── database
│   │   ├── database-adapter.ts
│   │   ├── migrations
│   │   │   └── add-template-node-configs.sql
│   │   ├── node-repository.ts
│   │   ├── nodes.db
│   │   ├── schema-optimized.sql
│   │   └── schema.sql
│   ├── errors
│   │   └── validation-service-error.ts
│   ├── http-server-single-session.ts
│   ├── http-server.ts
│   ├── index.ts
│   ├── loaders
│   │   └── node-loader.ts
│   ├── mappers
│   │   └── docs-mapper.ts
│   ├── mcp
│   │   ├── handlers-n8n-manager.ts
│   │   ├── handlers-workflow-diff.ts
│   │   ├── index.ts
│   │   ├── server.ts
│   │   ├── stdio-wrapper.ts
│   │   ├── tool-docs
│   │   │   ├── configuration
│   │   │   │   ├── get-node-as-tool-info.ts
│   │   │   │   ├── get-node-documentation.ts
│   │   │   │   ├── get-node-essentials.ts
│   │   │   │   ├── get-node-info.ts
│   │   │   │   ├── get-property-dependencies.ts
│   │   │   │   ├── index.ts
│   │   │   │   └── search-node-properties.ts
│   │   │   ├── discovery
│   │   │   │   ├── get-database-statistics.ts
│   │   │   │   ├── index.ts
│   │   │   │   ├── list-ai-tools.ts
│   │   │   │   ├── list-nodes.ts
│   │   │   │   └── search-nodes.ts
│   │   │   ├── guides
│   │   │   │   ├── ai-agents-guide.ts
│   │   │   │   └── index.ts
│   │   │   ├── index.ts
│   │   │   ├── system
│   │   │   │   ├── index.ts
│   │   │   │   ├── n8n-diagnostic.ts
│   │   │   │   ├── n8n-health-check.ts
│   │   │   │   ├── n8n-list-available-tools.ts
│   │   │   │   └── tools-documentation.ts
│   │   │   ├── templates
│   │   │   │   ├── get-template.ts
│   │   │   │   ├── get-templates-for-task.ts
│   │   │   │   ├── index.ts
│   │   │   │   ├── list-node-templates.ts
│   │   │   │   ├── list-tasks.ts
│   │   │   │   ├── search-templates-by-metadata.ts
│   │   │   │   └── search-templates.ts
│   │   │   ├── types.ts
│   │   │   ├── validation
│   │   │   │   ├── index.ts
│   │   │   │   ├── validate-node-minimal.ts
│   │   │   │   ├── validate-node-operation.ts
│   │   │   │   ├── validate-workflow-connections.ts
│   │   │   │   ├── validate-workflow-expressions.ts
│   │   │   │   └── validate-workflow.ts
│   │   │   └── workflow_management
│   │   │       ├── index.ts
│   │   │       ├── n8n-autofix-workflow.ts
│   │   │       ├── n8n-create-workflow.ts
│   │   │       ├── n8n-delete-execution.ts
│   │   │       ├── n8n-delete-workflow.ts
│   │   │       ├── n8n-get-execution.ts
│   │   │       ├── n8n-get-workflow-details.ts
│   │   │       ├── n8n-get-workflow-minimal.ts
│   │   │       ├── n8n-get-workflow-structure.ts
│   │   │       ├── n8n-get-workflow.ts
│   │   │       ├── n8n-list-executions.ts
│   │   │       ├── n8n-list-workflows.ts
│   │   │       ├── n8n-trigger-webhook-workflow.ts
│   │   │       ├── n8n-update-full-workflow.ts
│   │   │       ├── n8n-update-partial-workflow.ts
│   │   │       └── n8n-validate-workflow.ts
│   │   ├── tools-documentation.ts
│   │   ├── tools-n8n-friendly.ts
│   │   ├── tools-n8n-manager.ts
│   │   ├── tools.ts
│   │   └── workflow-examples.ts
│   ├── mcp-engine.ts
│   ├── mcp-tools-engine.ts
│   ├── n8n
│   │   ├── MCPApi.credentials.ts
│   │   └── MCPNode.node.ts
│   ├── parsers
│   │   ├── node-parser.ts
│   │   ├── property-extractor.ts
│   │   └── simple-parser.ts
│   ├── scripts
│   │   ├── debug-http-search.ts
│   │   ├── extract-from-docker.ts
│   │   ├── fetch-templates-robust.ts
│   │   ├── fetch-templates.ts
│   │   ├── rebuild-database.ts
│   │   ├── rebuild-optimized.ts
│   │   ├── rebuild.ts
│   │   ├── sanitize-templates.ts
│   │   ├── seed-canonical-ai-examples.ts
│   │   ├── test-autofix-documentation.ts
│   │   ├── test-autofix-workflow.ts
│   │   ├── test-execution-filtering.ts
│   │   ├── test-node-suggestions.ts
│   │   ├── test-protocol-negotiation.ts
│   │   ├── test-summary.ts
│   │   ├── test-webhook-autofix.ts
│   │   ├── validate.ts
│   │   └── validation-summary.ts
│   ├── services
│   │   ├── ai-node-validator.ts
│   │   ├── ai-tool-validators.ts
│   │   ├── confidence-scorer.ts
│   │   ├── config-validator.ts
│   │   ├── enhanced-config-validator.ts
│   │   ├── example-generator.ts
│   │   ├── execution-processor.ts
│   │   ├── expression-format-validator.ts
│   │   ├── expression-validator.ts
│   │   ├── n8n-api-client.ts
│   │   ├── n8n-validation.ts
│   │   ├── node-documentation-service.ts
│   │   ├── node-similarity-service.ts
│   │   ├── node-specific-validators.ts
│   │   ├── operation-similarity-service.ts
│   │   ├── property-dependencies.ts
│   │   ├── property-filter.ts
│   │   ├── resource-similarity-service.ts
│   │   ├── sqlite-storage-service.ts
│   │   ├── task-templates.ts
│   │   ├── universal-expression-validator.ts
│   │   ├── workflow-auto-fixer.ts
│   │   ├── workflow-diff-engine.ts
│   │   └── workflow-validator.ts
│   ├── telemetry
│   │   ├── batch-processor.ts
│   │   ├── config-manager.ts
│   │   ├── early-error-logger.ts
│   │   ├── error-sanitization-utils.ts
│   │   ├── error-sanitizer.ts
│   │   ├── event-tracker.ts
│   │   ├── event-validator.ts
│   │   ├── index.ts
│   │   ├── performance-monitor.ts
│   │   ├── rate-limiter.ts
│   │   ├── startup-checkpoints.ts
│   │   ├── telemetry-error.ts
│   │   ├── telemetry-manager.ts
│   │   ├── telemetry-types.ts
│   │   └── workflow-sanitizer.ts
│   ├── templates
│   │   ├── batch-processor.ts
│   │   ├── metadata-generator.ts
│   │   ├── README.md
│   │   ├── template-fetcher.ts
│   │   ├── template-repository.ts
│   │   └── template-service.ts
│   ├── types
│   │   ├── index.ts
│   │   ├── instance-context.ts
│   │   ├── n8n-api.ts
│   │   ├── node-types.ts
│   │   └── workflow-diff.ts
│   └── utils
│       ├── auth.ts
│       ├── bridge.ts
│       ├── cache-utils.ts
│       ├── console-manager.ts
│       ├── documentation-fetcher.ts
│       ├── enhanced-documentation-fetcher.ts
│       ├── error-handler.ts
│       ├── example-generator.ts
│       ├── fixed-collection-validator.ts
│       ├── logger.ts
│       ├── mcp-client.ts
│       ├── n8n-errors.ts
│       ├── node-source-extractor.ts
│       ├── node-type-normalizer.ts
│       ├── node-type-utils.ts
│       ├── node-utils.ts
│       ├── npm-version-checker.ts
│       ├── protocol-version.ts
│       ├── simple-cache.ts
│       ├── ssrf-protection.ts
│       ├── template-node-resolver.ts
│       ├── template-sanitizer.ts
│       ├── url-detector.ts
│       ├── validation-schemas.ts
│       └── version.ts
├── test-output.txt
├── test-reinit-fix.sh
├── tests
│   ├── __snapshots__
│   │   └── .gitkeep
│   ├── auth.test.ts
│   ├── benchmarks
│   │   ├── database-queries.bench.ts
│   │   ├── index.ts
│   │   ├── mcp-tools.bench.ts
│   │   ├── mcp-tools.bench.ts.disabled
│   │   ├── mcp-tools.bench.ts.skip
│   │   ├── node-loading.bench.ts.disabled
│   │   ├── README.md
│   │   ├── search-operations.bench.ts.disabled
│   │   └── validation-performance.bench.ts.disabled
│   ├── bridge.test.ts
│   ├── comprehensive-extraction-test.js
│   ├── data
│   │   └── .gitkeep
│   ├── debug-slack-doc.js
│   ├── demo-enhanced-documentation.js
│   ├── docker-tests-README.md
│   ├── error-handler.test.ts
│   ├── examples
│   │   └── using-database-utils.test.ts
│   ├── extracted-nodes-db
│   │   ├── database-import.json
│   │   ├── extraction-report.json
│   │   ├── insert-nodes.sql
│   │   ├── n8n-nodes-base__Airtable.json
│   │   ├── n8n-nodes-base__Discord.json
│   │   ├── n8n-nodes-base__Function.json
│   │   ├── n8n-nodes-base__HttpRequest.json
│   │   ├── n8n-nodes-base__If.json
│   │   ├── n8n-nodes-base__Slack.json
│   │   ├── n8n-nodes-base__SplitInBatches.json
│   │   └── n8n-nodes-base__Webhook.json
│   ├── factories
│   │   ├── node-factory.ts
│   │   └── property-definition-factory.ts
│   ├── fixtures
│   │   ├── .gitkeep
│   │   ├── database
│   │   │   └── test-nodes.json
│   │   ├── factories
│   │   │   ├── node.factory.ts
│   │   │   └── parser-node.factory.ts
│   │   └── template-configs.ts
│   ├── helpers
│   │   └── env-helpers.ts
│   ├── http-server-auth.test.ts
│   ├── integration
│   │   ├── ai-validation
│   │   │   ├── ai-agent-validation.test.ts
│   │   │   ├── ai-tool-validation.test.ts
│   │   │   ├── chat-trigger-validation.test.ts
│   │   │   ├── e2e-validation.test.ts
│   │   │   ├── helpers.ts
│   │   │   ├── llm-chain-validation.test.ts
│   │   │   ├── README.md
│   │   │   └── TEST_REPORT.md
│   │   ├── ci
│   │   │   └── database-population.test.ts
│   │   ├── database
│   │   │   ├── connection-management.test.ts
│   │   │   ├── empty-database.test.ts
│   │   │   ├── fts5-search.test.ts
│   │   │   ├── node-fts5-search.test.ts
│   │   │   ├── node-repository.test.ts
│   │   │   ├── performance.test.ts
│   │   │   ├── template-node-configs.test.ts
│   │   │   ├── template-repository.test.ts
│   │   │   ├── test-utils.ts
│   │   │   └── transactions.test.ts
│   │   ├── database-integration.test.ts
│   │   ├── docker
│   │   │   ├── docker-config.test.ts
│   │   │   ├── docker-entrypoint.test.ts
│   │   │   └── test-helpers.ts
│   │   ├── flexible-instance-config.test.ts
│   │   ├── mcp
│   │   │   └── template-examples-e2e.test.ts
│   │   ├── mcp-protocol
│   │   │   ├── basic-connection.test.ts
│   │   │   ├── error-handling.test.ts
│   │   │   ├── performance.test.ts
│   │   │   ├── protocol-compliance.test.ts
│   │   │   ├── README.md
│   │   │   ├── session-management.test.ts
│   │   │   ├── test-helpers.ts
│   │   │   ├── tool-invocation.test.ts
│   │   │   └── workflow-error-validation.test.ts
│   │   ├── msw-setup.test.ts
│   │   ├── n8n-api
│   │   │   ├── executions
│   │   │   │   ├── delete-execution.test.ts
│   │   │   │   ├── get-execution.test.ts
│   │   │   │   ├── list-executions.test.ts
│   │   │   │   └── trigger-webhook.test.ts
│   │   │   ├── scripts
│   │   │   │   └── cleanup-orphans.ts
│   │   │   ├── system
│   │   │   │   ├── diagnostic.test.ts
│   │   │   │   ├── health-check.test.ts
│   │   │   │   └── list-tools.test.ts
│   │   │   ├── test-connection.ts
│   │   │   ├── types
│   │   │   │   └── mcp-responses.ts
│   │   │   ├── utils
│   │   │   │   ├── cleanup-helpers.ts
│   │   │   │   ├── credentials.ts
│   │   │   │   ├── factories.ts
│   │   │   │   ├── fixtures.ts
│   │   │   │   ├── mcp-context.ts
│   │   │   │   ├── n8n-client.ts
│   │   │   │   ├── node-repository.ts
│   │   │   │   ├── response-types.ts
│   │   │   │   ├── test-context.ts
│   │   │   │   └── webhook-workflows.ts
│   │   │   └── workflows
│   │   │       ├── autofix-workflow.test.ts
│   │   │       ├── create-workflow.test.ts
│   │   │       ├── delete-workflow.test.ts
│   │   │       ├── get-workflow-details.test.ts
│   │   │       ├── get-workflow-minimal.test.ts
│   │   │       ├── get-workflow-structure.test.ts
│   │   │       ├── get-workflow.test.ts
│   │   │       ├── list-workflows.test.ts
│   │   │       ├── smart-parameters.test.ts
│   │   │       ├── update-partial-workflow.test.ts
│   │   │       ├── update-workflow.test.ts
│   │   │       └── validate-workflow.test.ts
│   │   ├── security
│   │   │   ├── command-injection-prevention.test.ts
│   │   │   └── rate-limiting.test.ts
│   │   ├── setup
│   │   │   ├── integration-setup.ts
│   │   │   └── msw-test-server.ts
│   │   ├── telemetry
│   │   │   ├── docker-user-id-stability.test.ts
│   │   │   └── mcp-telemetry.test.ts
│   │   ├── templates
│   │   │   └── metadata-operations.test.ts
│   │   └── workflow-creation-node-type-format.test.ts
│   ├── logger.test.ts
│   ├── MOCKING_STRATEGY.md
│   ├── mocks
│   │   ├── n8n-api
│   │   │   ├── data
│   │   │   │   ├── credentials.ts
│   │   │   │   ├── executions.ts
│   │   │   │   └── workflows.ts
│   │   │   ├── handlers.ts
│   │   │   └── index.ts
│   │   └── README.md
│   ├── node-storage-export.json
│   ├── setup
│   │   ├── global-setup.ts
│   │   ├── msw-setup.ts
│   │   ├── TEST_ENV_DOCUMENTATION.md
│   │   └── test-env.ts
│   ├── test-database-extraction.js
│   ├── test-direct-extraction.js
│   ├── test-enhanced-documentation.js
│   ├── test-enhanced-integration.js
│   ├── test-mcp-extraction.js
│   ├── test-mcp-server-extraction.js
│   ├── test-mcp-tools-integration.js
│   ├── test-node-documentation-service.js
│   ├── test-node-list.js
│   ├── test-package-info.js
│   ├── test-parsing-operations.js
│   ├── test-slack-node-complete.js
│   ├── test-small-rebuild.js
│   ├── test-sqlite-search.js
│   ├── test-storage-system.js
│   ├── unit
│   │   ├── __mocks__
│   │   │   ├── n8n-nodes-base.test.ts
│   │   │   ├── n8n-nodes-base.ts
│   │   │   └── README.md
│   │   ├── database
│   │   │   ├── __mocks__
│   │   │   │   └── better-sqlite3.ts
│   │   │   ├── database-adapter-unit.test.ts
│   │   │   ├── node-repository-core.test.ts
│   │   │   ├── node-repository-operations.test.ts
│   │   │   ├── node-repository-outputs.test.ts
│   │   │   ├── README.md
│   │   │   └── template-repository-core.test.ts
│   │   ├── docker
│   │   │   ├── config-security.test.ts
│   │   │   ├── edge-cases.test.ts
│   │   │   ├── parse-config.test.ts
│   │   │   └── serve-command.test.ts
│   │   ├── errors
│   │   │   └── validation-service-error.test.ts
│   │   ├── examples
│   │   │   └── using-n8n-nodes-base-mock.test.ts
│   │   ├── flexible-instance-security-advanced.test.ts
│   │   ├── flexible-instance-security.test.ts
│   │   ├── http-server
│   │   │   └── multi-tenant-support.test.ts
│   │   ├── http-server-n8n-mode.test.ts
│   │   ├── http-server-n8n-reinit.test.ts
│   │   ├── http-server-session-management.test.ts
│   │   ├── loaders
│   │   │   └── node-loader.test.ts
│   │   ├── mappers
│   │   │   └── docs-mapper.test.ts
│   │   ├── mcp
│   │   │   ├── get-node-essentials-examples.test.ts
│   │   │   ├── handlers-n8n-manager-simple.test.ts
│   │   │   ├── handlers-n8n-manager.test.ts
│   │   │   ├── handlers-workflow-diff.test.ts
│   │   │   ├── lru-cache-behavior.test.ts
│   │   │   ├── multi-tenant-tool-listing.test.ts.disabled
│   │   │   ├── parameter-validation.test.ts
│   │   │   ├── search-nodes-examples.test.ts
│   │   │   ├── tools-documentation.test.ts
│   │   │   └── tools.test.ts
│   │   ├── monitoring
│   │   │   └── cache-metrics.test.ts
│   │   ├── MULTI_TENANT_TEST_COVERAGE.md
│   │   ├── multi-tenant-integration.test.ts
│   │   ├── parsers
│   │   │   ├── node-parser-outputs.test.ts
│   │   │   ├── node-parser.test.ts
│   │   │   ├── property-extractor.test.ts
│   │   │   └── simple-parser.test.ts
│   │   ├── scripts
│   │   │   └── fetch-templates-extraction.test.ts
│   │   ├── services
│   │   │   ├── ai-node-validator.test.ts
│   │   │   ├── ai-tool-validators.test.ts
│   │   │   ├── confidence-scorer.test.ts
│   │   │   ├── config-validator-basic.test.ts
│   │   │   ├── config-validator-edge-cases.test.ts
│   │   │   ├── config-validator-node-specific.test.ts
│   │   │   ├── config-validator-security.test.ts
│   │   │   ├── debug-validator.test.ts
│   │   │   ├── enhanced-config-validator-integration.test.ts
│   │   │   ├── enhanced-config-validator-operations.test.ts
│   │   │   ├── enhanced-config-validator.test.ts
│   │   │   ├── example-generator.test.ts
│   │   │   ├── execution-processor.test.ts
│   │   │   ├── expression-format-validator.test.ts
│   │   │   ├── expression-validator-edge-cases.test.ts
│   │   │   ├── expression-validator.test.ts
│   │   │   ├── fixed-collection-validation.test.ts
│   │   │   ├── loop-output-edge-cases.test.ts
│   │   │   ├── n8n-api-client.test.ts
│   │   │   ├── n8n-validation.test.ts
│   │   │   ├── node-similarity-service.test.ts
│   │   │   ├── node-specific-validators.test.ts
│   │   │   ├── operation-similarity-service-comprehensive.test.ts
│   │   │   ├── operation-similarity-service.test.ts
│   │   │   ├── property-dependencies.test.ts
│   │   │   ├── property-filter-edge-cases.test.ts
│   │   │   ├── property-filter.test.ts
│   │   │   ├── resource-similarity-service-comprehensive.test.ts
│   │   │   ├── resource-similarity-service.test.ts
│   │   │   ├── task-templates.test.ts
│   │   │   ├── template-service.test.ts
│   │   │   ├── universal-expression-validator.test.ts
│   │   │   ├── validation-fixes.test.ts
│   │   │   ├── workflow-auto-fixer.test.ts
│   │   │   ├── workflow-diff-engine.test.ts
│   │   │   ├── workflow-fixed-collection-validation.test.ts
│   │   │   ├── workflow-validator-comprehensive.test.ts
│   │   │   ├── workflow-validator-edge-cases.test.ts
│   │   │   ├── workflow-validator-error-outputs.test.ts
│   │   │   ├── workflow-validator-expression-format.test.ts
│   │   │   ├── workflow-validator-loops-simple.test.ts
│   │   │   ├── workflow-validator-loops.test.ts
│   │   │   ├── workflow-validator-mocks.test.ts
│   │   │   ├── workflow-validator-performance.test.ts
│   │   │   ├── workflow-validator-with-mocks.test.ts
│   │   │   └── workflow-validator.test.ts
│   │   ├── telemetry
│   │   │   ├── batch-processor.test.ts
│   │   │   ├── config-manager.test.ts
│   │   │   ├── event-tracker.test.ts
│   │   │   ├── event-validator.test.ts
│   │   │   ├── rate-limiter.test.ts
│   │   │   ├── telemetry-error.test.ts
│   │   │   ├── telemetry-manager.test.ts
│   │   │   ├── v2.18.3-fixes-verification.test.ts
│   │   │   └── workflow-sanitizer.test.ts
│   │   ├── templates
│   │   │   ├── batch-processor.test.ts
│   │   │   ├── metadata-generator.test.ts
│   │   │   ├── template-repository-metadata.test.ts
│   │   │   └── template-repository-security.test.ts
│   │   ├── test-env-example.test.ts
│   │   ├── test-infrastructure.test.ts
│   │   ├── types
│   │   │   ├── instance-context-coverage.test.ts
│   │   │   └── instance-context-multi-tenant.test.ts
│   │   ├── utils
│   │   │   ├── auth-timing-safe.test.ts
│   │   │   ├── cache-utils.test.ts
│   │   │   ├── console-manager.test.ts
│   │   │   ├── database-utils.test.ts
│   │   │   ├── fixed-collection-validator.test.ts
│   │   │   ├── n8n-errors.test.ts
│   │   │   ├── node-type-normalizer.test.ts
│   │   │   ├── node-type-utils.test.ts
│   │   │   ├── node-utils.test.ts
│   │   │   ├── simple-cache-memory-leak-fix.test.ts
│   │   │   ├── ssrf-protection.test.ts
│   │   │   └── template-node-resolver.test.ts
│   │   └── validation-fixes.test.ts
│   └── utils
│       ├── assertions.ts
│       ├── builders
│       │   └── workflow.builder.ts
│       ├── data-generators.ts
│       ├── database-utils.ts
│       ├── README.md
│       └── test-helpers.ts
├── thumbnail.png
├── tsconfig.build.json
├── tsconfig.json
├── types
│   ├── mcp.d.ts
│   └── test-env.d.ts
├── verify-telemetry-fix.js
├── versioned-nodes.md
├── vitest.config.benchmark.ts
├── vitest.config.integration.ts
└── vitest.config.ts
```

# Files

--------------------------------------------------------------------------------
/src/mcp/tool-docs/workflow_management/n8n-trigger-webhook-workflow.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const n8nTriggerWebhookWorkflowDoc: ToolDocumentation = {
  name: 'n8n_trigger_webhook_workflow',
  category: 'workflow_management',
  essentials: {
    description: 'Trigger workflow via webhook. Must be ACTIVE with Webhook node. Method must match config.',
    keyParameters: ['webhookUrl', 'httpMethod', 'data'],
    example: 'n8n_trigger_webhook_workflow({webhookUrl: "https://n8n.example.com/webhook/abc-def-ghi"})',
    performance: 'Immediate trigger, response time depends on workflow complexity',
    tips: [
      'Workflow MUST be active and contain a Webhook node for triggering',
      'HTTP method must match webhook node configuration (often GET)',
      'Use waitForResponse:false for async execution without waiting'
    ]
  },
  full: {
    description: `Triggers a workflow execution via its webhook URL. This is the primary method for external systems to start n8n workflows. The target workflow must be active and contain a properly configured Webhook node as the trigger. The HTTP method used must match the webhook configuration.`,
    parameters: {
      webhookUrl: {
        type: 'string',
        required: true,
        description: 'Full webhook URL from n8n workflow (e.g., https://n8n.example.com/webhook/abc-def-ghi)'
      },
      httpMethod: {
        type: 'string',
        required: false,
        enum: ['GET', 'POST', 'PUT', 'DELETE'],
        description: 'HTTP method (must match webhook configuration, often GET). Defaults to GET if not specified'
      },
      data: {
        type: 'object',
        required: false,
        description: 'Data to send with the webhook request. For GET requests, becomes query parameters'
      },
      headers: {
        type: 'object',
        required: false,
        description: 'Additional HTTP headers to include in the request'
      },
      waitForResponse: {
        type: 'boolean',
        required: false,
        description: 'Wait for workflow completion and return results (default: true). Set to false for fire-and-forget'
      }
    },
    returns: `Webhook response data if waitForResponse is true, or immediate acknowledgment if false. Response format depends on webhook node configuration.`,
    examples: [
      'n8n_trigger_webhook_workflow({webhookUrl: "https://n8n.example.com/webhook/order-process"}) - Trigger with GET',
      'n8n_trigger_webhook_workflow({webhookUrl: "https://n8n.example.com/webhook/data-import", httpMethod: "POST", data: {name: "John", email: "[email protected]"}}) - POST with data',
      'n8n_trigger_webhook_workflow({webhookUrl: "https://n8n.example.com/webhook/async-job", waitForResponse: false}) - Fire and forget',
      'n8n_trigger_webhook_workflow({webhookUrl: "https://n8n.example.com/webhook/api", headers: {"API-Key": "secret"}}) - With auth headers'
    ],
    useCases: [
      'Trigger data processing workflows from external applications',
      'Start scheduled jobs manually via webhook',
      'Integrate n8n workflows with third-party services',
      'Create REST API endpoints using n8n workflows',
      'Implement event-driven architectures with n8n'
    ],
    performance: `Performance varies based on workflow complexity and waitForResponse setting. Synchronous calls (waitForResponse: true) block until workflow completes. For long-running workflows, use async mode (waitForResponse: false) and monitor execution separately.`,
    errorHandling: `**Enhanced Error Messages with Execution Guidance**

When a webhook trigger fails, the error response now includes specific guidance to help debug the issue:

**Error with Execution ID** (workflow started but failed):
- Format: "Workflow {workflowId} execution {executionId} failed. Use n8n_get_execution({id: '{executionId}', mode: 'preview'}) to investigate the error."
- Response includes: executionId and workflowId fields for direct access
- Recommended action: Use n8n_get_execution with mode='preview' for fast, efficient error inspection

**Error without Execution ID** (workflow didn't start):
- Format: "Workflow failed to execute. Use n8n_list_executions to find recent executions, then n8n_get_execution with mode='preview' to investigate."
- Recommended action: Check recent executions with n8n_list_executions

**Why mode='preview'?**
- Fast: <50ms response time
- Efficient: ~500 tokens (vs 50K+ for full mode)
- Safe: No timeout or token limit risks
- Informative: Shows structure, counts, and error details
- Provides recommendations for fetching more data if needed

**Example Error Responses**:
\`\`\`json
{
  "success": false,
  "error": "Workflow wf_123 execution exec_456 failed. Use n8n_get_execution({id: 'exec_456', mode: 'preview'}) to investigate the error.",
  "executionId": "exec_456",
  "workflowId": "wf_123",
  "code": "SERVER_ERROR"
}
\`\`\`

**Investigation Workflow**:
1. Trigger returns error with execution ID
2. Call n8n_get_execution({id: executionId, mode: 'preview'}) to see structure and error
3. Based on preview recommendation, fetch more data if needed
4. Fix issues in workflow and retry`,
    bestPractices: [
      'Always verify workflow is active before attempting webhook triggers',
      'Match HTTP method exactly with webhook node configuration',
      'Use async mode (waitForResponse: false) for long-running workflows',
      'Include authentication headers when webhook requires them',
      'Test webhook URL manually first to ensure it works',
      'When errors occur, use n8n_get_execution with mode="preview" first for efficient debugging',
      'Store execution IDs from error responses for later investigation'
    ],
    pitfalls: [
      'Workflow must be ACTIVE - inactive workflows cannot be triggered',
      'HTTP method mismatch returns 404 even if URL is correct',
      'Webhook node must be the trigger node in the workflow',
      'Timeout errors occur with long workflows in sync mode',
      'Data format must match webhook node expectations',
      'Error messages always include n8n_get_execution guidance - follow the suggested steps for efficient debugging',
      'Execution IDs in error responses are crucial for debugging - always check for and use them'
    ],
    relatedTools: ['n8n_get_execution', 'n8n_list_executions', 'n8n_get_workflow', 'n8n_create_workflow']
  }
};
```

--------------------------------------------------------------------------------
/versioned-nodes.md:
--------------------------------------------------------------------------------

```markdown
# Versioned Nodes in n8n

This document lists all nodes that have `version` defined as an array in their description.

## From n8n-nodes-base package:

1. **Airtop** - `Airtop.node.js`
2. **Cal Trigger** - `CalTrigger.node.js`
3. **Coda** - `Coda.node.js`
4. **Code** - `Code.node.js` - version: [1, 2]
5. **Compare Datasets** - `CompareDatasets.node.js`
6. **Compression** - `Compression.node.js`
7. **Convert To File** - `ConvertToFile.node.js`
8. **Email Send V2** - `EmailSendV2.node.js`
9. **Execute Workflow** - `ExecuteWorkflow.node.js`
10. **Execute Workflow Trigger** - `ExecuteWorkflowTrigger.node.js`
11. **Filter V2** - `FilterV2.node.js`
12. **Form Trigger V2** - `FormTriggerV2.node.js`
13. **GitHub** - `Github.node.js`
14. **Gmail Trigger** - `GmailTrigger.node.js`
15. **Gmail V2** - `GmailV2.node.js`
16. **Google Books** - `GoogleBooks.node.js`
17. **Google Calendar** - `GoogleCalendar.node.js`
18. **Google Docs** - `GoogleDocs.node.js`
19. **Google Drive V1** - `GoogleDriveV1.node.js`
20. **Google Firebase Cloud Firestore** - `GoogleFirebaseCloudFirestore.node.js`
21. **Google Slides** - `GoogleSlides.node.js`
22. **Google Translate** - `GoogleTranslate.node.js`
23. **GraphQL** - `GraphQL.node.js`
24. **HTML** - `Html.node.js`
25. **HTTP Request V3** - `HttpRequestV3.node.js` - version: [3, 4, 4.1, 4.2]
26. **HubSpot V2** - `HubspotV2.node.js`
27. **If V2** - `IfV2.node.js`
28. **Invoice Ninja** - `InvoiceNinja.node.js`
29. **Invoice Ninja Trigger** - `InvoiceNinjaTrigger.node.js`
30. **Item Lists V2** - `ItemListsV2.node.js`
31. **Jira Trigger** - `JiraTrigger.node.js`
32. **Kafka Trigger** - `KafkaTrigger.node.js`
33. **MailerLite Trigger V2** - `MailerLiteTriggerV2.node.js`
34. **MailerLite V2** - `MailerLiteV2.node.js`
35. **Merge V2** - `MergeV2.node.js`
36. **Microsoft SQL** - `MicrosoftSql.node.js`
37. **Microsoft Teams V1** - `MicrosoftTeamsV1.node.js`
38. **Mindee** - `Mindee.node.js`
39. **MongoDB** - `MongoDb.node.js`
40. **Move Binary Data** - `MoveBinaryData.node.js`
41. **NocoDB** - `NocoDB.node.js`
42. **OpenAI** - `OpenAi.node.js`
43. **Pipedrive Trigger** - `PipedriveTrigger.node.js`
44. **RabbitMQ** - `RabbitMQ.node.js`
45. **Remove Duplicates V1** - `RemoveDuplicatesV1.node.js`
46. **Remove Duplicates V2** - `RemoveDuplicatesV2.node.js`
47. **Respond To Webhook** - `RespondToWebhook.node.js`
48. **RSS Feed Read** - `RssFeedRead.node.js`
49. **Schedule Trigger** - `ScheduleTrigger.node.js`
50. **Set V1** - `SetV1.node.js`
51. **Set V2** - `SetV2.node.js`
52. **Slack V2** - `SlackV2.node.js`
53. **Strava** - `Strava.node.js`
54. **Summarize** - `Summarize.node.js`
55. **Switch V1** - `SwitchV1.node.js`
56. **Switch V2** - `SwitchV2.node.js`
57. **Switch V3** - `SwitchV3.node.js`
58. **Telegram** - `Telegram.node.js`
59. **Telegram Trigger** - `TelegramTrigger.node.js`
60. **The Hive Trigger** - `TheHiveTrigger.node.js`
61. **Todoist V2** - `TodoistV2.node.js`
62. **Twilio Trigger** - `TwilioTrigger.node.js`
63. **Typeform Trigger** - `TypeformTrigger.node.js`
64. **Wait** - `Wait.node.js`
65. **Webhook** - `Webhook.node.js` - version: [1, 1.1, 2]

## From @n8n/n8n-nodes-langchain package:

1. **Agent V1** - `AgentV1.node.js`
2. **Chain LLM** - `ChainLlm.node.js`
3. **Chain Retrieval QA** - `ChainRetrievalQa.node.js`
4. **Chain Summarization V2** - `ChainSummarizationV2.node.js`
5. **Chat Trigger** - `ChatTrigger.node.js`
6. **Document Default Data Loader** - `DocumentDefaultDataLoader.node.js`
7. **Document GitHub Loader** - `DocumentGithubLoader.node.js`
8. **Embeddings OpenAI** - `EmbeddingsOpenAi.node.js`
9. **Information Extractor** - `InformationExtractor.node.js`
10. **LM Chat Anthropic** - `LmChatAnthropic.node.js`
11. **LM Chat DeepSeek** - `LmChatDeepSeek.node.js`
12. **LM Chat OpenAI** - `LmChatOpenAi.node.js`
13. **LM Chat OpenRouter** - `LmChatOpenRouter.node.js`
14. **LM Chat xAI Grok** - `LmChatXAiGrok.node.js`
15. **Manual Chat Trigger** - `ManualChatTrigger.node.js`
16. **MCP Trigger** - `McpTrigger.node.js`
17. **Memory Buffer Window** - `MemoryBufferWindow.node.js`
18. **Memory Manager** - `MemoryManager.node.js`
19. **Memory MongoDB Chat** - `MemoryMongoDbChat.node.js`
20. **Memory Motorhead** - `MemoryMotorhead.node.js`
21. **Memory Postgres Chat** - `MemoryPostgresChat.node.js`
22. **Memory Redis Chat** - `MemoryRedisChat.node.js`
23. **Memory Xata** - `MemoryXata.node.js`
24. **Memory Zep** - `MemoryZep.node.js`
25. **OpenAI Assistant** - `OpenAiAssistant.node.js`
26. **Output Parser Structured** - `OutputParserStructured.node.js`
27. **Retriever Workflow** - `RetrieverWorkflow.node.js`
28. **Sentiment Analysis** - `SentimentAnalysis.node.js`
29. **Text Classifier** - `TextClassifier.node.js`
30. **Tool Code** - `ToolCode.node.js`
31. **Tool HTTP Request** - `ToolHttpRequest.node.js`
32. **Tool Vector Store** - `ToolVectorStore.node.js`

## Examples of Version Arrays Found

Here are some specific examples of version arrays from actual nodes:

### n8n-nodes-base:
- **Code**: `version: [1, 2]`
- **HTTP Request V3**: `version: [3, 4, 4.1, 4.2]`
- **Webhook**: `version: [1, 1.1, 2]`
- **Wait**: `version: [1, 1.1]`
- **Schedule Trigger**: `version: [1, 1.1, 1.2]`
- **Switch V3**: `version: [3, 3.1, 3.2]`
- **Set V2**: `version: [3, 3.1, 3.2, 3.3, 3.4]`

### @n8n/n8n-nodes-langchain:
- **LM Chat OpenAI**: `version: [1, 1.1, 1.2]`
- **Chain LLM**: `version: [1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7]`
- **Tool HTTP Request**: `version: [1, 1.1]`

## Summary

Total nodes with version arrays: **97 nodes**
- From n8n-nodes-base: 65 nodes
- From @n8n/n8n-nodes-langchain: 32 nodes

These nodes use versioning to maintain backward compatibility while introducing new features or changes to their interface. The version array pattern allows n8n to:
1. Support multiple versions of the same node
2. Maintain backward compatibility with existing workflows
3. Introduce breaking changes in newer versions while keeping old versions functional
4. Use `defaultVersion` to specify which version new instances should use

Common version patterns observed:
- Simple incremental: `[1, 2]`, `[1, 2, 3]`
- Minor versions: `[1, 1.1, 1.2]` (common for bug fixes)
- Patch versions: `[3, 4, 4.1, 4.2]` (detailed version tracking)
- Extended versions: `[1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7]` (Chain LLM has the most versions)
```

--------------------------------------------------------------------------------
/src/types/node-types.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * TypeScript type definitions for n8n node parsing
 *
 * This file provides strong typing for node classes and instances,
 * preventing bugs like the v2.17.4 baseDescription issue where
 * TypeScript couldn't catch property name mistakes due to `any` types.
 *
 * @module types/node-types
 * @since 2.17.5
 */

// Import n8n's official interfaces
import type {
  IVersionedNodeType,
  INodeType,
  INodeTypeBaseDescription,
  INodeTypeDescription
} from 'n8n-workflow';

/**
 * Represents a node class that can be either:
 * - A constructor function that returns INodeType
 * - A constructor function that returns IVersionedNodeType
 * - An already-instantiated node instance
 *
 * This covers all patterns we encounter when loading nodes from n8n packages.
 */
export type NodeClass =
  | (new () => INodeType)
  | (new () => IVersionedNodeType)
  | INodeType
  | IVersionedNodeType;

/**
 * Instance of a versioned node type with all properties accessible.
 *
 * This represents nodes that use n8n's VersionedNodeType pattern,
 * such as AI Agent, HTTP Request, Slack, etc.
 *
 * @property currentVersion - The computed current version (defaultVersion ?? max(nodeVersions))
 * @property description - Base description stored as 'description' (NOT 'baseDescription')
 * @property nodeVersions - Map of version numbers to INodeType implementations
 *
 * @example
 * ```typescript
 * const aiAgent = new AIAgentNode() as VersionedNodeInstance;
 * console.log(aiAgent.currentVersion); // 2.2
 * console.log(aiAgent.description.defaultVersion); // 2.2
 * console.log(aiAgent.nodeVersions[1]); // INodeType for version 1
 * ```
 */
export interface VersionedNodeInstance extends IVersionedNodeType {
  currentVersion: number;
  description: INodeTypeBaseDescription;
  nodeVersions: {
    [version: number]: INodeType;
  };
}

/**
 * Instance of a regular (non-versioned) node type.
 *
 * This represents simple nodes that don't use versioning,
 * such as Edit Fields, Set, Code (v1), etc.
 */
export interface RegularNodeInstance extends INodeType {
  description: INodeTypeDescription;
}

/**
 * Union type for any node instance (versioned or regular).
 *
 * Use this when you need to handle both types of nodes.
 */
export type NodeInstance = VersionedNodeInstance | RegularNodeInstance;

/**
 * Type guard to check if a node is a VersionedNodeType instance.
 *
 * This provides runtime type safety and enables TypeScript to narrow
 * the type within conditional blocks.
 *
 * @param node - The node instance to check
 * @returns True if node is a VersionedNodeInstance
 *
 * @example
 * ```typescript
 * const instance = new nodeClass();
 * if (isVersionedNodeInstance(instance)) {
 *   // TypeScript knows instance is VersionedNodeInstance here
 *   console.log(instance.currentVersion);
 *   console.log(instance.nodeVersions);
 * }
 * ```
 */
export function isVersionedNodeInstance(node: any): node is VersionedNodeInstance {
  return (
    node !== null &&
    typeof node === 'object' &&
    'nodeVersions' in node &&
    'currentVersion' in node &&
    'description' in node &&
    typeof node.currentVersion === 'number'
  );
}

/**
 * Type guard to check if a value is a VersionedNodeType class.
 *
 * This checks the constructor name pattern used by n8n's VersionedNodeType.
 *
 * @param nodeClass - The class or value to check
 * @returns True if nodeClass is a VersionedNodeType constructor
 *
 * @example
 * ```typescript
 * if (isVersionedNodeClass(nodeClass)) {
 *   // It's a VersionedNodeType class
 *   const instance = new nodeClass() as VersionedNodeInstance;
 * }
 * ```
 */
export function isVersionedNodeClass(nodeClass: any): boolean {
  return (
    typeof nodeClass === 'function' &&
    nodeClass.prototype?.constructor?.name === 'VersionedNodeType'
  );
}

/**
 * Safely instantiate a node class with proper error handling.
 *
 * Some nodes require specific parameters or environment setup to instantiate.
 * This helper provides safe instantiation with fallback to null on error.
 *
 * @param nodeClass - The node class or instance to instantiate
 * @returns The instantiated node or null if instantiation fails
 *
 * @example
 * ```typescript
 * const instance = instantiateNode(nodeClass);
 * if (instance) {
 *   // Successfully instantiated
 *   const version = isVersionedNodeInstance(instance)
 *     ? instance.currentVersion
 *     : instance.description.version;
 * }
 * ```
 */
export function instantiateNode(nodeClass: NodeClass): NodeInstance | null {
  try {
    if (typeof nodeClass === 'function') {
      return new nodeClass();
    }
    // Already an instance
    return nodeClass;
  } catch (e) {
    // Some nodes require parameters to instantiate
    return null;
  }
}

/**
 * Safely get a node instance, handling both classes and instances.
 *
 * This is a non-throwing version that returns undefined on failure.
 *
 * @param nodeClass - The node class or instance
 * @returns The node instance or undefined
 */
export function getNodeInstance(nodeClass: NodeClass): NodeInstance | undefined {
  const instance = instantiateNode(nodeClass);
  return instance ?? undefined;
}

/**
 * Extract description from a node class or instance.
 *
 * Handles both versioned and regular nodes, with fallback logic.
 *
 * @param nodeClass - The node class or instance
 * @returns The node description or empty object on failure
 */
export function getNodeDescription(
  nodeClass: NodeClass
): INodeTypeBaseDescription | INodeTypeDescription {
  // Try to get description from instance first
  try {
    const instance = instantiateNode(nodeClass);

    if (instance) {
      // For VersionedNodeType, description is the baseDescription
      if (isVersionedNodeInstance(instance)) {
        return instance.description;
      }
      // For regular nodes, description is the full INodeTypeDescription
      return instance.description;
    }
  } catch (e) {
    // Ignore instantiation errors
  }

  // Fallback to static properties
  if (typeof nodeClass === 'object' && 'description' in nodeClass) {
    return nodeClass.description;
  }

  // Last resort: empty description
  return {
    displayName: '',
    name: '',
    group: [],
    description: '',
    version: 1,
    defaults: { name: '', color: '' },
    inputs: [],
    outputs: [],
    properties: []
  } as any; // Type assertion needed for fallback case
}

```

--------------------------------------------------------------------------------
/.claude/agents/deployment-engineer.md:
--------------------------------------------------------------------------------

```markdown
---
name: deployment-engineer
description: Use this agent when you need to set up CI/CD pipelines, containerize applications, configure cloud deployments, or automate infrastructure. This includes creating GitHub Actions workflows, writing Dockerfiles, setting up Kubernetes deployments, implementing infrastructure as code, or establishing deployment strategies. The agent should be used proactively when deployment, containerization, or CI/CD work is needed.\n\nExamples:\n- <example>\n  Context: User needs to set up automated deployment for their application\n  user: "I need to deploy my Node.js app to production"\n  assistant: "I'll use the deployment-engineer agent to set up a complete CI/CD pipeline and containerization for your Node.js application"\n  <commentary>\n  Since the user needs deployment setup, use the Task tool to launch the deployment-engineer agent to create the necessary CI/CD and container configurations.\n  </commentary>\n</example>\n- <example>\n  Context: User has just created a new web service and needs deployment automation\n  user: "I've finished building the API service"\n  assistant: "Now let me use the deployment-engineer agent to set up automated deployments for your API service"\n  <commentary>\n  Proactively use the deployment-engineer agent after development work to establish proper deployment infrastructure.\n  </commentary>\n</example>\n- <example>\n  Context: User wants to implement Kubernetes for their microservices\n  user: "How should I structure my Kubernetes deployments for these three microservices?"\n  assistant: "I'll use the deployment-engineer agent to create a complete Kubernetes deployment strategy for your microservices"\n  <commentary>\n  For Kubernetes and container orchestration questions, use the deployment-engineer agent to provide production-ready configurations.\n  </commentary>\n</example>
---

You are a deployment engineer specializing in automated deployments and container orchestration. Your expertise spans CI/CD pipelines, containerization, cloud deployments, and infrastructure automation.

## Core Responsibilities

You will create production-ready deployment configurations that emphasize automation, reliability, and maintainability. Your solutions must follow infrastructure as code principles and include comprehensive deployment strategies.

## Technical Expertise

### CI/CD Pipelines
- Design GitHub Actions workflows with matrix builds, caching, and artifact management
- Implement GitLab CI pipelines with proper stages and dependencies
- Configure Jenkins pipelines with shared libraries and parallel execution
- Set up automated testing, security scanning, and quality gates
- Implement semantic versioning and automated release management

### Container Engineering
- Write multi-stage Dockerfiles optimized for size and security
- Implement proper layer caching and build optimization
- Configure container security scanning and vulnerability management
- Design docker-compose configurations for local development
- Implement container registry strategies with proper tagging

### Kubernetes Orchestration
- Create deployments with proper resource limits and requests
- Configure services, ingresses, and network policies
- Implement ConfigMaps and Secrets management
- Design horizontal pod autoscaling and cluster autoscaling
- Set up health checks, readiness probes, and liveness probes

### Infrastructure as Code
- Write Terraform modules for cloud resources
- Design CloudFormation templates with proper parameters
- Implement state management and backend configuration
- Create reusable infrastructure components
- Design multi-environment deployment strategies

## Operational Approach

1. **Automation First**: Every deployment step must be automated. Manual interventions should only be required for approval gates.

2. **Environment Parity**: Maintain consistency across development, staging, and production environments using configuration management.

3. **Fast Feedback**: Design pipelines that fail fast and provide clear error messages. Run quick checks before expensive operations.

4. **Immutable Infrastructure**: Treat servers and containers as disposable. Never modify running infrastructure - always replace.

5. **Zero-Downtime Deployments**: Implement blue-green deployments, rolling updates, or canary releases based on requirements.

## Output Requirements

You will provide:

### CI/CD Pipeline Configuration
- Complete pipeline file with all stages defined
- Build, test, security scan, and deployment stages
- Environment-specific deployment configurations
- Secret management and variable handling
- Artifact storage and versioning strategy

### Container Configuration
- Production-optimized Dockerfile with comments
- Security best practices (non-root user, minimal base images)
- Build arguments for flexibility
- Health check implementations
- Container registry push strategies

### Orchestration Manifests
- Kubernetes YAML files or docker-compose configurations
- Service definitions with proper networking
- Persistent volume configurations if needed
- Ingress/load balancer setup
- Namespace and RBAC configurations

### Infrastructure Code
- Complete IaC templates for required resources
- Variable definitions for environment flexibility
- Output definitions for resource discovery
- State management configuration
- Module structure for reusability

### Deployment Documentation
- Step-by-step deployment runbook
- Rollback procedures with specific commands
- Monitoring and alerting setup basics
- Troubleshooting guide for common issues
- Environment variable documentation

## Quality Standards

- Include inline comments explaining critical decisions and trade-offs
- Provide security scanning at multiple stages
- Implement proper logging and monitoring hooks
- Design for horizontal scalability from the start
- Include cost optimization considerations
- Ensure all configurations are idempotent

## Proactive Recommendations

When analyzing existing code or infrastructure, you will proactively suggest:
- Pipeline optimizations to reduce build times
- Security improvements for containers and deployments
- Cost optimization opportunities
- Monitoring and observability enhancements
- Disaster recovery improvements

You will always validate that configurations work together as a complete system and provide clear instructions for implementation and testing.

```

--------------------------------------------------------------------------------
/tests/unit/utils/n8n-errors.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect } from 'vitest';
import {
  formatExecutionError,
  formatNoExecutionError,
  getUserFriendlyErrorMessage,
  N8nApiError,
  N8nAuthenticationError,
  N8nNotFoundError,
  N8nValidationError,
  N8nRateLimitError,
  N8nServerError
} from '../../../src/utils/n8n-errors';

describe('formatExecutionError', () => {
  it('should format error with both execution ID and workflow ID', () => {
    const result = formatExecutionError('exec_12345', 'wf_abc');

    expect(result).toBe("Workflow wf_abc execution exec_12345 failed. Use n8n_get_execution({id: 'exec_12345', mode: 'preview'}) to investigate the error.");
    expect(result).toContain('mode: \'preview\'');
    expect(result).toContain('exec_12345');
    expect(result).toContain('wf_abc');
  });

  it('should format error with only execution ID', () => {
    const result = formatExecutionError('exec_67890');

    expect(result).toBe("Execution exec_67890 failed. Use n8n_get_execution({id: 'exec_67890', mode: 'preview'}) to investigate the error.");
    expect(result).toContain('mode: \'preview\'');
    expect(result).toContain('exec_67890');
    expect(result).not.toContain('Workflow');
  });

  it('should include preview mode guidance', () => {
    const result = formatExecutionError('test_id');

    expect(result).toMatch(/mode:\s*'preview'/);
  });

  it('should format with undefined workflow ID (treated as missing)', () => {
    const result = formatExecutionError('exec_123', undefined);

    expect(result).toBe("Execution exec_123 failed. Use n8n_get_execution({id: 'exec_123', mode: 'preview'}) to investigate the error.");
  });

  it('should properly escape execution ID in suggestion', () => {
    const result = formatExecutionError('exec-with-special_chars.123');

    expect(result).toContain("id: 'exec-with-special_chars.123'");
  });
});

describe('formatNoExecutionError', () => {
  it('should provide guidance to check recent executions', () => {
    const result = formatNoExecutionError();

    expect(result).toBe("Workflow failed to execute. Use n8n_list_executions to find recent executions, then n8n_get_execution with mode='preview' to investigate.");
    expect(result).toContain('n8n_list_executions');
    expect(result).toContain('n8n_get_execution');
    expect(result).toContain("mode='preview'");
  });

  it('should include preview mode in guidance', () => {
    const result = formatNoExecutionError();

    expect(result).toMatch(/mode\s*=\s*'preview'/);
  });
});

describe('getUserFriendlyErrorMessage', () => {
  it('should handle authentication error', () => {
    const error = new N8nAuthenticationError('Invalid API key');
    const message = getUserFriendlyErrorMessage(error);

    expect(message).toBe('Failed to authenticate with n8n. Please check your API key.');
  });

  it('should handle not found error', () => {
    const error = new N8nNotFoundError('Workflow', '123');
    const message = getUserFriendlyErrorMessage(error);

    expect(message).toContain('not found');
  });

  it('should handle validation error', () => {
    const error = new N8nValidationError('Missing required field');
    const message = getUserFriendlyErrorMessage(error);

    expect(message).toBe('Invalid request: Missing required field');
  });

  it('should handle rate limit error', () => {
    const error = new N8nRateLimitError(60);
    const message = getUserFriendlyErrorMessage(error);

    expect(message).toBe('Too many requests. Please wait a moment and try again.');
  });

  it('should handle server error with custom message', () => {
    const error = new N8nServerError('Database connection failed', 503);
    const message = getUserFriendlyErrorMessage(error);

    expect(message).toBe('Database connection failed');
  });

  it('should handle server error without message', () => {
    const error = new N8nApiError('', 500, 'SERVER_ERROR');
    const message = getUserFriendlyErrorMessage(error);

    expect(message).toBe('n8n server error occurred');
  });

  it('should handle no response error', () => {
    const error = new N8nApiError('Network error', undefined, 'NO_RESPONSE');
    const message = getUserFriendlyErrorMessage(error);

    expect(message).toBe('Unable to connect to n8n. Please check the server URL and ensure n8n is running.');
  });

  it('should handle unknown error with message', () => {
    const error = new N8nApiError('Custom error message');
    const message = getUserFriendlyErrorMessage(error);

    expect(message).toBe('Custom error message');
  });

  it('should handle unknown error without message', () => {
    const error = new N8nApiError('');
    const message = getUserFriendlyErrorMessage(error);

    expect(message).toBe('An unexpected error occurred');
  });
});

describe('Error message integration', () => {
  it('should use formatExecutionError for webhook failures with execution ID', () => {
    const executionId = 'exec_webhook_123';
    const workflowId = 'wf_webhook_abc';
    const message = formatExecutionError(executionId, workflowId);

    expect(message).toContain('Workflow wf_webhook_abc execution exec_webhook_123 failed');
    expect(message).toContain('n8n_get_execution');
    expect(message).toContain("mode: 'preview'");
  });

  it('should use formatNoExecutionError for server errors without execution context', () => {
    const message = formatNoExecutionError();

    expect(message).toContain('Workflow failed to execute');
    expect(message).toContain('n8n_list_executions');
    expect(message).toContain('n8n_get_execution');
  });

  it('should not include "contact support" in any error message', () => {
    const executionMessage = formatExecutionError('test');
    const noExecutionMessage = formatNoExecutionError();
    const serverError = new N8nServerError();
    const serverErrorMessage = getUserFriendlyErrorMessage(serverError);

    expect(executionMessage.toLowerCase()).not.toContain('contact support');
    expect(noExecutionMessage.toLowerCase()).not.toContain('contact support');
    expect(serverErrorMessage.toLowerCase()).not.toContain('contact support');
  });

  it('should always guide users to use preview mode first', () => {
    const executionMessage = formatExecutionError('test');
    const noExecutionMessage = formatNoExecutionError();

    expect(executionMessage).toContain("mode: 'preview'");
    expect(noExecutionMessage).toContain("mode='preview'");
  });
});

```

--------------------------------------------------------------------------------
/tests/unit/services/operation-similarity-service.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Tests for OperationSimilarityService
 */

import { describe, it, expect, beforeEach } from 'vitest';
import { OperationSimilarityService } from '../../../src/services/operation-similarity-service';
import { NodeRepository } from '../../../src/database/node-repository';
import { createTestDatabase } from '../../utils/database-utils';

describe('OperationSimilarityService', () => {
  let service: OperationSimilarityService;
  let repository: NodeRepository;
  let testDb: any;

  beforeEach(async () => {
    testDb = await createTestDatabase();
    repository = testDb.nodeRepository;
    service = new OperationSimilarityService(repository);

    // Add test node with operations
    const testNode = {
      nodeType: 'nodes-base.googleDrive',
      packageName: 'n8n-nodes-base',
      displayName: 'Google Drive',
      description: 'Access Google Drive',
      category: 'transform',
      style: 'declarative' as const,
      isAITool: false,
      isTrigger: false,
      isWebhook: false,
      isVersioned: true,
      version: '1',
      properties: [
        {
          name: 'resource',
          type: 'options',
          options: [
            { value: 'file', name: 'File' },
            { value: 'folder', name: 'Folder' },
            { value: 'drive', name: 'Shared Drive' },
          ]
        },
        {
          name: 'operation',
          type: 'options',
          displayOptions: {
            show: {
              resource: ['file']
            }
          },
          options: [
            { value: 'copy', name: 'Copy' },
            { value: 'delete', name: 'Delete' },
            { value: 'download', name: 'Download' },
            { value: 'list', name: 'List' },
            { value: 'share', name: 'Share' },
            { value: 'update', name: 'Update' },
            { value: 'upload', name: 'Upload' }
          ]
        },
        {
          name: 'operation',
          type: 'options',
          displayOptions: {
            show: {
              resource: ['folder']
            }
          },
          options: [
            { value: 'create', name: 'Create' },
            { value: 'delete', name: 'Delete' },
            { value: 'share', name: 'Share' }
          ]
        }
      ],
      operations: [],
      credentials: []
    };

    repository.saveNode(testNode);
  });

  afterEach(async () => {
    if (testDb) {
      await testDb.cleanup();
    }
  });

  describe('findSimilarOperations', () => {
    it('should find exact match', () => {
      const suggestions = service.findSimilarOperations(
        'nodes-base.googleDrive',
        'download',
        'file'
      );

      expect(suggestions).toHaveLength(0); // No suggestions for valid operation
    });

    it('should suggest similar operations for typos', () => {
      const suggestions = service.findSimilarOperations(
        'nodes-base.googleDrive',
        'downlod',
        'file'
      );

      expect(suggestions.length).toBeGreaterThan(0);
      expect(suggestions[0].value).toBe('download');
      expect(suggestions[0].confidence).toBeGreaterThan(0.8);
    });

    it('should handle common mistakes with patterns', () => {
      const suggestions = service.findSimilarOperations(
        'nodes-base.googleDrive',
        'uploadFile',
        'file'
      );

      expect(suggestions.length).toBeGreaterThan(0);
      expect(suggestions[0].value).toBe('upload');
      expect(suggestions[0].reason).toContain('instead of');
    });

    it('should filter operations by resource', () => {
      const suggestions = service.findSimilarOperations(
        'nodes-base.googleDrive',
        'upload',
        'folder'
      );

      // Upload is not valid for folder resource
      expect(suggestions).toBeDefined();
      expect(suggestions.find(s => s.value === 'upload')).toBeUndefined();
    });

    it('should return empty array for node not found', () => {
      const suggestions = service.findSimilarOperations(
        'nodes-base.nonexistent',
        'operation',
        undefined
      );

      expect(suggestions).toEqual([]);
    });

    it('should handle operations without resource filtering', () => {
      const suggestions = service.findSimilarOperations(
        'nodes-base.googleDrive',
        'updat',  // Missing 'e' at the end
        undefined
      );

      expect(suggestions.length).toBeGreaterThan(0);
      expect(suggestions[0].value).toBe('update');
    });
  });

  describe('similarity calculation', () => {
    it('should rank exact matches highest', () => {
      const suggestions = service.findSimilarOperations(
        'nodes-base.googleDrive',
        'delete',
        'file'
      );

      expect(suggestions).toHaveLength(0); // Exact match, no suggestions needed
    });

    it('should rank substring matches high', () => {
      const suggestions = service.findSimilarOperations(
        'nodes-base.googleDrive',
        'del',
        'file'
      );

      expect(suggestions.length).toBeGreaterThan(0);
      const deleteSuggestion = suggestions.find(s => s.value === 'delete');
      expect(deleteSuggestion).toBeDefined();
      expect(deleteSuggestion!.confidence).toBeGreaterThanOrEqual(0.7);
    });

    it('should detect common variations', () => {
      const suggestions = service.findSimilarOperations(
        'nodes-base.googleDrive',
        'getData',
        'file'
      );

      expect(suggestions.length).toBeGreaterThan(0);
      // Should suggest 'download' or similar
    });
  });

  describe('caching', () => {
    it('should cache results for repeated queries', () => {
      // First call
      const suggestions1 = service.findSimilarOperations(
        'nodes-base.googleDrive',
        'downlod',
        'file'
      );

      // Second call with same params
      const suggestions2 = service.findSimilarOperations(
        'nodes-base.googleDrive',
        'downlod',
        'file'
      );

      expect(suggestions1).toEqual(suggestions2);
    });

    it('should clear cache when requested', () => {
      // Add to cache
      service.findSimilarOperations(
        'nodes-base.googleDrive',
        'test',
        'file'
      );

      // Clear cache
      service.clearCache();

      // This would fetch fresh data (behavior is the same, just uncached)
      const suggestions = service.findSimilarOperations(
        'nodes-base.googleDrive',
        'test',
        'file'
      );

      expect(suggestions).toBeDefined();
    });
  });
});
```

--------------------------------------------------------------------------------
/src/types/workflow-diff.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Workflow Diff Types
 * Defines the structure for partial workflow updates using diff operations
 */

import { WorkflowNode, WorkflowConnection } from './n8n-api';

// Base operation interface
export interface DiffOperation {
  type: string;
  description?: string; // Optional description for clarity
}

// Node Operations
export interface AddNodeOperation extends DiffOperation {
  type: 'addNode';
  node: Partial<WorkflowNode> & {
    name: string; // Name is required
    type: string; // Type is required
    position: [number, number]; // Position is required
  };
}

export interface RemoveNodeOperation extends DiffOperation {
  type: 'removeNode';
  nodeId?: string; // Can use either ID or name
  nodeName?: string;
}

export interface UpdateNodeOperation extends DiffOperation {
  type: 'updateNode';
  nodeId?: string; // Can use either ID or name
  nodeName?: string;
  updates: {
    [path: string]: any; // Dot notation paths like 'parameters.url'
  };
}

export interface MoveNodeOperation extends DiffOperation {
  type: 'moveNode';
  nodeId?: string;
  nodeName?: string;
  position: [number, number];
}

export interface EnableNodeOperation extends DiffOperation {
  type: 'enableNode';
  nodeId?: string;
  nodeName?: string;
}

export interface DisableNodeOperation extends DiffOperation {
  type: 'disableNode';
  nodeId?: string;
  nodeName?: string;
}

// Connection Operations
export interface AddConnectionOperation extends DiffOperation {
  type: 'addConnection';
  source: string; // Node name or ID
  target: string; // Node name or ID
  sourceOutput?: string; // Default: 'main'
  targetInput?: string; // Default: 'main'
  sourceIndex?: number; // Default: 0
  targetIndex?: number; // Default: 0
  // Smart parameters for multi-output nodes (Phase 1 UX improvement)
  branch?: 'true' | 'false'; // For IF nodes: maps to sourceIndex (0=true, 1=false)
  case?: number; // For Switch/multi-output nodes: maps to sourceIndex
}

export interface RemoveConnectionOperation extends DiffOperation {
  type: 'removeConnection';
  source: string; // Node name or ID
  target: string; // Node name or ID
  sourceOutput?: string; // Default: 'main'
  targetInput?: string; // Default: 'main'
  ignoreErrors?: boolean; // If true, don't fail when connection doesn't exist (useful for cleanup)
}

export interface RewireConnectionOperation extends DiffOperation {
  type: 'rewireConnection';
  source: string;      // Source node name or ID
  from: string;        // Current target to rewire FROM
  to: string;          // New target to rewire TO
  sourceOutput?: string;  // Optional: which output to rewire (default: 'main')
  targetInput?: string;   // Optional: which input type (default: 'main')
  sourceIndex?: number;   // Optional: which source index (default: 0)
  // Smart parameters for multi-output nodes (Phase 1 UX improvement)
  branch?: 'true' | 'false'; // For IF nodes: maps to sourceIndex (0=true, 1=false)
  case?: number; // For Switch/multi-output nodes: maps to sourceIndex
}

// Workflow Metadata Operations
export interface UpdateSettingsOperation extends DiffOperation {
  type: 'updateSettings';
  settings: {
    [key: string]: any;
  };
}

export interface UpdateNameOperation extends DiffOperation {
  type: 'updateName';
  name: string;
}

export interface AddTagOperation extends DiffOperation {
  type: 'addTag';
  tag: string;
}

export interface RemoveTagOperation extends DiffOperation {
  type: 'removeTag';
  tag: string;
}

// Connection Cleanup Operations
export interface CleanStaleConnectionsOperation extends DiffOperation {
  type: 'cleanStaleConnections';
  dryRun?: boolean; // If true, return what would be removed without applying changes
}

export interface ReplaceConnectionsOperation extends DiffOperation {
  type: 'replaceConnections';
  connections: {
    [nodeName: string]: {
      [outputName: string]: Array<Array<{
        node: string;
        type: string;
        index: number;
      }>>;
    };
  };
}

// Union type for all operations
export type WorkflowDiffOperation =
  | AddNodeOperation
  | RemoveNodeOperation
  | UpdateNodeOperation
  | MoveNodeOperation
  | EnableNodeOperation
  | DisableNodeOperation
  | AddConnectionOperation
  | RemoveConnectionOperation
  | RewireConnectionOperation
  | UpdateSettingsOperation
  | UpdateNameOperation
  | AddTagOperation
  | RemoveTagOperation
  | CleanStaleConnectionsOperation
  | ReplaceConnectionsOperation;

// Main diff request structure
export interface WorkflowDiffRequest {
  id: string; // Workflow ID
  operations: WorkflowDiffOperation[];
  validateOnly?: boolean; // If true, only validate without applying
  continueOnError?: boolean; // If true, apply valid operations even if some fail (default: false for atomic behavior)
}

// Response types
export interface WorkflowDiffValidationError {
  operation: number; // Index of the operation that failed
  message: string;
  details?: any;
}

export interface WorkflowDiffResult {
  success: boolean;
  workflow?: any; // Updated workflow if successful
  errors?: WorkflowDiffValidationError[];
  operationsApplied?: number;
  message?: string;
  applied?: number[]; // Indices of successfully applied operations (when continueOnError is true)
  failed?: number[]; // Indices of failed operations (when continueOnError is true)
  staleConnectionsRemoved?: Array<{ from: string; to: string }>; // For cleanStaleConnections operation
}

// Helper type for node reference (supports both ID and name)
export interface NodeReference {
  id?: string;
  name?: string;
}

// Utility functions type guards
export function isNodeOperation(op: WorkflowDiffOperation): op is 
  AddNodeOperation | RemoveNodeOperation | UpdateNodeOperation | 
  MoveNodeOperation | EnableNodeOperation | DisableNodeOperation {
  return ['addNode', 'removeNode', 'updateNode', 'moveNode', 'enableNode', 'disableNode'].includes(op.type);
}

export function isConnectionOperation(op: WorkflowDiffOperation): op is
  AddConnectionOperation | RemoveConnectionOperation | RewireConnectionOperation | CleanStaleConnectionsOperation | ReplaceConnectionsOperation {
  return ['addConnection', 'removeConnection', 'rewireConnection', 'cleanStaleConnections', 'replaceConnections'].includes(op.type);
}

export function isMetadataOperation(op: WorkflowDiffOperation): op is 
  UpdateSettingsOperation | UpdateNameOperation | AddTagOperation | RemoveTagOperation {
  return ['updateSettings', 'updateName', 'addTag', 'removeTag'].includes(op.type);
}
```

--------------------------------------------------------------------------------
/src/scripts/test-node-suggestions.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env npx tsx
/**
 * Test script for enhanced node type suggestions
 * Tests the NodeSimilarityService to ensure it provides helpful suggestions
 * for unknown or incorrectly typed nodes in workflows.
 */

import { createDatabaseAdapter } from '../database/database-adapter';
import { NodeRepository } from '../database/node-repository';
import { NodeSimilarityService } from '../services/node-similarity-service';
import { WorkflowValidator } from '../services/workflow-validator';
import { EnhancedConfigValidator } from '../services/enhanced-config-validator';
import { WorkflowAutoFixer } from '../services/workflow-auto-fixer';
import { Logger } from '../utils/logger';
import path from 'path';

const logger = new Logger({ prefix: '[NodeSuggestions Test]' });
const console = {
  log: (msg: string) => logger.info(msg),
  error: (msg: string, err?: any) => logger.error(msg, err)
};

async function testNodeSimilarity() {
  console.log('🔍 Testing Enhanced Node Type Suggestions\n');

  // Initialize database and services
  const dbPath = path.join(process.cwd(), 'data/nodes.db');
  const db = await createDatabaseAdapter(dbPath);
  const repository = new NodeRepository(db);
  const similarityService = new NodeSimilarityService(repository);
  const validator = new WorkflowValidator(repository, EnhancedConfigValidator);

  // Test cases with various invalid node types
  const testCases = [
    // Case variations
    { invalid: 'HttpRequest', expected: 'nodes-base.httpRequest' },
    { invalid: 'HTTPRequest', expected: 'nodes-base.httpRequest' },
    { invalid: 'Webhook', expected: 'nodes-base.webhook' },
    { invalid: 'WebHook', expected: 'nodes-base.webhook' },

    // Missing package prefix
    { invalid: 'slack', expected: 'nodes-base.slack' },
    { invalid: 'googleSheets', expected: 'nodes-base.googleSheets' },
    { invalid: 'telegram', expected: 'nodes-base.telegram' },

    // Common typos
    { invalid: 'htpRequest', expected: 'nodes-base.httpRequest' },
    { invalid: 'webook', expected: 'nodes-base.webhook' },
    { invalid: 'slak', expected: 'nodes-base.slack' },

    // Partial names
    { invalid: 'http', expected: 'nodes-base.httpRequest' },
    { invalid: 'sheet', expected: 'nodes-base.googleSheets' },

    // Wrong package prefix
    { invalid: 'nodes-base.openai', expected: 'nodes-langchain.openAi' },
    { invalid: 'n8n-nodes-base.httpRequest', expected: 'nodes-base.httpRequest' },

    // Complete unknowns
    { invalid: 'foobar', expected: null },
    { invalid: 'xyz123', expected: null },
  ];

  console.log('Testing individual node type suggestions:');
  console.log('=' .repeat(60));

  for (const testCase of testCases) {
    const suggestions = await similarityService.findSimilarNodes(testCase.invalid, 3);

    console.log(`\n❌ Invalid type: "${testCase.invalid}"`);

    if (suggestions.length > 0) {
      console.log('✨ Suggestions:');
      for (const suggestion of suggestions) {
        const confidence = Math.round(suggestion.confidence * 100);
        const marker = suggestion.nodeType === testCase.expected ? '✅' : '  ';
        console.log(
          `${marker} ${suggestion.nodeType} (${confidence}% match) - ${suggestion.reason}`
        );

        if (suggestion.confidence >= 0.9) {
          console.log('   💡 Can be auto-fixed!');
        }
      }

      // Check if expected match was found
      if (testCase.expected) {
        const found = suggestions.some(s => s.nodeType === testCase.expected);
        if (!found) {
          console.log(`   ⚠️  Expected "${testCase.expected}" was not suggested!`);
        }
      }
    } else {
      console.log('   No suggestions found');
      if (testCase.expected) {
        console.log(`   ⚠️  Expected "${testCase.expected}" was not suggested!`);
      }
    }
  }

  console.log('\n' + '='.repeat(60));
  console.log('\n📋 Testing workflow validation with unknown nodes:');
  console.log('='.repeat(60));

  // Test with a sample workflow
  const testWorkflow = {
    id: 'test-workflow',
    name: 'Test Workflow',
    nodes: [
      {
        id: '1',
        name: 'Start',
        type: 'nodes-base.manualTrigger',
        position: [100, 100] as [number, number],
        parameters: {},
        typeVersion: 1
      },
      {
        id: '2',
        name: 'HTTP Request',
        type: 'HTTPRequest', // Wrong capitalization
        position: [300, 100] as [number, number],
        parameters: {},
        typeVersion: 1
      },
      {
        id: '3',
        name: 'Slack',
        type: 'slack', // Missing prefix
        position: [500, 100] as [number, number],
        parameters: {},
        typeVersion: 1
      },
      {
        id: '4',
        name: 'Unknown',
        type: 'foobar', // Completely unknown
        position: [700, 100] as [number, number],
        parameters: {},
        typeVersion: 1
      }
    ],
    connections: {
      'Start': {
        main: [[{ node: 'HTTP Request', type: 'main', index: 0 }]]
      },
      'HTTP Request': {
        main: [[{ node: 'Slack', type: 'main', index: 0 }]]
      },
      'Slack': {
        main: [[{ node: 'Unknown', type: 'main', index: 0 }]]
      }
    },
    settings: {}
  };

  const validationResult = await validator.validateWorkflow(testWorkflow as any, {
    validateNodes: true,
    validateConnections: false,
    validateExpressions: false,
    profile: 'runtime'
  });

  console.log('\nValidation Results:');
  for (const error of validationResult.errors) {
    if (error.message?.includes('Unknown node type:')) {
      console.log(`\n🔴 ${error.nodeName}: ${error.message}`);
    }
  }

  console.log('\n' + '='.repeat(60));
  console.log('\n🔧 Testing AutoFixer with node type corrections:');
  console.log('='.repeat(60));

  const autoFixer = new WorkflowAutoFixer(repository);
  const fixResult = autoFixer.generateFixes(
    testWorkflow as any,
    validationResult,
    [],
    {
      applyFixes: false,
      fixTypes: ['node-type-correction'],
      confidenceThreshold: 'high'
    }
  );

  if (fixResult.fixes.length > 0) {
    console.log('\n✅ Auto-fixable issues found:');
    for (const fix of fixResult.fixes) {
      console.log(`   • ${fix.description}`);
    }
    console.log(`\nSummary: ${fixResult.summary}`);
  } else {
    console.log('\n❌ No auto-fixable node type issues found (only high-confidence fixes are applied)');
  }

  console.log('\n' + '='.repeat(60));
  console.log('\n✨ Test complete!');
}

// Run the test
testNodeSimilarity().catch(error => {
  console.error('Test failed:', error);
  process.exit(1);
});
```

--------------------------------------------------------------------------------
/tests/utils/test-helpers.ts:
--------------------------------------------------------------------------------

```typescript
import { vi } from 'vitest';
import { WorkflowNode, Workflow } from '@/types/n8n-api';

// Use any type for INodeDefinition since it's from n8n-workflow package
type INodeDefinition = any;

/**
 * Common test utilities and helpers
 */

/**
 * Wait for a condition to be true
 */
export async function waitFor(
  condition: () => boolean | Promise<boolean>,
  options: { timeout?: number; interval?: number } = {}
): Promise<void> {
  const { timeout = 5000, interval = 50 } = options;
  const startTime = Date.now();
  
  while (Date.now() - startTime < timeout) {
    if (await condition()) {
      return;
    }
    await new Promise(resolve => setTimeout(resolve, interval));
  }
  
  throw new Error(`Timeout waiting for condition after ${timeout}ms`);
}

/**
 * Create a mock node definition with default values
 */
export function createMockNodeDefinition(overrides?: Partial<INodeDefinition>): INodeDefinition {
  return {
    displayName: 'Mock Node',
    name: 'mockNode',
    group: ['transform'],
    version: 1,
    description: 'A mock node for testing',
    defaults: {
      name: 'Mock Node',
    },
    inputs: ['main'],
    outputs: ['main'],
    properties: [],
    ...overrides
  };
}

/**
 * Create a mock workflow node
 */
export function createMockNode(overrides?: Partial<WorkflowNode>): WorkflowNode {
  return {
    id: 'mock-node-id',
    name: 'Mock Node',
    type: 'n8n-nodes-base.mockNode',
    typeVersion: 1,
    position: [0, 0],
    parameters: {},
    ...overrides
  };
}

/**
 * Create a mock workflow
 */
export function createMockWorkflow(overrides?: Partial<Workflow>): Workflow {
  return {
    id: 'mock-workflow-id',
    name: 'Mock Workflow',
    active: false,
    nodes: [],
    connections: {},
    createdAt: new Date().toISOString(),
    updatedAt: new Date().toISOString(),
    ...overrides
  };
}

/**
 * Mock console methods for tests
 */
export function mockConsole() {
  const originalConsole = { ...console };
  
  const mocks = {
    log: vi.spyOn(console, 'log').mockImplementation(() => {}),
    error: vi.spyOn(console, 'error').mockImplementation(() => {}),
    warn: vi.spyOn(console, 'warn').mockImplementation(() => {}),
    debug: vi.spyOn(console, 'debug').mockImplementation(() => {}),
    info: vi.spyOn(console, 'info').mockImplementation(() => {})
  };
  
  return {
    mocks,
    restore: () => {
      Object.entries(mocks).forEach(([key, mock]) => {
        mock.mockRestore();
      });
    }
  };
}

/**
 * Create a deferred promise for testing async operations
 */
export function createDeferred<T>() {
  let resolve: (value: T) => void;
  let reject: (error: any) => void;
  
  const promise = new Promise<T>((res, rej) => {
    resolve = res;
    reject = rej;
  });
  
  return {
    promise,
    resolve: resolve!,
    reject: reject!
  };
}

/**
 * Helper to test error throwing
 */
export async function expectToThrowAsync(
  fn: () => Promise<any>,
  errorMatcher?: string | RegExp | Error
) {
  let thrown = false;
  let error: any;
  
  try {
    await fn();
  } catch (e) {
    thrown = true;
    error = e;
  }
  
  if (!thrown) {
    throw new Error('Expected function to throw');
  }
  
  if (errorMatcher) {
    if (typeof errorMatcher === 'string') {
      expect(error.message).toContain(errorMatcher);
    } else if (errorMatcher instanceof RegExp) {
      expect(error.message).toMatch(errorMatcher);
    } else if (errorMatcher instanceof Error) {
      expect(error).toEqual(errorMatcher);
    }
  }
  
  return error;
}

/**
 * Create a test database with initial data
 */
export function createTestDatabase(data: Record<string, any[]> = {}) {
  const db = new Map<string, any[]>();
  
  // Initialize with default tables
  db.set('nodes', data.nodes || []);
  db.set('templates', data.templates || []);
  db.set('tools_documentation', data.tools_documentation || []);
  
  // Add any additional tables from data
  Object.entries(data).forEach(([table, rows]) => {
    if (!db.has(table)) {
      db.set(table, rows);
    }
  });
  
  return {
    prepare: vi.fn((sql: string) => {
      const tableName = extractTableName(sql);
      const rows = db.get(tableName) || [];
      
      return {
        all: vi.fn(() => rows),
        get: vi.fn((params: any) => {
          if (typeof params === 'string') {
            return rows.find((r: any) => r.id === params);
          }
          return rows[0];
        }),
        run: vi.fn((params: any) => {
          rows.push(params);
          return { changes: 1, lastInsertRowid: rows.length };
        })
      };
    }),
    exec: vi.fn(),
    close: vi.fn(),
    transaction: vi.fn((fn: Function) => fn()),
    pragma: vi.fn()
  };
}

/**
 * Extract table name from SQL query
 */
function extractTableName(sql: string): string {
  const patterns = [
    /FROM\s+(\w+)/i,
    /INTO\s+(\w+)/i,
    /UPDATE\s+(\w+)/i,
    /TABLE\s+(\w+)/i
  ];
  
  for (const pattern of patterns) {
    const match = sql.match(pattern);
    if (match) {
      return match[1];
    }
  }
  
  return 'nodes';
}

/**
 * Create a mock HTTP response
 */
export function createMockResponse(data: any, status = 200) {
  return {
    data,
    status,
    statusText: status === 200 ? 'OK' : 'Error',
    headers: {},
    config: {}
  };
}

/**
 * Create a mock HTTP error
 */
export function createMockHttpError(message: string, status = 500, data?: any) {
  const error: any = new Error(message);
  error.isAxiosError = true;
  error.response = {
    data: data || { message },
    status,
    statusText: status === 500 ? 'Internal Server Error' : 'Error',
    headers: {},
    config: {}
  };
  return error;
}

/**
 * Helper to test MCP tool calls
 */
export async function testMCPToolCall(
  tool: any,
  args: any,
  expectedResult?: any
) {
  const result = await tool.handler(args);
  
  if (expectedResult !== undefined) {
    expect(result).toEqual(expectedResult);
  }
  
  return result;
}

/**
 * Create a mock MCP context
 */
export function createMockMCPContext() {
  return {
    request: vi.fn(),
    notify: vi.fn(),
    expose: vi.fn(),
    onClose: vi.fn()
  };
}

/**
 * Snapshot serializer for dates
 */
export const dateSerializer = {
  test: (value: any) => value instanceof Date,
  serialize: (value: Date) => value.toISOString()
};

/**
 * Snapshot serializer for functions
 */
export const functionSerializer = {
  test: (value: any) => typeof value === 'function',
  serialize: () => '[Function]'
};

/**
 * Clean up test environment
 */
export function cleanupTestEnvironment() {
  vi.clearAllMocks();
  vi.clearAllTimers();
  vi.useRealTimers();
}
```

--------------------------------------------------------------------------------
/tests/unit/services/node-similarity-service.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { NodeSimilarityService } from '@/services/node-similarity-service';
import { NodeRepository } from '@/database/node-repository';
import type { ParsedNode } from '@/parsers/node-parser';

vi.mock('@/database/node-repository');

describe('NodeSimilarityService', () => {
  let service: NodeSimilarityService;
  let mockRepository: NodeRepository;

  const createMockNode = (type: string, displayName: string, description = ''): any => ({
    nodeType: type,
    displayName,
    description,
    version: 1,
    defaults: {},
    inputs: ['main'],
    outputs: ['main'],
    properties: [],
    package: 'n8n-nodes-base',
    typeVersion: 1
  });

  beforeEach(() => {
    vi.clearAllMocks();
    mockRepository = new NodeRepository({} as any);
    service = new NodeSimilarityService(mockRepository);
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  describe('Cache Management', () => {
    it('should invalidate cache when requested', () => {
      service.invalidateCache();
      expect(service['nodeCache']).toBeNull();
      expect(service['cacheVersion']).toBeGreaterThan(0);
    });

    it('should refresh cache with new data', async () => {
      const nodes = [
        createMockNode('nodes-base.httpRequest', 'HTTP Request'),
        createMockNode('nodes-base.webhook', 'Webhook')
      ];

      vi.spyOn(mockRepository, 'getAllNodes').mockReturnValue(nodes);

      await service.refreshCache();

      expect(service['nodeCache']).toEqual(nodes);
      expect(mockRepository.getAllNodes).toHaveBeenCalled();
    });

    it('should use stale cache on refresh error', async () => {
      const staleNodes = [createMockNode('nodes-base.slack', 'Slack')];
      service['nodeCache'] = staleNodes;
      service['cacheExpiry'] = Date.now() + 1000; // Set cache as not expired

      vi.spyOn(mockRepository, 'getAllNodes').mockImplementation(() => {
        throw new Error('Database error');
      });

      const nodes = await service['getCachedNodes']();

      expect(nodes).toEqual(staleNodes);
    });

    it('should refresh cache when expired', async () => {
      service['cacheExpiry'] = Date.now() - 1000; // Cache expired
      const nodes = [createMockNode('nodes-base.httpRequest', 'HTTP Request')];

      vi.spyOn(mockRepository, 'getAllNodes').mockReturnValue(nodes);

      const result = await service['getCachedNodes']();

      expect(result).toEqual(nodes);
      expect(mockRepository.getAllNodes).toHaveBeenCalled();
    });
  });

  describe('Edit Distance Optimization', () => {
    it('should return 0 for identical strings', () => {
      const distance = service['getEditDistance']('test', 'test');
      expect(distance).toBe(0);
    });

    it('should early terminate for length difference exceeding max', () => {
      const distance = service['getEditDistance']('a', 'abcdefghijk', 3);
      expect(distance).toBe(4); // maxDistance + 1
    });

    it('should calculate correct edit distance within threshold', () => {
      const distance = service['getEditDistance']('kitten', 'sitting', 10);
      expect(distance).toBe(3);
    });

    it('should use early termination when min distance exceeds max', () => {
      const distance = service['getEditDistance']('abc', 'xyz', 2);
      expect(distance).toBe(3); // Should terminate early and return maxDistance + 1
    });
  });


  describe('Node Suggestions', () => {
    beforeEach(() => {
      const nodes = [
        createMockNode('nodes-base.httpRequest', 'HTTP Request', 'Make HTTP requests'),
        createMockNode('nodes-base.webhook', 'Webhook', 'Receive webhooks'),
        createMockNode('nodes-base.slack', 'Slack', 'Send messages to Slack'),
        createMockNode('nodes-langchain.openAi', 'OpenAI', 'Use OpenAI models')
      ];

      vi.spyOn(mockRepository, 'getAllNodes').mockReturnValue(nodes);
    });

    it('should find similar nodes for exact match', async () => {
      const suggestions = await service.findSimilarNodes('httpRequest', 3);

      expect(suggestions).toHaveLength(1);
      expect(suggestions[0].nodeType).toBe('nodes-base.httpRequest');
      expect(suggestions[0].confidence).toBeGreaterThan(0.5); // Adjusted based on actual implementation
    });

    it('should find nodes for typo queries', async () => {
      const suggestions = await service.findSimilarNodes('htpRequest', 3);

      expect(suggestions.length).toBeGreaterThan(0);
      expect(suggestions[0].nodeType).toBe('nodes-base.httpRequest');
      expect(suggestions[0].confidence).toBeGreaterThan(0.4); // Adjusted based on actual implementation
    });

    it('should find nodes for partial matches', async () => {
      const suggestions = await service.findSimilarNodes('slack', 3);

      expect(suggestions.length).toBeGreaterThan(0);
      expect(suggestions[0].nodeType).toBe('nodes-base.slack');
    });

    it('should return empty array for no matches', async () => {
      const suggestions = await service.findSimilarNodes('nonexistent', 3);

      expect(suggestions).toEqual([]);
    });

    it('should respect the limit parameter', async () => {
      const suggestions = await service.findSimilarNodes('request', 2);

      expect(suggestions.length).toBeLessThanOrEqual(2);
    });

    it('should provide appropriate confidence levels', async () => {
      const suggestions = await service.findSimilarNodes('HttpRequest', 3);

      if (suggestions.length > 0) {
        expect(suggestions[0].confidence).toBeGreaterThan(0.5);
        expect(suggestions[0].reason).toBeDefined();
      }
    });

    it('should handle package prefix normalization', async () => {
      // Add a node with the exact type we're searching for
      const nodes = [
        createMockNode('nodes-base.httpRequest', 'HTTP Request', 'Make HTTP requests')
      ];
      vi.spyOn(mockRepository, 'getAllNodes').mockReturnValue(nodes);

      const suggestions = await service.findSimilarNodes('nodes-base.httpRequest', 3);

      expect(suggestions.length).toBeGreaterThan(0);
      expect(suggestions[0].nodeType).toBe('nodes-base.httpRequest');
    });
  });

  describe('Constants Usage', () => {
    it('should use proper constants for scoring', () => {
      expect(NodeSimilarityService['SCORING_THRESHOLD']).toBe(50);
      expect(NodeSimilarityService['TYPO_EDIT_DISTANCE']).toBe(2);
      expect(NodeSimilarityService['SHORT_SEARCH_LENGTH']).toBe(5);
      expect(NodeSimilarityService['CACHE_DURATION_MS']).toBe(5 * 60 * 1000);
      expect(NodeSimilarityService['AUTO_FIX_CONFIDENCE']).toBe(0.9);
    });
  });
});
```

--------------------------------------------------------------------------------
/tests/integration/database/empty-database.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Integration tests for empty database scenarios
 * Ensures we detect and handle empty database situations that caused production failures
 */
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { createDatabaseAdapter } from '../../../src/database/database-adapter';
import { NodeRepository } from '../../../src/database/node-repository';
import * as fs from 'fs';
import * as path from 'path';
import * as os from 'os';

describe('Empty Database Detection Tests', () => {
  let tempDbPath: string;
  let db: any;
  let repository: NodeRepository;

  beforeEach(async () => {
    // Create a temporary database file
    tempDbPath = path.join(os.tmpdir(), `test-empty-${Date.now()}.db`);
    db = await createDatabaseAdapter(tempDbPath);

    // Initialize schema
    const schemaPath = path.join(__dirname, '../../../src/database/schema.sql');
    const schema = fs.readFileSync(schemaPath, 'utf-8');
    db.exec(schema);

    repository = new NodeRepository(db);
  });

  afterEach(() => {
    if (db) {
      db.close();
    }
    // Clean up temp file
    if (fs.existsSync(tempDbPath)) {
      fs.unlinkSync(tempDbPath);
    }
  });

  describe('Empty Nodes Table Detection', () => {
    it('should detect empty nodes table', () => {
      const count = db.prepare('SELECT COUNT(*) as count FROM nodes').get();
      expect(count.count).toBe(0);
    });

    it('should detect empty FTS5 index', () => {
      const count = db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get();
      expect(count.count).toBe(0);
    });

    it('should return empty results for critical node searches', () => {
      const criticalSearches = ['webhook', 'merge', 'split', 'code', 'http'];

      for (const search of criticalSearches) {
        const results = db.prepare(`
          SELECT node_type FROM nodes_fts
          WHERE nodes_fts MATCH ?
        `).all(search);

        expect(results).toHaveLength(0);
      }
    });

    it('should fail validation with empty database', () => {
      const validation = validateEmptyDatabase(repository);

      expect(validation.passed).toBe(false);
      expect(validation.issues.length).toBeGreaterThan(0);
      expect(validation.issues[0]).toMatch(/CRITICAL.*no nodes found/i);
    });
  });

  describe('LIKE Fallback with Empty Database', () => {
    it('should return empty results for LIKE searches', () => {
      const results = db.prepare(`
        SELECT node_type FROM nodes
        WHERE node_type LIKE ? OR display_name LIKE ? OR description LIKE ?
      `).all('%webhook%', '%webhook%', '%webhook%');

      expect(results).toHaveLength(0);
    });

    it('should return empty results for multi-word LIKE searches', () => {
      const results = db.prepare(`
        SELECT node_type FROM nodes
        WHERE (node_type LIKE ? OR display_name LIKE ? OR description LIKE ?)
        OR (node_type LIKE ? OR display_name LIKE ? OR description LIKE ?)
      `).all('%split%', '%split%', '%split%', '%batch%', '%batch%', '%batch%');

      expect(results).toHaveLength(0);
    });
  });

  describe('Repository Methods with Empty Database', () => {
    it('should return null for getNode() with empty database', () => {
      const node = repository.getNode('nodes-base.webhook');
      expect(node).toBeNull();
    });

    it('should return empty array for searchNodes() with empty database', () => {
      const results = repository.searchNodes('webhook');
      expect(results).toHaveLength(0);
    });

    it('should return empty array for getAITools() with empty database', () => {
      const tools = repository.getAITools();
      expect(tools).toHaveLength(0);
    });

    it('should return 0 for getNodeCount() with empty database', () => {
      const count = repository.getNodeCount();
      expect(count).toBe(0);
    });
  });

  describe('Validation Messages for Empty Database', () => {
    it('should provide clear error message for empty database', () => {
      const validation = validateEmptyDatabase(repository);

      const criticalError = validation.issues.find(issue =>
        issue.includes('CRITICAL') && issue.includes('empty')
      );

      expect(criticalError).toBeDefined();
      expect(criticalError).toContain('no nodes found');
    });

    it('should suggest rebuild command in error message', () => {
      const validation = validateEmptyDatabase(repository);

      const errorWithSuggestion = validation.issues.find(issue =>
        issue.toLowerCase().includes('rebuild')
      );

      // This expectation documents that we should add rebuild suggestions
      // Currently validation doesn't include this, but it should
      if (!errorWithSuggestion) {
        console.warn('TODO: Add rebuild suggestion to validation error messages');
      }
    });
  });

  describe('Empty Template Data', () => {
    it('should detect empty templates table', () => {
      const count = db.prepare('SELECT COUNT(*) as count FROM templates').get();
      expect(count.count).toBe(0);
    });

    it('should handle missing template data gracefully', () => {
      const templates = db.prepare('SELECT * FROM templates LIMIT 10').all();
      expect(templates).toHaveLength(0);
    });
  });
});

/**
 * Validation function matching rebuild.ts logic
 */
function validateEmptyDatabase(repository: NodeRepository): { passed: boolean; issues: string[] } {
  const issues: string[] = [];

  try {
    const db = (repository as any).db;

    // Check if database has any nodes
    const nodeCount = db.prepare('SELECT COUNT(*) as count FROM nodes').get() as { count: number };
    if (nodeCount.count === 0) {
      issues.push('CRITICAL: Database is empty - no nodes found! Rebuild failed or was interrupted.');
      return { passed: false, issues };
    }

    // Check minimum expected node count
    if (nodeCount.count < 500) {
      issues.push(`WARNING: Only ${nodeCount.count} nodes found - expected at least 500 (both n8n packages)`);
    }

    // Check FTS5 table
    const ftsTableCheck = db.prepare(`
      SELECT name FROM sqlite_master
      WHERE type='table' AND name='nodes_fts'
    `).get();

    if (!ftsTableCheck) {
      issues.push('CRITICAL: FTS5 table (nodes_fts) does not exist - searches will fail or be very slow');
    } else {
      const ftsCount = db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get() as { count: number };

      if (ftsCount.count === 0) {
        issues.push('CRITICAL: FTS5 index is empty - searches will return zero results');
      }
    }
  } catch (error) {
    issues.push(`Validation error: ${(error as Error).message}`);
  }

  return {
    passed: issues.length === 0,
    issues
  };
}

```

--------------------------------------------------------------------------------
/src/utils/ssrf-protection.ts:
--------------------------------------------------------------------------------

```typescript
import { URL } from 'url';
import { lookup } from 'dns/promises';
import { logger } from './logger';

/**
 * SSRF Protection Utility with Configurable Security Modes
 *
 * Validates URLs to prevent Server-Side Request Forgery attacks including DNS rebinding
 * See: https://github.com/czlonkowski/n8n-mcp/issues/265 (HIGH-03)
 *
 * Security Modes:
 * - strict (default): Block localhost + private IPs + cloud metadata (production)
 * - moderate: Allow localhost, block private IPs + cloud metadata (local dev)
 * - permissive: Allow localhost + private IPs, block cloud metadata (testing only)
 */

// Security mode type
type SecurityMode = 'strict' | 'moderate' | 'permissive';

// Cloud metadata endpoints (ALWAYS blocked in all modes)
const CLOUD_METADATA = new Set([
  // AWS/Azure
  '169.254.169.254', // AWS/Azure metadata
  '169.254.170.2',   // AWS ECS metadata
  // Google Cloud
  'metadata.google.internal', // GCP metadata
  'metadata',
  // Alibaba Cloud
  '100.100.100.200', // Alibaba Cloud metadata
  // Oracle Cloud
  '192.0.0.192',     // Oracle Cloud metadata
]);

// Localhost patterns
const LOCALHOST_PATTERNS = new Set([
  'localhost',
  '127.0.0.1',
  '::1',
  '0.0.0.0',
  'localhost.localdomain',
]);

// Private IP ranges (regex for IPv4)
const PRIVATE_IP_RANGES = [
  /^10\./,                          // 10.0.0.0/8
  /^192\.168\./,                    // 192.168.0.0/16
  /^172\.(1[6-9]|2[0-9]|3[0-1])\./, // 172.16.0.0/12
  /^169\.254\./,                    // 169.254.0.0/16 (Link-local)
  /^127\./,                         // 127.0.0.0/8 (Loopback)
  /^0\./,                           // 0.0.0.0/8 (Invalid)
];

export class SSRFProtection {
  /**
   * Validate webhook URL for SSRF protection with configurable security modes
   *
   * @param urlString - URL to validate
   * @returns Promise with validation result
   *
   * @security Uses DNS resolution to prevent DNS rebinding attacks
   *
   * @example
   * // Production (default strict mode)
   * const result = await SSRFProtection.validateWebhookUrl('http://localhost:5678');
   * // { valid: false, reason: 'Localhost not allowed' }
   *
   * @example
   * // Local development (moderate mode)
   * process.env.WEBHOOK_SECURITY_MODE = 'moderate';
   * const result = await SSRFProtection.validateWebhookUrl('http://localhost:5678');
   * // { valid: true }
   */
  static async validateWebhookUrl(urlString: string): Promise<{
    valid: boolean;
    reason?: string
  }> {
    try {
      const url = new URL(urlString);
      const mode: SecurityMode = (process.env.WEBHOOK_SECURITY_MODE || 'strict') as SecurityMode;

      // Step 1: Must be HTTP/HTTPS (all modes)
      if (!['http:', 'https:'].includes(url.protocol)) {
        return { valid: false, reason: 'Invalid protocol. Only HTTP/HTTPS allowed.' };
      }

      // Get hostname and strip IPv6 brackets if present
      let hostname = url.hostname.toLowerCase();
      // Remove IPv6 brackets for consistent comparison
      if (hostname.startsWith('[') && hostname.endsWith(']')) {
        hostname = hostname.slice(1, -1);
      }

      // Step 2: ALWAYS block cloud metadata endpoints (all modes)
      if (CLOUD_METADATA.has(hostname)) {
        logger.warn('SSRF blocked: Cloud metadata endpoint', { hostname, mode });
        return { valid: false, reason: 'Cloud metadata endpoint blocked' };
      }

      // Step 3: Resolve DNS to get actual IP address
      // This prevents DNS rebinding attacks where hostname resolves to different IPs
      let resolvedIP: string;
      try {
        const { address } = await lookup(hostname);
        resolvedIP = address;

        logger.debug('DNS resolved for SSRF check', { hostname, resolvedIP, mode });
      } catch (error) {
        logger.warn('DNS resolution failed for webhook URL', {
          hostname,
          error: error instanceof Error ? error.message : String(error)
        });
        return { valid: false, reason: 'DNS resolution failed' };
      }

      // Step 4: ALWAYS block cloud metadata IPs (all modes)
      if (CLOUD_METADATA.has(resolvedIP)) {
        logger.warn('SSRF blocked: Hostname resolves to cloud metadata IP', {
          hostname,
          resolvedIP,
          mode
        });
        return { valid: false, reason: 'Hostname resolves to cloud metadata endpoint' };
      }

      // Step 5: Mode-specific validation

      // MODE: permissive - Allow everything except cloud metadata
      if (mode === 'permissive') {
        logger.warn('SSRF protection in permissive mode (localhost and private IPs allowed)', {
          hostname,
          resolvedIP
        });
        return { valid: true };
      }

      // Check if target is localhost
      const isLocalhost = LOCALHOST_PATTERNS.has(hostname) ||
                        resolvedIP === '::1' ||
                        resolvedIP.startsWith('127.');

      // MODE: strict - Block localhost and private IPs
      if (mode === 'strict' && isLocalhost) {
        logger.warn('SSRF blocked: Localhost not allowed in strict mode', {
          hostname,
          resolvedIP
        });
        return { valid: false, reason: 'Localhost access is blocked in strict mode' };
      }

      // MODE: moderate - Allow localhost, block private IPs
      if (mode === 'moderate' && isLocalhost) {
        logger.info('Localhost webhook allowed (moderate mode)', { hostname, resolvedIP });
        return { valid: true };
      }

      // Step 6: Check private IPv4 ranges (strict & moderate modes)
      if (PRIVATE_IP_RANGES.some(regex => regex.test(resolvedIP))) {
        logger.warn('SSRF blocked: Private IP address', { hostname, resolvedIP, mode });
        return {
          valid: false,
          reason: mode === 'strict'
            ? 'Private IP addresses not allowed'
            : 'Private IP addresses not allowed (use WEBHOOK_SECURITY_MODE=permissive if needed)'
        };
      }

      // Step 7: IPv6 private address check (strict & moderate modes)
      if (resolvedIP === '::1' ||         // Loopback
          resolvedIP === '::' ||          // Unspecified address
          resolvedIP.startsWith('fe80:') || // Link-local
          resolvedIP.startsWith('fc00:') || // Unique local (fc00::/7)
          resolvedIP.startsWith('fd00:') || // Unique local (fd00::/8)
          resolvedIP.startsWith('::ffff:')) { // IPv4-mapped IPv6
        logger.warn('SSRF blocked: IPv6 private address', {
          hostname,
          resolvedIP,
          mode
        });
        return { valid: false, reason: 'IPv6 private address not allowed' };
      }

      return { valid: true };
    } catch (error) {
      return { valid: false, reason: 'Invalid URL format' };
    }
  }
}

```

--------------------------------------------------------------------------------
/src/mcp/tool-docs/workflow_management/n8n-autofix-workflow.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const n8nAutofixWorkflowDoc: ToolDocumentation = {
  name: 'n8n_autofix_workflow',
  category: 'workflow_management',
  essentials: {
    description: 'Automatically fix common workflow validation errors - expression formats, typeVersions, error outputs, webhook paths',
    keyParameters: ['id', 'applyFixes'],
    example: 'n8n_autofix_workflow({id: "wf_abc123", applyFixes: false})',
    performance: 'Network-dependent (200-1000ms) - fetches, validates, and optionally updates workflow',
    tips: [
      'Use applyFixes: false to preview changes before applying',
      'Set confidenceThreshold to control fix aggressiveness (high/medium/low)',
      'Supports fixing expression formats, typeVersion issues, error outputs, node type corrections, and webhook paths',
      'High-confidence fixes (≥90%) are safe for auto-application'
    ]
  },
  full: {
    description: `Automatically detects and fixes common workflow validation errors in n8n workflows. This tool:

- Fetches the workflow from your n8n instance
- Runs comprehensive validation to detect issues
- Generates targeted fixes for common problems
- Optionally applies the fixes back to the workflow

The auto-fixer can resolve:
1. **Expression Format Issues**: Missing '=' prefix in n8n expressions (e.g., {{ $json.field }} → ={{ $json.field }})
2. **TypeVersion Corrections**: Downgrades nodes with unsupported typeVersions to maximum supported
3. **Error Output Configuration**: Removes conflicting onError settings when error connections are missing
4. **Node Type Corrections**: Intelligently fixes unknown node types using similarity matching:
   - Handles deprecated package prefixes (n8n-nodes-base. → nodes-base.)
   - Corrects capitalization mistakes (HttpRequest → httpRequest)
   - Suggests correct packages (nodes-base.openai → nodes-langchain.openAi)
   - Uses multi-factor scoring: name similarity, category match, package match, pattern match
   - Only auto-fixes suggestions with ≥90% confidence
   - Leverages NodeSimilarityService with 5-minute caching for performance
5. **Webhook Path Generation**: Automatically generates UUIDs for webhook nodes missing path configuration:
   - Generates a unique UUID for webhook path
   - Sets both 'path' parameter and 'webhookId' field to the same UUID
   - Ensures webhook nodes become functional with valid endpoints
   - High confidence fix as UUID generation is deterministic

The tool uses a confidence-based system to ensure safe fixes:
- **High (≥90%)**: Safe to auto-apply (exact matches, known patterns)
- **Medium (70-89%)**: Generally safe but review recommended
- **Low (<70%)**: Manual review strongly recommended

Requires N8N_API_URL and N8N_API_KEY environment variables to be configured.`,
    parameters: {
      id: {
        type: 'string',
        required: true,
        description: 'The workflow ID to fix in your n8n instance'
      },
      applyFixes: {
        type: 'boolean',
        required: false,
        description: 'Whether to apply fixes to the workflow (default: false - preview mode). When false, returns proposed fixes without modifying the workflow.'
      },
      fixTypes: {
        type: 'array',
        required: false,
        description: 'Types of fixes to apply. Options: ["expression-format", "typeversion-correction", "error-output-config", "node-type-correction", "webhook-missing-path"]. Default: all types.'
      },
      confidenceThreshold: {
        type: 'string',
        required: false,
        description: 'Minimum confidence level for fixes: "high" (≥90%), "medium" (≥70%), "low" (any). Default: "medium".'
      },
      maxFixes: {
        type: 'number',
        required: false,
        description: 'Maximum number of fixes to apply (default: 50). Useful for limiting scope of changes.'
      }
    },
    returns: `AutoFixResult object containing:
- operations: Array of diff operations that will be/were applied
- fixes: Detailed list of individual fixes with before/after values
- summary: Human-readable summary of fixes
- stats: Statistics by fix type and confidence level
- applied: Boolean indicating if fixes were applied (when applyFixes: true)`,
    examples: [
      'n8n_autofix_workflow({id: "wf_abc123"}) - Preview all possible fixes',
      'n8n_autofix_workflow({id: "wf_abc123", applyFixes: true}) - Apply all medium+ confidence fixes',
      'n8n_autofix_workflow({id: "wf_abc123", applyFixes: true, confidenceThreshold: "high"}) - Only apply high-confidence fixes',
      'n8n_autofix_workflow({id: "wf_abc123", fixTypes: ["expression-format"]}) - Only fix expression format issues',
      'n8n_autofix_workflow({id: "wf_abc123", fixTypes: ["webhook-missing-path"]}) - Only fix webhook path issues',
      'n8n_autofix_workflow({id: "wf_abc123", applyFixes: true, maxFixes: 10}) - Apply up to 10 fixes'
    ],
    useCases: [
      'Fixing workflows imported from older n8n versions',
      'Correcting expression syntax after manual edits',
      'Resolving typeVersion conflicts after n8n upgrades',
      'Cleaning up workflows before production deployment',
      'Batch fixing common issues across multiple workflows',
      'Migrating workflows between n8n instances with different versions',
      'Repairing webhook nodes that lost their path configuration'
    ],
    performance: 'Depends on workflow size and number of issues. Preview mode: 200-500ms. Apply mode: 500-1000ms for medium workflows. Node similarity matching is cached for 5 minutes for improved performance on repeated validations.',
    bestPractices: [
      'Always preview fixes first (applyFixes: false) before applying',
      'Start with high confidence threshold for production workflows',
      'Review the fix summary to understand what changed',
      'Test workflows after auto-fixing to ensure expected behavior',
      'Use fixTypes parameter to target specific issue categories',
      'Keep maxFixes reasonable to avoid too many changes at once'
    ],
    pitfalls: [
      'Some fixes may change workflow behavior - always test after fixing',
      'Low confidence fixes might not be the intended solution',
      'Expression format fixes assume standard n8n syntax requirements',
      'Node type corrections only work for known node types in the database',
      'Cannot fix structural issues like missing nodes or invalid connections',
      'TypeVersion downgrades might remove node features added in newer versions',
      'Generated webhook paths are new UUIDs - existing webhook URLs will change'
    ],
    relatedTools: [
      'n8n_validate_workflow',
      'validate_workflow',
      'n8n_update_partial_workflow',
      'validate_workflow_expressions',
      'validate_node_operation'
    ]
  }
};
```

--------------------------------------------------------------------------------
/tests/unit/MULTI_TENANT_TEST_COVERAGE.md:
--------------------------------------------------------------------------------

```markdown
# Multi-Tenant Support Test Coverage Summary

This document summarizes the comprehensive test suites created for the multi-tenant support implementation in n8n-mcp.

## Test Files Created

### 1. `tests/unit/mcp/multi-tenant-tool-listing.test.ts`
**Focus**: MCP Server ListToolsRequestSchema handler multi-tenant logic

**Coverage Areas**:
- Environment variable configuration (backward compatibility)
- Instance context configuration (multi-tenant support)
- ENABLE_MULTI_TENANT flag support
- shouldIncludeManagementTools logic truth table
- Tool availability logic with different configurations
- Combined configuration scenarios
- Edge cases and security validation
- Tool count validation and structure consistency

**Key Test Scenarios**:
- ✅ Environment variables only (N8N_API_URL, N8N_API_KEY)
- ✅ Instance context only (runtime configuration)
- ✅ Multi-tenant flag only (ENABLE_MULTI_TENANT=true)
- ✅ No configuration (documentation tools only)
- ✅ All combinations of the above
- ✅ Malformed instance context handling
- ✅ Security logging verification

### 2. `tests/unit/types/instance-context-multi-tenant.test.ts`
**Focus**: Enhanced URL validation in instance-context.ts

**Coverage Areas**:
- IPv4 address validation (valid and invalid ranges)
- IPv6 address validation (various formats)
- Localhost and development URLs
- Port validation (1-65535 range)
- Domain name validation (subdomains, TLDs)
- Protocol validation (http/https only)
- Edge cases and malformed URLs
- Real-world n8n deployment patterns
- Security and XSS prevention
- URL encoding handling

**Key Test Scenarios**:
- ✅ Valid IPv4: private networks, public IPs, localhost
- ✅ Invalid IPv4: out-of-range octets, malformed addresses
- ✅ Valid IPv6: loopback, documentation prefix, full addresses
- ✅ Valid ports: 1-65535 range, common development ports
- ✅ Invalid ports: negative, above 65535, non-numeric
- ✅ Domain patterns: subdomains, enterprise domains, development URLs
- ✅ Security validation: XSS attempts, file protocols, injection attempts
- ✅ Real n8n URLs: cloud, tenant, self-hosted patterns

### 3. `tests/unit/http-server/multi-tenant-support.test.ts`
**Focus**: HTTP server multi-tenant functions and session management

**Coverage Areas**:
- Header extraction and type safety
- Instance context creation from headers
- Session ID generation with configuration hashing
- Context switching between tenants
- Security logging with sanitization
- Session management and cleanup
- Race condition prevention
- Memory management

**Key Test Scenarios**:
- ✅ Multi-tenant header extraction (x-n8n-url, x-n8n-key, etc.)
- ✅ Instance context validation from headers
- ✅ Session isolation between tenants
- ✅ Configuration-based session ID generation
- ✅ Header type safety (arrays, non-strings)
- ✅ Missing/corrupt session data handling
- ✅ Memory pressure and cleanup strategies

### 4. `tests/unit/multi-tenant-integration.test.ts`
**Focus**: End-to-end integration testing of multi-tenant features

**Coverage Areas**:
- Real-world URL patterns and validation
- Environment variable handling
- Header processing simulation
- Configuration priority logic
- Session management concepts
- Error scenarios and recovery
- Security validation across components

**Key Test Scenarios**:
- ✅ Complete n8n deployment URL patterns
- ✅ API key validation (valid/invalid patterns)
- ✅ Environment flag handling (ENABLE_MULTI_TENANT)
- ✅ Header processing edge cases
- ✅ Configuration priority matrix
- ✅ Session isolation concepts
- ✅ Comprehensive error handling
- ✅ Specific validation error messages

## Test Coverage Metrics

### Instance Context Validation
- **Statements**: 83.78% (93/111)
- **Branches**: 81.53% (53/65)
- **Functions**: 100% (4/4)
- **Lines**: 83.78% (93/111)

### Test Quality Metrics
- **Total Test Cases**: 200+ individual test scenarios
- **Error Scenarios Covered**: 50+ edge cases and error conditions
- **Security Tests**: 15+ XSS, injection, and protocol abuse tests
- **Integration Scenarios**: 40+ end-to-end validation tests

## Key Features Tested

### Backward Compatibility
- ✅ Environment variable configuration (N8N_API_URL, N8N_API_KEY)
- ✅ Existing tool listing behavior preserved
- ✅ Graceful degradation when multi-tenant features are disabled

### Multi-Tenant Support
- ✅ Runtime instance context configuration
- ✅ HTTP header-based tenant identification
- ✅ Session isolation between tenants
- ✅ Dynamic tool registration based on context

### Security
- ✅ URL validation against XSS and injection attempts
- ✅ API key validation with placeholder detection
- ✅ Sensitive data sanitization in logs
- ✅ Protocol restriction (http/https only)

### Error Handling
- ✅ Graceful handling of malformed configurations
- ✅ Specific error messages for debugging
- ✅ Non-throwing validation functions
- ✅ Recovery from invalid session data

## Test Patterns Used

### Arrange-Act-Assert
All tests follow the clear AAA pattern for maintainability and readability.

### Comprehensive Mocking
- Logger mocking for isolation
- Environment variable mocking for clean state
- Dependency injection for testability

### Data-Driven Testing
- Parameterized tests for URL patterns
- Truth table testing for configuration logic
- Matrix testing for scenario combinations

### Edge Case Coverage
- Boundary value testing (ports, IP ranges)
- Invalid input testing (malformed URLs, empty strings)
- Security testing (XSS, injection attempts)

## Running the Tests

```bash
# Run all multi-tenant tests
npm test tests/unit/mcp/multi-tenant-tool-listing.test.ts
npm test tests/unit/types/instance-context-multi-tenant.test.ts
npm test tests/unit/http-server/multi-tenant-support.test.ts
npm test tests/unit/multi-tenant-integration.test.ts

# Run with coverage
npm run test:coverage

# Run specific test patterns
npm test -- --grep "multi-tenant"
```

## Test Maintenance Notes

### Mock Updates
When updating the logger or other core utilities, ensure mocks are updated accordingly.

### Environment Variables
Tests properly isolate environment variables to prevent cross-test pollution.

### Real-World Patterns
URL validation tests are based on actual n8n deployment patterns and should be updated as new deployment methods are supported.

### Security Tests
Security-focused tests should be regularly reviewed and updated as new attack vectors are discovered.

## Future Test Enhancements

### Performance Testing
- Session management under load
- Memory usage during high tenant count
- Configuration validation performance

### End-to-End Testing
- Full HTTP request/response cycles
- Multi-tenant workflow execution
- Session persistence across requests

### Integration Testing
- Database adapter integration with multi-tenant contexts
- MCP protocol compliance with dynamic tool sets
- Error propagation across component boundaries
```

--------------------------------------------------------------------------------
/src/services/confidence-scorer.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Confidence Scorer for node-specific validations
 *
 * Provides confidence scores for node-specific recommendations,
 * allowing users to understand the reliability of suggestions.
 */

export interface ConfidenceScore {
  value: number; // 0.0 to 1.0
  reason: string;
  factors: ConfidenceFactor[];
}

export interface ConfidenceFactor {
  name: string;
  weight: number;
  matched: boolean;
  description: string;
}

export class ConfidenceScorer {
  /**
   * Calculate confidence score for resource locator recommendation
   */
  static scoreResourceLocatorRecommendation(
    fieldName: string,
    nodeType: string,
    value: string
  ): ConfidenceScore {
    const factors: ConfidenceFactor[] = [];
    let totalWeight = 0;
    let matchedWeight = 0;

    // Factor 1: Exact field name match (highest confidence)
    const exactFieldMatch = this.checkExactFieldMatch(fieldName, nodeType);
    factors.push({
      name: 'exact-field-match',
      weight: 0.5,
      matched: exactFieldMatch,
      description: `Field name '${fieldName}' is known to use resource locator in ${nodeType}`
    });

    // Factor 2: Field name pattern (medium confidence)
    const patternMatch = this.checkFieldPattern(fieldName);
    factors.push({
      name: 'field-pattern',
      weight: 0.3,
      matched: patternMatch,
      description: `Field name '${fieldName}' matches common resource locator patterns`
    });

    // Factor 3: Value pattern (low confidence)
    const valuePattern = this.checkValuePattern(value);
    factors.push({
      name: 'value-pattern',
      weight: 0.1,
      matched: valuePattern,
      description: 'Value contains patterns typical of resource identifiers'
    });

    // Factor 4: Node type category (medium confidence)
    const nodeCategory = this.checkNodeCategory(nodeType);
    factors.push({
      name: 'node-category',
      weight: 0.1,
      matched: nodeCategory,
      description: `Node type '${nodeType}' typically uses resource locators`
    });

    // Calculate final score
    for (const factor of factors) {
      totalWeight += factor.weight;
      if (factor.matched) {
        matchedWeight += factor.weight;
      }
    }

    const score = totalWeight > 0 ? matchedWeight / totalWeight : 0;

    // Determine reason based on score
    let reason: string;
    if (score >= 0.8) {
      reason = 'High confidence: Multiple strong indicators suggest resource locator format';
    } else if (score >= 0.5) {
      reason = 'Medium confidence: Some indicators suggest resource locator format';
    } else if (score >= 0.3) {
      reason = 'Low confidence: Weak indicators for resource locator format';
    } else {
      reason = 'Very low confidence: Minimal evidence for resource locator format';
    }

    return {
      value: score,
      reason,
      factors
    };
  }

  /**
   * Known field mappings with exact matches
   */
  private static readonly EXACT_FIELD_MAPPINGS: Record<string, string[]> = {
    'github': ['owner', 'repository', 'user', 'organization'],
    'googlesheets': ['sheetId', 'documentId', 'spreadsheetId'],
    'googledrive': ['fileId', 'folderId', 'driveId'],
    'slack': ['channel', 'user', 'channelId', 'userId'],
    'notion': ['databaseId', 'pageId', 'blockId'],
    'airtable': ['baseId', 'tableId', 'viewId']
  };

  private static checkExactFieldMatch(fieldName: string, nodeType: string): boolean {
    const nodeBase = nodeType.split('.').pop()?.toLowerCase() || '';

    for (const [pattern, fields] of Object.entries(this.EXACT_FIELD_MAPPINGS)) {
      if (nodeBase === pattern || nodeBase.startsWith(`${pattern}-`)) {
        return fields.includes(fieldName);
      }
    }

    return false;
  }

  /**
   * Common patterns in field names that suggest resource locators
   */
  private static readonly FIELD_PATTERNS = [
    /^.*Id$/i,           // ends with Id
    /^.*Ids$/i,          // ends with Ids
    /^.*Key$/i,          // ends with Key
    /^.*Name$/i,         // ends with Name
    /^.*Path$/i,         // ends with Path
    /^.*Url$/i,          // ends with Url
    /^.*Uri$/i,          // ends with Uri
    /^(table|database|collection|bucket|folder|file|document|sheet|board|project|issue|user|channel|team|organization|repository|owner)$/i
  ];

  private static checkFieldPattern(fieldName: string): boolean {
    return this.FIELD_PATTERNS.some(pattern => pattern.test(fieldName));
  }

  /**
   * Check if the value looks like it contains identifiers
   */
  private static checkValuePattern(value: string): boolean {
    // Remove = prefix if present for analysis
    const content = value.startsWith('=') ? value.substring(1) : value;

    // Skip if not an expression
    if (!content.includes('{{') || !content.includes('}}')) {
      return false;
    }

    // Check for patterns that suggest IDs or resource references
    const patterns = [
      /\{\{.*\.(id|Id|ID|key|Key|name|Name|path|Path|url|Url|uri|Uri).*\}\}/i,
      /\{\{.*_(id|Id|ID|key|Key|name|Name|path|Path|url|Url|uri|Uri).*\}\}/i,
      /\{\{.*(id|Id|ID|key|Key|name|Name|path|Path|url|Url|uri|Uri).*\}\}/i
    ];

    return patterns.some(pattern => pattern.test(content));
  }

  /**
   * Node categories that commonly use resource locators
   */
  private static readonly RESOURCE_HEAVY_NODES = [
    'github', 'gitlab', 'bitbucket',           // Version control
    'googlesheets', 'googledrive', 'dropbox',  // Cloud storage
    'slack', 'discord', 'telegram',            // Communication
    'notion', 'airtable', 'baserow',          // Databases
    'jira', 'asana', 'trello', 'monday',      // Project management
    'salesforce', 'hubspot', 'pipedrive',     // CRM
    'stripe', 'paypal', 'square',             // Payment
    'aws', 'gcp', 'azure',                    // Cloud providers
    'mysql', 'postgres', 'mongodb', 'redis'   // Databases
  ];

  private static checkNodeCategory(nodeType: string): boolean {
    const nodeBase = nodeType.split('.').pop()?.toLowerCase() || '';

    return this.RESOURCE_HEAVY_NODES.some(category =>
      nodeBase.includes(category)
    );
  }

  /**
   * Get confidence level as a string
   */
  static getConfidenceLevel(score: number): 'high' | 'medium' | 'low' | 'very-low' {
    if (score >= 0.8) return 'high';
    if (score >= 0.5) return 'medium';
    if (score >= 0.3) return 'low';
    return 'very-low';
  }

  /**
   * Should apply recommendation based on confidence and threshold
   */
  static shouldApplyRecommendation(
    score: number,
    threshold: 'strict' | 'normal' | 'relaxed' = 'normal'
  ): boolean {
    const thresholds = {
      strict: 0.8,   // Only apply high confidence recommendations
      normal: 0.5,   // Apply medium and high confidence
      relaxed: 0.3   // Apply low, medium, and high confidence
    };

    return score >= thresholds[threshold];
  }
}
```

--------------------------------------------------------------------------------
/tests/test-mcp-tools-integration.js:
--------------------------------------------------------------------------------

```javascript
#!/usr/bin/env node

/**
 * End-to-end test for MCP server tools integration
 * Tests both get_node_source_code and list_available_nodes tools
 */

const { Server } = require('@modelcontextprotocol/sdk/server/index.js');
const { StdioServerTransport } = require('@modelcontextprotocol/sdk/server/stdio.js');
const { N8NMCPServer } = require('../dist/mcp/server');

// Test configuration
const TEST_CONFIG = {
  mcp: {
    port: 3000,
    host: '0.0.0.0',
    authToken: 'test-token'
  },
  n8n: {
    apiUrl: 'http://localhost:5678',
    apiKey: 'test-key'
  }
};

// Mock tool calls
const TEST_REQUESTS = [
  {
    name: 'list_available_nodes',
    description: 'List all available n8n nodes',
    request: {
      name: 'list_available_nodes',
      arguments: {}
    }
  },
  {
    name: 'list_ai_nodes',
    description: 'List AI/LangChain nodes',
    request: {
      name: 'list_available_nodes',
      arguments: {
        category: 'ai'
      }
    }
  },
  {
    name: 'get_function_node',
    description: 'Extract Function node source',
    request: {
      name: 'get_node_source_code',
      arguments: {
        nodeType: 'n8n-nodes-base.Function',
        includeCredentials: true
      }
    }
  },
  {
    name: 'get_ai_agent_node',
    description: 'Extract AI Agent node source',
    request: {
      name: 'get_node_source_code',
      arguments: {
        nodeType: '@n8n/n8n-nodes-langchain.Agent',
        includeCredentials: true
      }
    }
  },
  {
    name: 'get_webhook_node',
    description: 'Extract Webhook node source',
    request: {
      name: 'get_node_source_code',
      arguments: {
        nodeType: 'n8n-nodes-base.Webhook',
        includeCredentials: false
      }
    }
  }
];

async function simulateToolCall(server, toolRequest) {
  console.log(`\n📋 Testing: ${toolRequest.description}`);
  console.log(`   Tool: ${toolRequest.request.name}`);
  console.log(`   Args:`, JSON.stringify(toolRequest.request.arguments, null, 2));
  
  try {
    const startTime = Date.now();
    
    // Directly call the tool handler
    const handler = server.toolHandlers[toolRequest.request.name];
    if (!handler) {
      throw new Error(`Tool handler not found: ${toolRequest.request.name}`);
    }
    
    const result = await handler(toolRequest.request.arguments);
    const elapsed = Date.now() - startTime;
    
    console.log(`   ✅ Success (${elapsed}ms)`);
    
    // Analyze results based on tool type
    if (toolRequest.request.name === 'list_available_nodes') {
      console.log(`   📊 Found ${result.nodes.length} nodes`);
      if (result.nodes.length > 0) {
        console.log(`   Sample nodes:`);
        result.nodes.slice(0, 3).forEach(node => {
          console.log(`     - ${node.name} (${node.packageName || 'unknown'})`);
        });
      }
    } else if (toolRequest.request.name === 'get_node_source_code') {
      console.log(`   📦 Node: ${result.nodeType}`);
      console.log(`   📏 Code size: ${result.sourceCode.length} bytes`);
      console.log(`   📍 Location: ${result.location}`);
      console.log(`   🔐 Has credentials: ${!!result.credentialCode}`);
      console.log(`   📄 Has package info: ${!!result.packageInfo}`);
      
      if (result.packageInfo) {
        console.log(`   📦 Package: ${result.packageInfo.name} v${result.packageInfo.version}`);
      }
    }
    
    return { success: true, result, elapsed };
  } catch (error) {
    console.log(`   ❌ Failed: ${error.message}`);
    return { success: false, error: error.message };
  }
}

async function main() {
  console.log('=== MCP Server Tools Integration Test ===\n');
  
  // Create MCP server instance
  console.log('🚀 Initializing MCP server...');
  const server = new N8NMCPServer(TEST_CONFIG.mcp, TEST_CONFIG.n8n);
  
  // Store tool handlers for direct access
  server.toolHandlers = {};
  
  // Override handler setup to capture handlers
  const originalSetup = server.setupHandlers.bind(server);
  server.setupHandlers = function() {
    originalSetup();
    
    // Capture tool call handler
    const originalHandler = this.server.setRequestHandler;
    this.server.setRequestHandler = function(schema, handler) {
      if (schema.parse && schema.parse({method: 'tools/call'}).method === 'tools/call') {
        // This is the tool call handler
        const toolCallHandler = handler;
        server.handleToolCall = async (args) => {
          const response = await toolCallHandler({ method: 'tools/call', params: args });
          return response.content[0];
        };
      }
      return originalHandler.call(this, schema, handler);
    };
  };
  
  // Re-setup handlers
  server.setupHandlers();
  
  // Extract individual tool handlers
  server.toolHandlers = {
    list_available_nodes: async (args) => server.listAvailableNodes(args),
    get_node_source_code: async (args) => server.getNodeSourceCode(args)
  };
  
  console.log('✅ MCP server initialized\n');
  
  // Test statistics
  const stats = {
    total: 0,
    passed: 0,
    failed: 0,
    results: []
  };
  
  // Run all test requests
  for (const testRequest of TEST_REQUESTS) {
    stats.total++;
    const result = await simulateToolCall(server, testRequest);
    stats.results.push({
      name: testRequest.name,
      ...result
    });
    
    if (result.success) {
      stats.passed++;
    } else {
      stats.failed++;
    }
  }
  
  // Summary
  console.log('\n' + '='.repeat(60));
  console.log('TEST SUMMARY');
  console.log('='.repeat(60));
  console.log(`Total tests: ${stats.total}`);
  console.log(`Passed: ${stats.passed} ✅`);
  console.log(`Failed: ${stats.failed} ❌`);
  console.log(`Success rate: ${((stats.passed / stats.total) * 100).toFixed(1)}%`);
  
  // Detailed results
  console.log('\nDetailed Results:');
  stats.results.forEach(result => {
    const status = result.success ? '✅' : '❌';
    const time = result.elapsed ? ` (${result.elapsed}ms)` : '';
    console.log(`  ${status} ${result.name}${time}`);
    if (!result.success) {
      console.log(`     Error: ${result.error}`);
    }
  });
  
  console.log('\n✨ MCP tools integration test completed!');
  
  // Test database storage capability
  console.log('\n📊 Database Storage Capability:');
  const sampleExtraction = stats.results.find(r => r.success && r.result && r.result.sourceCode);
  if (sampleExtraction) {
    console.log('✅ Node extraction produces database-ready structure');
    console.log('✅ Includes source code, hash, location, and metadata');
    console.log('✅ Ready for bulk extraction and storage');
  } else {
    console.log('⚠️  No successful extraction to verify database structure');
  }
  
  process.exit(stats.failed > 0 ? 1 : 0);
}

// Handle errors
process.on('unhandledRejection', (error) => {
  console.error('\n💥 Unhandled error:', error);
  process.exit(1);
});

// Run the test
main();
```

--------------------------------------------------------------------------------
/docker/docker-entrypoint.sh:
--------------------------------------------------------------------------------

```bash
#!/bin/sh
set -e

# Load configuration from JSON file if it exists
if [ -f "/app/config.json" ] && [ -f "/app/docker/parse-config.js" ]; then
    # Use Node.js to generate shell-safe export commands
    eval $(node /app/docker/parse-config.js /app/config.json)
fi

# Helper function for safe logging (prevents stdio mode corruption)
log_message() {
    [ "$MCP_MODE" != "stdio" ] && echo "$@"
}

# Environment variable validation
if [ "$MCP_MODE" = "http" ] && [ -z "$AUTH_TOKEN" ] && [ -z "$AUTH_TOKEN_FILE" ]; then
    log_message "ERROR: AUTH_TOKEN or AUTH_TOKEN_FILE is required for HTTP mode" >&2
    exit 1
fi

# Validate AUTH_TOKEN_FILE if provided
if [ -n "$AUTH_TOKEN_FILE" ] && [ ! -f "$AUTH_TOKEN_FILE" ]; then
    log_message "ERROR: AUTH_TOKEN_FILE specified but file not found: $AUTH_TOKEN_FILE" >&2
    exit 1
fi

# Database path configuration - respect NODE_DB_PATH if set
if [ -n "$NODE_DB_PATH" ]; then
    # Basic validation - must end with .db
    case "$NODE_DB_PATH" in
        *.db) ;;
        *) log_message "ERROR: NODE_DB_PATH must end with .db" >&2; exit 1 ;;
    esac
    
    # Use the path as-is (Docker paths should be absolute anyway)
    DB_PATH="$NODE_DB_PATH"
else
    DB_PATH="/app/data/nodes.db"
fi

DB_DIR=$(dirname "$DB_PATH")

# Ensure database directory exists with correct ownership
if [ ! -d "$DB_DIR" ]; then
    log_message "Creating database directory: $DB_DIR"
    if [ "$(id -u)" = "0" ]; then
        # Create as root but immediately fix ownership
        mkdir -p "$DB_DIR" && chown nodejs:nodejs "$DB_DIR"
    else
        mkdir -p "$DB_DIR"
    fi
fi

# Database initialization with file locking to prevent race conditions
if [ ! -f "$DB_PATH" ]; then
    log_message "Database not found at $DB_PATH. Initializing..."
    
    # Ensure lock directory exists before attempting to create lock
    mkdir -p "$DB_DIR"
    
    # Check if flock is available
    if command -v flock >/dev/null 2>&1; then
        # Use a lock file to prevent multiple containers from initializing simultaneously
        # Try to create lock file, handle permission errors gracefully
        LOCK_FILE="$DB_DIR/.db.lock"
        
        # Ensure we can create the lock file - fix permissions if running as root
        if [ "$(id -u)" = "0" ] && [ ! -w "$DB_DIR" ]; then
            chown nodejs:nodejs "$DB_DIR" 2>/dev/null || true
            chmod 755 "$DB_DIR" 2>/dev/null || true
        fi
        
        # Try to create lock file with proper error handling
        if touch "$LOCK_FILE" 2>/dev/null; then
            (
                flock -x 200
                # Double-check inside the lock
                if [ ! -f "$DB_PATH" ]; then
                    log_message "Initializing database at $DB_PATH..."
                    cd /app && NODE_DB_PATH="$DB_PATH" node dist/scripts/rebuild.js || {
                        log_message "ERROR: Database initialization failed" >&2
                        exit 1
                    }
                fi
            ) 200>"$LOCK_FILE"
        else
            log_message "WARNING: Cannot create lock file at $LOCK_FILE, proceeding without file locking"
            # Fallback without locking if we can't create the lock file
            if [ ! -f "$DB_PATH" ]; then
                log_message "Initializing database at $DB_PATH..."
                cd /app && NODE_DB_PATH="$DB_PATH" node dist/scripts/rebuild.js || {
                    log_message "ERROR: Database initialization failed" >&2
                    exit 1
                }
            fi
        fi
    else
        # Fallback without locking (log warning)
        log_message "WARNING: flock not available, database initialization may have race conditions"
        if [ ! -f "$DB_PATH" ]; then
            log_message "Initializing database at $DB_PATH..."
            cd /app && NODE_DB_PATH="$DB_PATH" node dist/scripts/rebuild.js || {
                log_message "ERROR: Database initialization failed" >&2
                exit 1
            }
        fi
    fi
fi

# Fix permissions if running as root (for development)
if [ "$(id -u)" = "0" ]; then
    log_message "Running as root, fixing permissions..."
    chown -R nodejs:nodejs "$DB_DIR"
    # Also ensure /app/data exists for backward compatibility
    if [ -d "/app/data" ]; then
        chown -R nodejs:nodejs /app/data
    fi
    # Switch to nodejs user with proper exec chain for signal propagation
    # Build the command to execute
    if [ $# -eq 0 ]; then
        # No arguments provided, use default CMD from Dockerfile
        set -- node /app/dist/mcp/index.js
    fi
    # Export all needed environment variables
    export MCP_MODE="$MCP_MODE"
    export NODE_DB_PATH="$NODE_DB_PATH"
    export AUTH_TOKEN="$AUTH_TOKEN"
    export AUTH_TOKEN_FILE="$AUTH_TOKEN_FILE"
    
    # Ensure AUTH_TOKEN_FILE has restricted permissions for security
    if [ -n "$AUTH_TOKEN_FILE" ] && [ -f "$AUTH_TOKEN_FILE" ]; then
        chmod 600 "$AUTH_TOKEN_FILE" 2>/dev/null || true
        chown nodejs:nodejs "$AUTH_TOKEN_FILE" 2>/dev/null || true
    fi
    # Use exec with su-exec for proper signal handling (Alpine Linux)
    # su-exec advantages:
    # - Proper signal forwarding (critical for container shutdown)
    # - No intermediate shell process
    # - Designed for privilege dropping in containers
    if command -v su-exec >/dev/null 2>&1; then
        exec su-exec nodejs "$@"
    else
        # Fallback to su with preserved environment
        # Use safer approach to prevent command injection
        exec su -p nodejs -s /bin/sh -c 'exec "$0" "$@"' -- sh -c 'exec "$@"' -- "$@"
    fi
fi

# Handle special commands
if [ "$1" = "n8n-mcp" ] && [ "$2" = "serve" ]; then
    # Set HTTP mode for "n8n-mcp serve" command
    export MCP_MODE="http"
    shift 2  # Remove "n8n-mcp serve" from arguments
    set -- node /app/dist/mcp/index.js "$@"
fi

# Export NODE_DB_PATH so it's visible to child processes
if [ -n "$DB_PATH" ]; then
    export NODE_DB_PATH="$DB_PATH"
fi

# Execute the main command directly with exec
# This ensures our Node.js process becomes PID 1 and receives signals directly
if [ "$MCP_MODE" = "stdio" ]; then
    # Debug: Log to stderr to check if wrapper exists
    if [ "$DEBUG_DOCKER" = "true" ]; then
        echo "MCP_MODE is stdio, checking for wrapper..." >&2
        ls -la /app/dist/mcp/stdio-wrapper.js >&2 || echo "Wrapper not found!" >&2
    fi
    
    if [ -f "/app/dist/mcp/stdio-wrapper.js" ]; then
        # Use the stdio wrapper for clean JSON-RPC output
        # exec replaces the shell with node process as PID 1
        exec node /app/dist/mcp/stdio-wrapper.js
    else
        # Fallback: run with explicit environment
        exec env MCP_MODE=stdio DISABLE_CONSOLE_OUTPUT=true LOG_LEVEL=error node /app/dist/mcp/index.js
    fi
else
    # HTTP mode or other
    if [ $# -eq 0 ]; then
        # No arguments provided, use default
        exec node /app/dist/mcp/index.js
    else
        exec "$@"
    fi
fi
```

--------------------------------------------------------------------------------
/docs/README_CLAUDE_SETUP.md:
--------------------------------------------------------------------------------

```markdown
# Claude Desktop Configuration for n8n-MCP

This guide helps you connect n8n-MCP to Claude Desktop, giving Claude comprehensive knowledge about n8n's 525 workflow automation nodes, including 263 AI-capable tools.

## 🎯 Prerequisites

- Claude Desktop installed
- For local installation: Node.js (any version)
- For Docker: Docker installed (see installation instructions in main README)

## 🛠️ Configuration Methods

### Method 1: Local Installation (Recommended) 💻

1. **Install and build:**
   ```bash
   git clone https://github.com/czlonkowski/n8n-mcp.git
   cd n8n-mcp
   npm install
   npm run build
   npm run rebuild
   ```

2. **Configure Claude Desktop:**
   ```json
   {
     "mcpServers": {
       "n8n-mcp": {
         "command": "node",
         "args": ["/absolute/path/to/n8n-mcp/dist/mcp/index.js"],
         "env": {
           "NODE_ENV": "production",
           "LOG_LEVEL": "error",
           "MCP_MODE": "stdio",
           "DISABLE_CONSOLE_OUTPUT": "true"
         }
       }
     }
   }
   ```

⚠️ **Important**: 
- Use absolute paths, not relative paths
- The environment variables shown above are critical for proper stdio communication

### Method 2: Docker 🐳

No installation needed - runs directly from Docker:

```json
{
  "mcpServers": {
    "n8n-mcp": {
      "command": "docker",
      "args": [
        "run", "-i", "--rm",
        "-e", "MCP_MODE=stdio",
        "-e", "LOG_LEVEL=error",
        "-e", "DISABLE_CONSOLE_OUTPUT=true",
        "ghcr.io/czlonkowski/n8n-mcp:latest"
      ]
    }
  }
}
```

✨ **Benefits**: No setup required, always up-to-date, isolated environment.

### Method 3: Remote Server Connection (Advanced)

⚠️ **Note**: Remote connections are complex and may have compatibility issues. Consider using local installation instead.

For production deployments with multiple users:

1. **Deploy server with HTTP mode** (see [HTTP Deployment Guide](./HTTP_DEPLOYMENT.md))

2. **Connect using custom HTTP client:**
   ```json
   {
     "mcpServers": {
       "n8n-remote": {
         "command": "node",
         "args": [
           "/path/to/n8n-mcp/scripts/mcp-http-client.js",
           "http://your-server.com:3000/mcp"
         ],
         "env": {
           "MCP_AUTH_TOKEN": "your-auth-token"
         }
       }
     }
   }
   ```

📝 **Note**: Native remote MCP support is available in Claude Pro/Team/Enterprise via Settings > Integrations.

## 📁 Configuration File Locations

Find your `claude_desktop_config.json` file:

- **macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json`
- **Windows**: `%APPDATA%\Claude\claude_desktop_config.json`
- **Linux**: `~/.config/Claude/claude_desktop_config.json`

🔄 **Important**: After editing, restart Claude Desktop (Cmd/Ctrl+R or quit and reopen).

## ✅ Verify Installation

After restarting Claude Desktop:

1. Look for "n8n-docker" or "n8n-documentation" in the MCP servers list
2. Try asking Claude: "What n8n nodes are available for working with Slack?"
3. Or use a tool directly: "Use the list_nodes tool to show me trigger nodes"

## 🔧 Available Tools (v2.5.1)

### Essential Tool - Start Here!
- **`tools_documentation`** - Get documentation for any MCP tool (ALWAYS use this first!)

### Core Tools
- **`list_nodes`** - List all n8n nodes with filtering options
- **`get_node_info`** - Get comprehensive information (now includes aiToolCapabilities)
- **`get_node_essentials`** - Get only 10-20 essential properties (95% smaller!)
- **`search_nodes`** - Full-text search across all node documentation
- **`search_node_properties`** - Find specific properties within nodes
- **`get_node_documentation`** - Get parsed documentation from n8n-docs
- **`get_database_statistics`** - View database metrics and coverage

### AI Tools (Enhanced in v2.5.1)
- **`list_ai_tools`** - List AI-capable nodes (ANY node can be used as AI tool!)
- **`get_node_as_tool_info`** - Get guidance on using any node as an AI tool

### Task & Template Tools
- **`get_node_for_task`** - Pre-configured node settings for common tasks
- **`list_tasks`** - Discover available task templates
- **`list_node_templates`** - Find workflow templates using specific nodes
- **`get_template`** - Get complete workflow JSON for import
- **`search_templates`** - Search templates by keywords
- **`get_templates_for_task`** - Get curated templates for common tasks

### Validation Tools (Professional Grade)
- **`validate_node_operation`** - Smart validation with operation awareness
- **`validate_node_minimal`** - Quick validation for just required fields
- **`validate_workflow`** - Complete workflow validation (validates AI tool connections)
- **`validate_workflow_connections`** - Check workflow structure
- **`validate_workflow_expressions`** - Validate n8n expressions including $fromAI()
- **`get_property_dependencies`** - Analyze property visibility conditions

### Example Questions to Ask Claude:
- "Show me all n8n nodes for working with databases"
- "How do I use the HTTP Request node?"
- "Get the essentials for Slack node" (uses get_node_essentials)
- "How can I use Google Sheets as an AI tool?"
- "Validate my workflow before deployment"
- "Find templates for webhook automation"

## 🔍 Troubleshooting

### Server Not Appearing in Claude

1. **Check JSON syntax**: 
   ```bash
   # Validate your config file
   cat ~/Library/Application\ Support/Claude/claude_desktop_config.json | jq .
   ```

2. **Verify paths are absolute** (not relative)

3. **Restart Claude Desktop completely** (quit and reopen)

### Remote Connection Issues

**"TransformStream is not defined" error:**
- Cause: Node.js version < 18
- Fix: Update Node.js to v18 or newer
  ```bash
  node --version  # Should be v18.0.0 or higher
  ```

**"Server disconnected" error:**
- Check AUTH_TOKEN matches between server and client
- Verify server is running: `curl https://your-server.com/health`
- Check for VPN interference

### Docker Issues

**"Cannot find image" error:**
```bash
# Pull the latest image
docker pull ghcr.io/czlonkowski/n8n-mcp:latest
```

**Permission denied:**
```bash
# Ensure Docker is running
docker ps
```

### Common Issues

**"Expected ',' or ']' after array element" errors in logs:**
- Cause: Console output interfering with stdio communication
- Fix: Ensure all required environment variables are set:
  - `MCP_MODE=stdio`
  - `LOG_LEVEL=error`
  - `DISABLE_CONSOLE_OUTPUT=true`

**"NODE_MODULE_VERSION mismatch" warnings:**
- Not a problem! The server automatically falls back to a pure JavaScript implementation
- The warnings are suppressed with proper environment variables

**Server appears but tools don't work:**
- Check that you've built the project: `npm run build`
- Verify the database exists: `npm run rebuild`
- Restart Claude Desktop completely (quit and reopen)

### Quick Fixes

- 🔄 **Always restart Claude** after config changes
- 📋 **Copy example configs exactly** (watch for typos)
- 📂 **Use absolute paths** (/Users/... not ~/...)
- 🔍 **Check logs**: View > Developer > Logs in Claude Desktop
- 🛑 **Set all environment variables** shown in the examples

For more help, see [Troubleshooting Guide](./TROUBLESHOOTING.md)
```

--------------------------------------------------------------------------------
/.github/workflows/update-n8n-deps.yml:
--------------------------------------------------------------------------------

```yaml
name: Update n8n Dependencies

on:
  # Run every Monday at 9 AM UTC
  schedule:
    - cron: '0 9 * * 1'
  
  # Allow manual trigger
  workflow_dispatch:
    inputs:
      create_pr:
        description: 'Create a PR for updates'
        required: true
        type: boolean
        default: true
      auto_merge:
        description: 'Auto-merge PR if tests pass'
        required: true
        type: boolean
        default: false

jobs:
  check-and-update:
    runs-on: ubuntu-latest
    
    permissions:
      contents: write
      pull-requests: write
    
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          lfs: true
      
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'npm'
      
      - name: Check for updates (dry run)
        id: check
        run: |
          # Ensure we're in the right directory
          cd ${{ github.workspace }}
          
          # First do a dry run to check if updates are needed
          node scripts/update-n8n-deps.js --dry-run > update-check.log 2>&1 || {
            echo "❌ Error running update check:"
            cat update-check.log
            exit 1
          }
          
          # Check if updates are available
          if grep -q "update available" update-check.log; then
            echo "updates_available=true" >> $GITHUB_OUTPUT
            echo "📦 Updates available!"
          else
            echo "updates_available=false" >> $GITHUB_OUTPUT
            echo "✅ All dependencies are up to date"
          fi
          
          # Show the check results
          cat update-check.log
      
      - name: Apply updates
        if: steps.check.outputs.updates_available == 'true'
        id: update
        run: |
          # Ensure we're in the right directory
          cd ${{ github.workspace }}
          
          # Run the actual update
          node scripts/update-n8n-deps.js || {
            echo "❌ Error running update:"
            exit 1
          }
          
          # Check if files changed
          if git diff --quiet; then
            echo "files_changed=false" >> $GITHUB_OUTPUT
          else
            echo "files_changed=true" >> $GITHUB_OUTPUT
          fi
      
      - name: Create update branch
        if: steps.update.outputs.files_changed == 'true' && (github.event_name == 'schedule' || inputs.create_pr)
        id: branch
        run: |
          BRANCH_NAME="update-n8n-deps-$(date +%Y%m%d)"
          echo "branch_name=$BRANCH_NAME" >> $GITHUB_OUTPUT
          
          git config user.name "github-actions[bot]"
          git config user.email "github-actions[bot]@users.noreply.github.com"
          
          git checkout -b $BRANCH_NAME
          git add package.json package-lock.json
          
          # Get update summary (file is written by the update script)
          UPDATE_SUMMARY=$(cat update-summary.txt 2>/dev/null || echo "Updated n8n dependencies")
          
          # Create commit message using heredoc
          COMMIT_MSG=$(cat <<'COMMIT_EOF'
          chore: update n8n dependencies
          
          ${UPDATE_SUMMARY}
          
          🤖 Automated dependency update
          COMMIT_EOF
          )
          # Replace placeholder with actual summary
          COMMIT_MSG="${COMMIT_MSG//\${UPDATE_SUMMARY}/$UPDATE_SUMMARY}"
          
          git commit -m "$COMMIT_MSG"
          git push origin $BRANCH_NAME
          
          # Save update summary as output for PR
          {
            echo 'UPDATE_SUMMARY<<EOF'
            if [ -f update-summary.txt ]; then
              cat update-summary.txt
            else
              echo "See commit for details"
            fi
            echo 'EOF'
          } >> $GITHUB_OUTPUT
      
      - name: Create Pull Request
        if: steps.branch.outputs.branch_name != ''
        uses: peter-evans/create-pull-request@v5
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          branch: ${{ steps.branch.outputs.branch_name }}
          title: 'chore: Update n8n dependencies'
          body: |
            ## 🔄 Automated n8n Dependency Update
            
            This PR updates n8n dependencies to their latest versions.
            
            ### 📦 Updates
            ```
            ${{ steps.update.outputs.UPDATE_SUMMARY }}
            ```
            
            ### ✅ Validation
            - [x] Dependencies updated
            - [x] Lock file updated
            - [x] Database rebuilt successfully
            - [x] All tests passed
            
            ### 🔍 Review Checklist
            - [ ] Review the [n8n release notes](https://docs.n8n.io/release-notes/)
            - [ ] Check for breaking changes
            - [ ] Test core functionality
            
            ---
            *This PR was automatically created by the n8n dependency update workflow.*
          labels: |
            dependencies
            automated
          assignees: ${{ github.repository_owner }}
          
      - name: Auto-merge PR (if enabled)
        if: steps.branch.outputs.branch_name != '' && inputs.auto_merge
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          # Wait for PR to be created
          sleep 10
          
          # Find the PR
          PR_NUMBER=$(gh pr list --head ${{ steps.branch.outputs.branch_name }} --json number -q '.[0].number')
          
          if [ -n "$PR_NUMBER" ]; then
            echo "Auto-merging PR #$PR_NUMBER..."
            gh pr merge $PR_NUMBER --merge --auto
          fi

  # Direct commit option (for manual trigger)
  direct-update:
    if: github.event_name == 'workflow_dispatch' && !inputs.create_pr
    runs-on: ubuntu-latest
    
    permissions:
      contents: write
    
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          lfs: true
      
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'npm'
      
      - name: Update dependencies
        run: |
          node scripts/update-n8n-deps.js
          
          # Check if files changed
          if ! git diff --quiet; then
            git config user.name "github-actions[bot]"
            git config user.email "github-actions[bot]@users.noreply.github.com"
            
            git add package.json package-lock.json
            
            # Get update summary
            UPDATE_SUMMARY=$(cat update-summary.txt || echo "Updated n8n dependencies")
            
            # Create commit message using heredoc
            COMMIT_MSG=$(cat <<'COMMIT_EOF'
            chore: update n8n dependencies
            
            ${UPDATE_SUMMARY}
            
            🤖 Automated dependency update
            COMMIT_EOF
            )
            # Replace placeholder with actual summary
            COMMIT_MSG="${COMMIT_MSG//\${UPDATE_SUMMARY}/$UPDATE_SUMMARY}"
            
            git commit -m "$COMMIT_MSG"
            
            git push
          else
            echo "No updates needed"
          fi
```

--------------------------------------------------------------------------------
/src/utils/node-type-normalizer.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Universal Node Type Normalizer - FOR DATABASE OPERATIONS ONLY
 *
 * ⚠️ WARNING: Do NOT use before n8n API calls!
 *
 * This class converts node types to SHORT form (database format).
 * The n8n API requires FULL form (n8n-nodes-base.*).
 *
 * **Use this ONLY when:**
 * - Querying the node database
 * - Searching for node information
 * - Looking up node metadata
 *
 * **Do NOT use before:**
 * - Creating workflows (n8n_create_workflow)
 * - Updating workflows (n8n_update_workflow)
 * - Any n8n API calls
 *
 * **IMPORTANT:** The n8n-mcp database stores nodes in SHORT form:
 * - n8n-nodes-base → nodes-base
 * - @n8n/n8n-nodes-langchain → nodes-langchain
 *
 * But the n8n API requires FULL form:
 * - nodes-base → n8n-nodes-base
 * - nodes-langchain → @n8n/n8n-nodes-langchain
 *
 * @example Database Lookup (CORRECT usage)
 * const dbType = NodeTypeNormalizer.normalizeToFullForm('n8n-nodes-base.webhook')
 * // → 'nodes-base.webhook'
 * const node = await repository.getNode(dbType)
 *
 * @example API Call (INCORRECT - Do NOT do this!)
 * const workflow = { nodes: [{ type: 'n8n-nodes-base.webhook' }] }
 * const normalized = NodeTypeNormalizer.normalizeWorkflowNodeTypes(workflow)
 * // ❌ WRONG! normalized has SHORT form, API needs FULL form
 * await client.createWorkflow(normalized) // FAILS!
 *
 * @example API Call (CORRECT)
 * const workflow = { nodes: [{ type: 'n8n-nodes-base.webhook' }] }
 * // ✅ Send as-is to API (FULL form required)
 * await client.createWorkflow(workflow) // WORKS!
 */

export interface NodeTypeNormalizationResult {
  original: string;
  normalized: string;
  wasNormalized: boolean;
  package: 'base' | 'langchain' | 'community' | 'unknown';
}

export class NodeTypeNormalizer {
  /**
   * Normalize node type to canonical SHORT form (database format)
   *
   * This is the PRIMARY method to use throughout the codebase.
   * It converts any node type variation to the SHORT form that the database uses.
   *
   * **NOTE:** Method name says "ToFullForm" for backward compatibility,
   * but actually normalizes TO SHORT form to match database storage.
   *
   * @param type - Node type in any format
   * @returns Normalized node type in short form (database format)
   *
   * @example
   * normalizeToFullForm('n8n-nodes-base.webhook')
   * // → 'nodes-base.webhook'
   *
   * @example
   * normalizeToFullForm('nodes-base.webhook')
   * // → 'nodes-base.webhook' (unchanged)
   *
   * @example
   * normalizeToFullForm('@n8n/n8n-nodes-langchain.agent')
   * // → 'nodes-langchain.agent'
   */
  static normalizeToFullForm(type: string): string {
    if (!type || typeof type !== 'string') {
      return type;
    }

    // Normalize full forms to short form (database format)
    if (type.startsWith('n8n-nodes-base.')) {
      return type.replace(/^n8n-nodes-base\./, 'nodes-base.');
    }
    if (type.startsWith('@n8n/n8n-nodes-langchain.')) {
      return type.replace(/^@n8n\/n8n-nodes-langchain\./, 'nodes-langchain.');
    }
    // Handle n8n-nodes-langchain without @n8n/ prefix
    if (type.startsWith('n8n-nodes-langchain.')) {
      return type.replace(/^n8n-nodes-langchain\./, 'nodes-langchain.');
    }

    // Already in short form or community node - return unchanged
    return type;
  }

  /**
   * Normalize with detailed result including metadata
   *
   * Use this when you need to know if normalization occurred
   * or what package the node belongs to.
   *
   * @param type - Node type in any format
   * @returns Detailed normalization result
   *
   * @example
   * normalizeWithDetails('nodes-base.webhook')
   * // → {
   * //   original: 'nodes-base.webhook',
   * //   normalized: 'n8n-nodes-base.webhook',
   * //   wasNormalized: true,
   * //   package: 'base'
   * // }
   */
  static normalizeWithDetails(type: string): NodeTypeNormalizationResult {
    const original = type;
    const normalized = this.normalizeToFullForm(type);

    return {
      original,
      normalized,
      wasNormalized: original !== normalized,
      package: this.detectPackage(normalized)
    };
  }

  /**
   * Detect package type from node type
   *
   * @param type - Node type (in any form)
   * @returns Package identifier
   */
  private static detectPackage(type: string): 'base' | 'langchain' | 'community' | 'unknown' {
    // Check both short and full forms
    if (type.startsWith('nodes-base.') || type.startsWith('n8n-nodes-base.')) return 'base';
    if (type.startsWith('nodes-langchain.') || type.startsWith('@n8n/n8n-nodes-langchain.') || type.startsWith('n8n-nodes-langchain.')) return 'langchain';
    if (type.includes('.')) return 'community';
    return 'unknown';
  }

  /**
   * Batch normalize multiple node types
   *
   * Use this when you need to normalize multiple types at once.
   *
   * @param types - Array of node types
   * @returns Map of original → normalized types
   *
   * @example
   * normalizeBatch(['nodes-base.webhook', 'nodes-base.set'])
   * // → Map {
   * //   'nodes-base.webhook' => 'n8n-nodes-base.webhook',
   * //   'nodes-base.set' => 'n8n-nodes-base.set'
   * // }
   */
  static normalizeBatch(types: string[]): Map<string, string> {
    const result = new Map<string, string>();
    for (const type of types) {
      result.set(type, this.normalizeToFullForm(type));
    }
    return result;
  }

  /**
   * Normalize all node types in a workflow
   *
   * This is the key method for fixing workflows before validation.
   * It normalizes all node types in place while preserving all other
   * workflow properties.
   *
   * @param workflow - Workflow object with nodes array
   * @returns Workflow with normalized node types
   *
   * @example
   * const workflow = {
   *   nodes: [
   *     { type: 'nodes-base.webhook', id: '1', name: 'Webhook' },
   *     { type: 'nodes-base.set', id: '2', name: 'Set' }
   *   ],
   *   connections: {}
   * };
   * const normalized = normalizeWorkflowNodeTypes(workflow);
   * // workflow.nodes[0].type → 'n8n-nodes-base.webhook'
   * // workflow.nodes[1].type → 'n8n-nodes-base.set'
   */
  static normalizeWorkflowNodeTypes(workflow: any): any {
    if (!workflow?.nodes || !Array.isArray(workflow.nodes)) {
      return workflow;
    }

    return {
      ...workflow,
      nodes: workflow.nodes.map((node: any) => ({
        ...node,
        type: this.normalizeToFullForm(node.type)
      }))
    };
  }

  /**
   * Check if a node type is in full form (needs normalization)
   *
   * @param type - Node type to check
   * @returns True if in full form (will be normalized to short)
   */
  static isFullForm(type: string): boolean {
    if (!type || typeof type !== 'string') {
      return false;
    }

    return (
      type.startsWith('n8n-nodes-base.') ||
      type.startsWith('@n8n/n8n-nodes-langchain.') ||
      type.startsWith('n8n-nodes-langchain.')
    );
  }

  /**
   * Check if a node type is in short form (database format)
   *
   * @param type - Node type to check
   * @returns True if in short form (already in database format)
   */
  static isShortForm(type: string): boolean {
    if (!type || typeof type !== 'string') {
      return false;
    }

    return (
      type.startsWith('nodes-base.') ||
      type.startsWith('nodes-langchain.')
    );
  }
}

```

--------------------------------------------------------------------------------
/src/types/instance-context.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Instance Context for flexible configuration support
 *
 * Allows the n8n-mcp engine to accept instance-specific configuration
 * at runtime, enabling flexible deployment scenarios while maintaining
 * backward compatibility with environment-based configuration.
 */

export interface InstanceContext {
  /**
   * Instance-specific n8n API configuration
   * When provided, these override environment variables
   */
  n8nApiUrl?: string;
  n8nApiKey?: string;
  n8nApiTimeout?: number;
  n8nApiMaxRetries?: number;

  /**
   * Instance identification
   * Used for session management and logging
   */
  instanceId?: string;
  sessionId?: string;

  /**
   * Extensible metadata for future use
   * Allows passing additional configuration without interface changes
   */
  metadata?: Record<string, any>;
}

/**
 * Validate URL format with enhanced checks
 */
function isValidUrl(url: string): boolean {
  try {
    const parsed = new URL(url);

    // Allow only http and https protocols
    if (parsed.protocol !== 'http:' && parsed.protocol !== 'https:') {
      return false;
    }

    // Check for reasonable hostname (not empty or invalid)
    if (!parsed.hostname || parsed.hostname.length === 0) {
      return false;
    }

    // Validate port if present
    if (parsed.port && (isNaN(Number(parsed.port)) || Number(parsed.port) < 1 || Number(parsed.port) > 65535)) {
      return false;
    }

    // Allow localhost, IP addresses, and domain names
    const hostname = parsed.hostname.toLowerCase();

    // Allow localhost for development
    if (hostname === 'localhost' || hostname === '127.0.0.1' || hostname === '::1') {
      return true;
    }

    // Basic IPv4 address validation
    const ipv4Pattern = /^(\d{1,3}\.){3}\d{1,3}$/;
    if (ipv4Pattern.test(hostname)) {
      const parts = hostname.split('.');
      return parts.every(part => {
        const num = parseInt(part, 10);
        return num >= 0 && num <= 255;
      });
    }

    // Basic IPv6 pattern check (simplified)
    if (hostname.includes(':') || hostname.startsWith('[') && hostname.endsWith(']')) {
      // Basic IPv6 validation - just checking it's not obviously wrong
      return true;
    }

    // Domain name validation - allow subdomains and TLDs
    const domainPattern = /^([a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?\.)*[a-zA-Z]([a-zA-Z0-9-]*[a-zA-Z0-9])?$/;
    return domainPattern.test(hostname);
  } catch {
    return false;
  }
}

/**
 * Validate API key format (basic check for non-empty string)
 */
function isValidApiKey(key: string): boolean {
  // API key should be non-empty and not contain obvious placeholder values
  return key.length > 0 &&
         !key.toLowerCase().includes('your_api_key') &&
         !key.toLowerCase().includes('placeholder') &&
         !key.toLowerCase().includes('example');
}

/**
 * Type guard to check if an object is an InstanceContext
 */
export function isInstanceContext(obj: any): obj is InstanceContext {
  if (!obj || typeof obj !== 'object') return false;

  // Check for known properties with validation
  const hasValidUrl = obj.n8nApiUrl === undefined ||
    (typeof obj.n8nApiUrl === 'string' && isValidUrl(obj.n8nApiUrl));

  const hasValidKey = obj.n8nApiKey === undefined ||
    (typeof obj.n8nApiKey === 'string' && isValidApiKey(obj.n8nApiKey));

  const hasValidTimeout = obj.n8nApiTimeout === undefined ||
    (typeof obj.n8nApiTimeout === 'number' && obj.n8nApiTimeout > 0);

  const hasValidRetries = obj.n8nApiMaxRetries === undefined ||
    (typeof obj.n8nApiMaxRetries === 'number' && obj.n8nApiMaxRetries >= 0);

  const hasValidInstanceId = obj.instanceId === undefined || typeof obj.instanceId === 'string';
  const hasValidSessionId = obj.sessionId === undefined || typeof obj.sessionId === 'string';
  const hasValidMetadata = obj.metadata === undefined ||
    (typeof obj.metadata === 'object' && obj.metadata !== null);

  return hasValidUrl && hasValidKey && hasValidTimeout && hasValidRetries &&
         hasValidInstanceId && hasValidSessionId && hasValidMetadata;
}

/**
 * Validate and sanitize InstanceContext
 * Provides field-specific error messages for better debugging
 */
export function validateInstanceContext(context: InstanceContext): {
  valid: boolean;
  errors?: string[]
} {
  const errors: string[] = [];

  // Validate URL if provided (even empty string should be validated)
  if (context.n8nApiUrl !== undefined) {
    if (context.n8nApiUrl === '') {
      errors.push(`Invalid n8nApiUrl: empty string - URL is required when field is provided`);
    } else if (!isValidUrl(context.n8nApiUrl)) {
      // Provide specific reason for URL invalidity
      try {
        const parsed = new URL(context.n8nApiUrl);
        if (parsed.protocol !== 'http:' && parsed.protocol !== 'https:') {
          errors.push(`Invalid n8nApiUrl: URL must use HTTP or HTTPS protocol, got ${parsed.protocol}`);
        }
      } catch {
        errors.push(`Invalid n8nApiUrl: URL format is malformed or incomplete`);
      }
    }
  }

  // Validate API key if provided
  if (context.n8nApiKey !== undefined) {
    if (context.n8nApiKey === '') {
      errors.push(`Invalid n8nApiKey: empty string - API key is required when field is provided`);
    } else if (!isValidApiKey(context.n8nApiKey)) {
      // Provide specific reason for API key invalidity
      if (context.n8nApiKey.toLowerCase().includes('your_api_key')) {
        errors.push(`Invalid n8nApiKey: contains placeholder 'your_api_key' - Please provide actual API key`);
      } else if (context.n8nApiKey.toLowerCase().includes('placeholder')) {
        errors.push(`Invalid n8nApiKey: contains placeholder text - Please provide actual API key`);
      } else if (context.n8nApiKey.toLowerCase().includes('example')) {
        errors.push(`Invalid n8nApiKey: contains example text - Please provide actual API key`);
      } else {
        errors.push(`Invalid n8nApiKey: format validation failed - Ensure key is valid`);
      }
    }
  }

  // Validate timeout
  if (context.n8nApiTimeout !== undefined) {
    if (typeof context.n8nApiTimeout !== 'number') {
      errors.push(`Invalid n8nApiTimeout: ${context.n8nApiTimeout} - Must be a number, got ${typeof context.n8nApiTimeout}`);
    } else if (context.n8nApiTimeout <= 0) {
      errors.push(`Invalid n8nApiTimeout: ${context.n8nApiTimeout} - Must be positive (greater than 0)`);
    } else if (!isFinite(context.n8nApiTimeout)) {
      errors.push(`Invalid n8nApiTimeout: ${context.n8nApiTimeout} - Must be a finite number (not Infinity or NaN)`);
    }
  }

  // Validate retries
  if (context.n8nApiMaxRetries !== undefined) {
    if (typeof context.n8nApiMaxRetries !== 'number') {
      errors.push(`Invalid n8nApiMaxRetries: ${context.n8nApiMaxRetries} - Must be a number, got ${typeof context.n8nApiMaxRetries}`);
    } else if (context.n8nApiMaxRetries < 0) {
      errors.push(`Invalid n8nApiMaxRetries: ${context.n8nApiMaxRetries} - Must be non-negative (0 or greater)`);
    } else if (!isFinite(context.n8nApiMaxRetries)) {
      errors.push(`Invalid n8nApiMaxRetries: ${context.n8nApiMaxRetries} - Must be a finite number (not Infinity or NaN)`);
    }
  }

  return {
    valid: errors.length === 0,
    errors: errors.length > 0 ? errors : undefined
  };
}
```

--------------------------------------------------------------------------------
/.github/workflows/benchmark.yml:
--------------------------------------------------------------------------------

```yaml
name: Performance Benchmarks

on:
  push:
    branches: [main, feat/comprehensive-testing-suite]
    paths-ignore:
      - '**.md'
      - '**.txt'
      - 'docs/**'
      - 'examples/**'
      - '.github/FUNDING.yml'
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/pull_request_template.md'
      - '.gitignore'
      - 'LICENSE*'
      - 'ATTRIBUTION.md'
      - 'SECURITY.md'
      - 'CODE_OF_CONDUCT.md'
  pull_request:
    branches: [main]
    paths-ignore:
      - '**.md'
      - '**.txt'
      - 'docs/**'
      - 'examples/**'
      - '.github/FUNDING.yml'
      - '.github/ISSUE_TEMPLATE/**'
      - '.github/pull_request_template.md'
      - '.gitignore'
      - 'LICENSE*'
      - 'ATTRIBUTION.md'
      - 'SECURITY.md'
      - 'CODE_OF_CONDUCT.md'
  workflow_dispatch:

permissions:
  # For PR comments
  pull-requests: write
  # For pushing to gh-pages branch
  contents: write
  # For deployment to GitHub Pages
  pages: write
  id-token: write

jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          # Fetch all history for proper benchmark comparison
          fetch-depth: 0

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 20
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      - name: Build project
        run: npm run build

      - name: Run benchmarks
        run: npm run benchmark:ci
        
      - name: Format benchmark results
        run: node scripts/format-benchmark-results.js
        
      - name: Upload benchmark artifacts
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results
          path: |
            benchmark-results.json
            benchmark-results-formatted.json
            benchmark-summary.json

      # Ensure gh-pages branch exists
      - name: Check and create gh-pages branch
        run: |
          git fetch origin gh-pages:gh-pages 2>/dev/null || {
            echo "gh-pages branch doesn't exist. Creating it..."
            git checkout --orphan gh-pages
            git rm -rf .
            echo "# Benchmark Results" > README.md
            git add README.md
            git config user.name "github-actions[bot]"
            git config user.email "github-actions[bot]@users.noreply.github.com"
            git commit -m "Initial gh-pages commit"
            git push origin gh-pages
            git checkout ${{ github.ref_name }}
          }

      # Clean up workspace before benchmark action
      - name: Clean workspace
        run: |
          git add -A
          git stash || true
      
      # Store benchmark results and compare
      - name: Store benchmark result
        uses: benchmark-action/github-action-benchmark@v1
        continue-on-error: true
        id: benchmark
        with:
          name: n8n-mcp Benchmarks
          tool: 'customSmallerIsBetter'
          output-file-path: benchmark-results-formatted.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }}
          # Where to store benchmark data
          benchmark-data-dir-path: 'benchmarks'
          # Alert when performance regresses by 10%
          alert-threshold: '110%'
          # Comment on PR when regression is detected
          comment-on-alert: true
          alert-comment-cc-users: '@czlonkowski'
          # Summary always
          summary-always: true
          # Max number of data points to retain
          max-items-in-chart: 50
          fail-on-alert: false

      # Comment on PR with benchmark results
      - name: Comment PR with results
        uses: actions/github-script@v7
        if: github.event_name == 'pull_request'
        continue-on-error: true
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            try {
              const fs = require('fs');
              const summary = JSON.parse(fs.readFileSync('benchmark-summary.json', 'utf8'));
              
              // Format results for PR comment
              let comment = '## 📊 Performance Benchmark Results\n\n';
              comment += `🕐 Run at: ${new Date(summary.timestamp).toLocaleString()}\n\n`;
              comment += '| Benchmark | Time | Ops/sec | Range |\n';
              comment += '|-----------|------|---------|-------|\n';
              
              // Group benchmarks by category
              const categories = {};
              for (const benchmark of summary.benchmarks) {
                const [category, ...nameParts] = benchmark.name.split(' - ');
                if (!categories[category]) categories[category] = [];
                categories[category].push({
                  ...benchmark,
                  shortName: nameParts.join(' - ')
                });
              }
              
              // Display by category
              for (const [category, benchmarks] of Object.entries(categories)) {
                comment += `\n### ${category}\n`;
                for (const benchmark of benchmarks) {
                  comment += `| ${benchmark.shortName} | ${benchmark.time} | ${benchmark.opsPerSec} | ${benchmark.range} |\n`;
                }
              }
              
              // Add comparison link
              comment += '\n\n📈 [View historical benchmark trends](https://czlonkowski.github.io/n8n-mcp/benchmarks/)\n';
              comment += '\n⚡ Performance regressions >10% will be flagged automatically.\n';
              
              await github.rest.issues.createComment({
                issue_number: context.issue.number,
                owner: context.repo.owner,
                repo: context.repo.repo,
                body: comment
              });
            } catch (error) {
              console.error('Failed to create PR comment:', error.message);
              console.log('This is likely due to insufficient permissions for external PRs.');
              console.log('Benchmark results have been saved to artifacts instead.');
            }

  # Deploy benchmark results to GitHub Pages
  deploy:
    needs: benchmark
    if: github.ref == 'refs/heads/main'
    runs-on: ubuntu-latest
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          ref: gh-pages
        continue-on-error: true
        
      # If gh-pages checkout failed, create a minimal structure
      - name: Ensure gh-pages content exists
        run: |
          if [ ! -f "index.html" ]; then
            echo "Creating minimal gh-pages structure..."
            mkdir -p benchmarks
            echo '<!DOCTYPE html><html><head><title>n8n-mcp Benchmarks</title></head><body><h1>n8n-mcp Benchmarks</h1><p>Benchmark data will appear here after the first run.</p></body></html>' > index.html
          fi
          
      - name: Setup Pages
        uses: actions/configure-pages@v4
        
      - name: Upload Pages artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: '.'
          
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4
```

--------------------------------------------------------------------------------
/tests/helpers/env-helpers.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Test Environment Helper Utilities
 * 
 * Common utilities for working with test environment configuration
 */

import { getTestConfig, TestConfig } from '../setup/test-env';
import * as path from 'path';
import * as fs from 'fs';

/**
 * Create a test database path with unique suffix
 */
export function createTestDatabasePath(suffix?: string): string {
  const config = getTestConfig();
  if (config.database.path === ':memory:') {
    return ':memory:';
  }
  
  const timestamp = Date.now();
  const randomSuffix = Math.random().toString(36).substring(7);
  const dbName = suffix 
    ? `test-${suffix}-${timestamp}-${randomSuffix}.db`
    : `test-${timestamp}-${randomSuffix}.db`;
    
  return path.join(config.paths.data, dbName);
}

/**
 * Clean up test databases
 */
export async function cleanupTestDatabases(pattern?: RegExp): Promise<void> {
  const config = getTestConfig();
  const dataPath = path.resolve(config.paths.data);
  
  if (!fs.existsSync(dataPath)) {
    return;
  }
  
  const files = fs.readdirSync(dataPath);
  const testDbPattern = pattern || /^test-.*\.db$/;
  
  for (const file of files) {
    if (testDbPattern.test(file)) {
      try {
        fs.unlinkSync(path.join(dataPath, file));
      } catch (error) {
        console.error(`Failed to delete test database: ${file}`, error);
      }
    }
  }
}

/**
 * Override environment variables temporarily
 */
export function withEnvOverrides<T>(
  overrides: Partial<NodeJS.ProcessEnv>,
  fn: () => T
): T {
  const originalValues: Partial<NodeJS.ProcessEnv> = {};
  
  // Save original values and apply overrides
  for (const [key, value] of Object.entries(overrides)) {
    originalValues[key] = process.env[key];
    if (value === undefined) {
      delete process.env[key];
    } else {
      process.env[key] = value;
    }
  }
  
  try {
    return fn();
  } finally {
    // Restore original values
    for (const [key, value] of Object.entries(originalValues)) {
      if (value === undefined) {
        delete process.env[key];
      } else {
        process.env[key] = value;
      }
    }
  }
}

/**
 * Async version of withEnvOverrides
 */
export async function withEnvOverridesAsync<T>(
  overrides: Partial<NodeJS.ProcessEnv>,
  fn: () => Promise<T>
): Promise<T> {
  const originalValues: Partial<NodeJS.ProcessEnv> = {};
  
  // Save original values and apply overrides
  for (const [key, value] of Object.entries(overrides)) {
    originalValues[key] = process.env[key];
    if (value === undefined) {
      delete process.env[key];
    } else {
      process.env[key] = value;
    }
  }
  
  try {
    return await fn();
  } finally {
    // Restore original values
    for (const [key, value] of Object.entries(originalValues)) {
      if (value === undefined) {
        delete process.env[key];
      } else {
        process.env[key] = value;
      }
    }
  }
}

/**
 * Create a mock API server URL
 */
export function getMockApiUrl(endpoint?: string): string {
  const config = getTestConfig();
  const baseUrl = config.api.url;
  return endpoint ? `${baseUrl}${endpoint}` : baseUrl;
}

/**
 * Get test fixture path
 */
export function getFixturePath(fixtureName: string): string {
  const config = getTestConfig();
  return path.resolve(config.paths.fixtures, fixtureName);
}

/**
 * Load test fixture data
 */
export function loadFixture<T = any>(fixtureName: string): T {
  const fixturePath = getFixturePath(fixtureName);
  
  if (!fs.existsSync(fixturePath)) {
    throw new Error(`Fixture not found: ${fixturePath}`);
  }
  
  const content = fs.readFileSync(fixturePath, 'utf-8');
  
  if (fixturePath.endsWith('.json')) {
    return JSON.parse(content);
  }
  
  return content as any;
}

/**
 * Save test snapshot
 */
export function saveSnapshot(name: string, data: any): void {
  const config = getTestConfig();
  const snapshotDir = path.resolve(config.paths.snapshots);
  
  if (!fs.existsSync(snapshotDir)) {
    fs.mkdirSync(snapshotDir, { recursive: true });
  }
  
  const snapshotPath = path.join(snapshotDir, `${name}.snap`);
  const content = typeof data === 'string' ? data : JSON.stringify(data, null, 2);
  
  fs.writeFileSync(snapshotPath, content);
}

/**
 * Performance measurement helper
 */
export class PerformanceMeasure {
  private startTime: number;
  private marks: Map<string, number> = new Map();
  
  constructor(private name: string) {
    this.startTime = performance.now();
  }
  
  mark(label: string): void {
    this.marks.set(label, performance.now());
  }
  
  end(): { total: number; marks: Record<string, number> } {
    const endTime = performance.now();
    const total = endTime - this.startTime;
    
    const markTimes: Record<string, number> = {};
    for (const [label, time] of this.marks) {
      markTimes[label] = time - this.startTime;
    }
    
    return { total, marks: markTimes };
  }
  
  assertThreshold(threshold: keyof TestConfig['performance']['thresholds']): void {
    const config = getTestConfig();
    const { total } = this.end();
    const maxTime = config.performance.thresholds[threshold];
    
    if (total > maxTime) {
      throw new Error(
        `Performance threshold exceeded for ${this.name}: ` +
        `${total.toFixed(2)}ms > ${maxTime}ms`
      );
    }
  }
}

/**
 * Create a performance measure
 */
export function measurePerformance(name: string): PerformanceMeasure {
  return new PerformanceMeasure(name);
}

/**
 * Wait for a condition with timeout
 */
export async function waitForCondition(
  condition: () => boolean | Promise<boolean>,
  options: {
    timeout?: number;
    interval?: number;
    message?: string;
  } = {}
): Promise<void> {
  const {
    timeout = 5000,
    interval = 100,
    message = 'Condition not met'
  } = options;
  
  const startTime = Date.now();
  
  while (Date.now() - startTime < timeout) {
    const result = await condition();
    if (result) {
      return;
    }
    await new Promise(resolve => setTimeout(resolve, interval));
  }
  
  throw new Error(`${message} (timeout: ${timeout}ms)`);
}

/**
 * Create a test logger that respects configuration
 */
export function createTestLogger(namespace: string) {
  const config = getTestConfig();
  
  return {
    debug: (...args: any[]) => {
      if (config.logging.debug || config.logging.verbose) {
        console.debug(`[${namespace}]`, ...args);
      }
    },
    info: (...args: any[]) => {
      if (config.logging.level !== 'error') {
        console.info(`[${namespace}]`, ...args);
      }
    },
    warn: (...args: any[]) => {
      if (config.logging.level !== 'error') {
        console.warn(`[${namespace}]`, ...args);
      }
    },
    error: (...args: any[]) => {
      console.error(`[${namespace}]`, ...args);
    }
  };
}

/**
 * Check if running in CI environment
 */
export function isCI(): boolean {
  return process.env.CI === 'true' || 
         process.env.CONTINUOUS_INTEGRATION === 'true' ||
         process.env.GITHUB_ACTIONS === 'true' ||
         process.env.GITLAB_CI === 'true' ||
         process.env.CIRCLECI === 'true';
}

/**
 * Get appropriate test timeout based on environment
 */
export function getAdaptiveTimeout(baseTimeout: number): number {
  const multiplier = isCI() ? 2 : 1; // Double timeouts in CI
  return baseTimeout * multiplier;
}
```

--------------------------------------------------------------------------------
/tests/integration/database/node-fts5-search.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Integration tests for node FTS5 search functionality
 * Ensures the production search failures (Issue #296) are prevented
 */
import { describe, it, expect, beforeAll, afterAll } from 'vitest';
import { createDatabaseAdapter } from '../../../src/database/database-adapter';
import { NodeRepository } from '../../../src/database/node-repository';
import * as fs from 'fs';
import * as path from 'path';

describe('Node FTS5 Search Integration Tests', () => {
  let db: any;
  let repository: NodeRepository;

  beforeAll(async () => {
    // Use test database
    const testDbPath = './data/nodes.db';
    db = await createDatabaseAdapter(testDbPath);
    repository = new NodeRepository(db);
  });

  afterAll(() => {
    if (db) {
      db.close();
    }
  });

  describe('FTS5 Table Existence', () => {
    it('should have nodes_fts table in schema', () => {
      const schemaPath = path.join(__dirname, '../../../src/database/schema.sql');
      const schema = fs.readFileSync(schemaPath, 'utf-8');

      expect(schema).toContain('CREATE VIRTUAL TABLE IF NOT EXISTS nodes_fts USING fts5');
      expect(schema).toContain('CREATE TRIGGER IF NOT EXISTS nodes_fts_insert');
      expect(schema).toContain('CREATE TRIGGER IF NOT EXISTS nodes_fts_update');
      expect(schema).toContain('CREATE TRIGGER IF NOT EXISTS nodes_fts_delete');
    });

    it('should have nodes_fts table in database', () => {
      const result = db.prepare(`
        SELECT name FROM sqlite_master
        WHERE type='table' AND name='nodes_fts'
      `).get();

      expect(result).toBeDefined();
      expect(result.name).toBe('nodes_fts');
    });

    it('should have FTS5 triggers in database', () => {
      const triggers = db.prepare(`
        SELECT name FROM sqlite_master
        WHERE type='trigger' AND name LIKE 'nodes_fts_%'
      `).all();

      expect(triggers).toHaveLength(3);
      const triggerNames = triggers.map((t: any) => t.name);
      expect(triggerNames).toContain('nodes_fts_insert');
      expect(triggerNames).toContain('nodes_fts_update');
      expect(triggerNames).toContain('nodes_fts_delete');
    });
  });

  describe('FTS5 Index Population', () => {
    it('should have nodes_fts count matching nodes count', () => {
      const nodesCount = db.prepare('SELECT COUNT(*) as count FROM nodes').get();
      const ftsCount = db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get();

      expect(nodesCount.count).toBeGreaterThan(500); // Should have both packages
      expect(ftsCount.count).toBe(nodesCount.count);
    });

    it('should not have empty FTS5 index', () => {
      const ftsCount = db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get();

      expect(ftsCount.count).toBeGreaterThan(0);
    });
  });

  describe('Critical Node Searches (Production Failure Cases)', () => {
    it('should find webhook node via FTS5', () => {
      const results = db.prepare(`
        SELECT node_type FROM nodes_fts
        WHERE nodes_fts MATCH 'webhook'
      `).all();

      expect(results.length).toBeGreaterThan(0);
      const nodeTypes = results.map((r: any) => r.node_type);
      expect(nodeTypes).toContain('nodes-base.webhook');
    });

    it('should find merge node via FTS5', () => {
      const results = db.prepare(`
        SELECT node_type FROM nodes_fts
        WHERE nodes_fts MATCH 'merge'
      `).all();

      expect(results.length).toBeGreaterThan(0);
      const nodeTypes = results.map((r: any) => r.node_type);
      expect(nodeTypes).toContain('nodes-base.merge');
    });

    it('should find split batch node via FTS5', () => {
      const results = db.prepare(`
        SELECT node_type FROM nodes_fts
        WHERE nodes_fts MATCH 'split OR batch'
      `).all();

      expect(results.length).toBeGreaterThan(0);
      const nodeTypes = results.map((r: any) => r.node_type);
      expect(nodeTypes).toContain('nodes-base.splitInBatches');
    });

    it('should find code node via FTS5', () => {
      const results = db.prepare(`
        SELECT node_type FROM nodes_fts
        WHERE nodes_fts MATCH 'code'
      `).all();

      expect(results.length).toBeGreaterThan(0);
      const nodeTypes = results.map((r: any) => r.node_type);
      expect(nodeTypes).toContain('nodes-base.code');
    });

    it('should find http request node via FTS5', () => {
      const results = db.prepare(`
        SELECT node_type FROM nodes_fts
        WHERE nodes_fts MATCH 'http OR request'
      `).all();

      expect(results.length).toBeGreaterThan(0);
      const nodeTypes = results.map((r: any) => r.node_type);
      expect(nodeTypes).toContain('nodes-base.httpRequest');
    });
  });

  describe('FTS5 Search Quality', () => {
    it('should rank exact matches higher', () => {
      const results = db.prepare(`
        SELECT node_type, rank FROM nodes_fts
        WHERE nodes_fts MATCH 'webhook'
        ORDER BY rank
        LIMIT 10
      `).all();

      expect(results.length).toBeGreaterThan(0);
      // Exact match should be in top results
      const topResults = results.slice(0, 3).map((r: any) => r.node_type);
      expect(topResults).toContain('nodes-base.webhook');
    });

    it('should support phrase searches', () => {
      const results = db.prepare(`
        SELECT node_type FROM nodes_fts
        WHERE nodes_fts MATCH '"http request"'
      `).all();

      expect(results.length).toBeGreaterThan(0);
    });

    it('should support boolean operators', () => {
      const andResults = db.prepare(`
        SELECT node_type FROM nodes_fts
        WHERE nodes_fts MATCH 'google AND sheets'
      `).all();

      const orResults = db.prepare(`
        SELECT node_type FROM nodes_fts
        WHERE nodes_fts MATCH 'google OR sheets'
      `).all();

      expect(andResults.length).toBeGreaterThan(0);
      expect(orResults.length).toBeGreaterThanOrEqual(andResults.length);
    });
  });

  describe('FTS5 Index Synchronization', () => {
    it('should keep FTS5 in sync after node updates', () => {
      // This test ensures triggers work properly
      const beforeCount = db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get();

      // Insert a test node
      db.prepare(`
        INSERT INTO nodes (
          node_type, package_name, display_name, description,
          category, development_style, is_ai_tool, is_trigger,
          is_webhook, is_versioned, version, properties_schema,
          operations, credentials_required
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
      `).run(
        'test.node',
        'test-package',
        'Test Node',
        'A test node for FTS5 synchronization',
        'Test',
        'programmatic',
        0, 0, 0, 0,
        '1.0',
        '[]', '[]', '[]'
      );

      const afterInsert = db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get();
      expect(afterInsert.count).toBe(beforeCount.count + 1);

      // Verify the new node is searchable
      const searchResults = db.prepare(`
        SELECT node_type FROM nodes_fts
        WHERE nodes_fts MATCH 'test synchronization'
      `).all();
      expect(searchResults.length).toBeGreaterThan(0);

      // Clean up
      db.prepare('DELETE FROM nodes WHERE node_type = ?').run('test.node');

      const afterDelete = db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get();
      expect(afterDelete.count).toBe(beforeCount.count);
    });
  });
});

```

--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------

```json
{
  "name": "n8n-mcp",
  "version": "2.19.6",
  "description": "Integration between n8n workflow automation and Model Context Protocol (MCP)",
  "main": "dist/index.js",
  "types": "dist/index.d.ts",
  "exports": {
    ".": {
      "types": "./dist/index.d.ts",
      "require": "./dist/index.js",
      "import": "./dist/index.js"
    }
  },
  "bin": {
    "n8n-mcp": "./dist/mcp/index.js"
  },
  "scripts": {
    "build": "tsc -p tsconfig.build.json",
    "rebuild": "node dist/scripts/rebuild.js",
    "rebuild:optimized": "node dist/scripts/rebuild-optimized.js",
    "validate": "node dist/scripts/validate.js",
    "test-nodes": "node dist/scripts/test-nodes.js",
    "start": "node dist/mcp/index.js",
    "start:http": "MCP_MODE=http node dist/mcp/index.js",
    "start:http:fixed": "MCP_MODE=http USE_FIXED_HTTP=true node dist/mcp/index.js",
    "start:n8n": "N8N_MODE=true MCP_MODE=http node dist/mcp/index.js",
    "http": "npm run build && npm run start:http:fixed",
    "dev": "npm run build && npm run rebuild && npm run validate",
    "dev:http": "MCP_MODE=http nodemon --watch src --ext ts --exec 'npm run build && npm run start:http'",
    "test:single-session": "./scripts/test-single-session.sh",
    "test:mcp-endpoint": "node scripts/test-mcp-endpoint.js",
    "test:mcp-endpoint:curl": "./scripts/test-mcp-endpoint.sh",
    "test:mcp-stdio": "npm run build && node scripts/test-mcp-stdio.js",
    "test": "vitest",
    "test:ui": "vitest --ui",
    "test:run": "vitest run",
    "test:coverage": "vitest run --coverage",
    "test:ci": "vitest run --coverage --coverage.thresholds.lines=0 --coverage.thresholds.functions=0 --coverage.thresholds.branches=0 --coverage.thresholds.statements=0 --reporter=default --reporter=junit",
    "test:watch": "vitest watch",
    "test:unit": "vitest run tests/unit",
    "test:integration": "vitest run --config vitest.config.integration.ts",
    "test:integration:n8n": "vitest run tests/integration/n8n-api",
    "test:cleanup:orphans": "tsx tests/integration/n8n-api/scripts/cleanup-orphans.ts",
    "test:e2e": "vitest run tests/e2e",
    "lint": "tsc --noEmit",
    "typecheck": "tsc --noEmit",
    "update:n8n": "node scripts/update-n8n-deps.js",
    "update:n8n:check": "node scripts/update-n8n-deps.js --dry-run",
    "fetch:templates": "node dist/scripts/fetch-templates.js",
    "fetch:templates:update": "node dist/scripts/fetch-templates.js --update",
    "fetch:templates:extract": "node dist/scripts/fetch-templates.js --extract-only",
    "fetch:templates:robust": "node dist/scripts/fetch-templates-robust.js",
    "prebuild:fts5": "npx tsx scripts/prebuild-fts5.ts",
    "test:templates": "node dist/scripts/test-templates.js",
    "test:protocol-negotiation": "npx tsx src/scripts/test-protocol-negotiation.ts",
    "test:workflow-validation": "node dist/scripts/test-workflow-validation.js",
    "test:template-validation": "node dist/scripts/test-template-validation.js",
    "test:essentials": "node dist/scripts/test-essentials.js",
    "test:enhanced-validation": "node dist/scripts/test-enhanced-validation.js",
    "test:ai-workflow-validation": "node dist/scripts/test-ai-workflow-validation.js",
    "test:mcp-tools": "node dist/scripts/test-mcp-tools.js",
    "test:n8n-manager": "node dist/scripts/test-n8n-manager-integration.js",
    "test:n8n-validate-workflow": "node dist/scripts/test-n8n-validate-workflow.js",
    "test:typeversion-validation": "node dist/scripts/test-typeversion-validation.js",
    "test:error-handling": "node dist/scripts/test-error-handling-validation.js",
    "test:workflow-diff": "node dist/scripts/test-workflow-diff.js",
    "test:transactional-diff": "node dist/scripts/test-transactional-diff.js",
    "test:tools-documentation": "node dist/scripts/test-tools-documentation.js",
    "test:url-configuration": "npm run build && ts-node scripts/test-url-configuration.ts",
    "test:search-improvements": "node dist/scripts/test-search-improvements.js",
    "test:fts5-search": "node dist/scripts/test-fts5-search.js",
    "migrate:fts5": "node dist/scripts/migrate-nodes-fts.js",
    "test:mcp:update-partial": "node dist/scripts/test-mcp-n8n-update-partial.js",
    "test:update-partial:debug": "node dist/scripts/test-update-partial-debug.js",
    "test:issue-45-fix": "node dist/scripts/test-issue-45-fix.js",
    "test:auth-logging": "tsx scripts/test-auth-logging.ts",
    "test:docker": "./scripts/test-docker-config.sh all",
    "test:docker:unit": "./scripts/test-docker-config.sh unit",
    "test:docker:integration": "./scripts/test-docker-config.sh integration",
    "test:docker:security": "./scripts/test-docker-config.sh security",
    "sanitize:templates": "node dist/scripts/sanitize-templates.js",
    "db:rebuild": "node dist/scripts/rebuild-database.js",
    "benchmark": "vitest bench --config vitest.config.benchmark.ts",
    "benchmark:watch": "vitest bench --watch --config vitest.config.benchmark.ts",
    "benchmark:ui": "vitest bench --ui --config vitest.config.benchmark.ts",
    "benchmark:ci": "CI=true node scripts/run-benchmarks-ci.js",
    "db:init": "node -e \"new (require('./dist/services/sqlite-storage-service').SQLiteStorageService)(); console.log('Database initialized')\"",
    "docs:rebuild": "ts-node src/scripts/rebuild-database.ts",
    "sync:runtime-version": "node scripts/sync-runtime-version.js",
    "update:readme-version": "node scripts/update-readme-version.js",
    "prepare:publish": "./scripts/publish-npm.sh",
    "update:all": "./scripts/update-and-publish-prep.sh",
    "test:release-automation": "node scripts/test-release-automation.js",
    "prepare:release": "node scripts/prepare-release.js"
  },
  "repository": {
    "type": "git",
    "url": "git+https://github.com/czlonkowski/n8n-mcp.git"
  },
  "keywords": [
    "n8n",
    "mcp",
    "model-context-protocol",
    "ai",
    "workflow",
    "automation"
  ],
  "author": "Romuald Czlonkowski @ www.aiadvisors.pl/en",
  "license": "MIT",
  "bugs": {
    "url": "https://github.com/czlonkowski/n8n-mcp/issues"
  },
  "homepage": "https://github.com/czlonkowski/n8n-mcp#readme",
  "files": [
    "dist/**/*",
    "data/nodes.db",
    ".env.example",
    "README.md",
    "LICENSE",
    "package.runtime.json"
  ],
  "devDependencies": {
    "@faker-js/faker": "^9.9.0",
    "@testing-library/jest-dom": "^6.6.4",
    "@types/better-sqlite3": "^7.6.13",
    "@types/express": "^5.0.3",
    "@types/node": "^22.15.30",
    "@types/ws": "^8.18.1",
    "@vitest/coverage-v8": "^3.2.4",
    "@vitest/runner": "^3.2.4",
    "@vitest/ui": "^3.2.4",
    "axios": "^1.11.0",
    "axios-mock-adapter": "^2.1.0",
    "fishery": "^2.3.1",
    "msw": "^2.10.4",
    "nodemon": "^3.1.10",
    "ts-node": "^10.9.2",
    "typescript": "^5.8.3",
    "vitest": "^3.2.4"
  },
  "dependencies": {
    "@modelcontextprotocol/sdk": "^1.13.2",
    "@n8n/n8n-nodes-langchain": "^1.114.1",
    "@supabase/supabase-js": "^2.57.4",
    "dotenv": "^16.5.0",
    "express": "^5.1.0",
    "express-rate-limit": "^7.1.5",
    "lru-cache": "^11.2.1",
    "n8n": "^1.115.2",
    "n8n-core": "^1.114.0",
    "n8n-workflow": "^1.112.0",
    "openai": "^4.77.0",
    "sql.js": "^1.13.0",
    "uuid": "^10.0.0",
    "zod": "^3.24.1"
  },
  "optionalDependencies": {
    "@rollup/rollup-darwin-arm64": "^4.50.0",
    "@rollup/rollup-linux-x64-gnu": "^4.50.0",
    "better-sqlite3": "^11.10.0"
  },
  "overrides": {
    "pyodide": "0.26.4"
  }
}

```
Page 7/45FirstPrevNextLast