This is page 3 of 48. Use http://codebase.md/czlonkowski/n8n-mcp?lines=false&page={x} to view the full context.

# Directory Structure

```
├── _config.yml
├── .claude
│   └── agents
│       ├── code-reviewer.md
│       ├── context-manager.md
│       ├── debugger.md
│       ├── deployment-engineer.md
│       ├── mcp-backend-engineer.md
│       ├── n8n-mcp-tester.md
│       ├── technical-researcher.md
│       └── test-automator.md
├── .dockerignore
├── .env.docker
├── .env.example
├── .env.n8n.example
├── .env.test
├── .env.test.example
├── .github
│   ├── ABOUT.md
│   ├── BENCHMARK_THRESHOLDS.md
│   ├── FUNDING.yml
│   ├── gh-pages.yml
│   ├── secret_scanning.yml
│   └── workflows
│       ├── benchmark-pr.yml
│       ├── benchmark.yml
│       ├── docker-build-fast.yml
│       ├── docker-build-n8n.yml
│       ├── docker-build.yml
│       ├── release.yml
│       ├── test.yml
│       └── update-n8n-deps.yml
├── .gitignore
├── .npmignore
├── ATTRIBUTION.md
├── CHANGELOG.md
├── CLAUDE.md
├── codecov.yml
├── coverage.json
├── data
│   ├── .gitkeep
│   ├── nodes.db
│   ├── nodes.db-shm
│   ├── nodes.db-wal
│   └── templates.db
├── deploy
│   └── quick-deploy-n8n.sh
├── docker
│   ├── docker-entrypoint.sh
│   ├── n8n-mcp
│   ├── parse-config.js
│   └── README.md
├── docker-compose.buildkit.yml
├── docker-compose.extract.yml
├── docker-compose.n8n.yml
├── docker-compose.override.yml.example
├── docker-compose.test-n8n.yml
├── docker-compose.yml
├── Dockerfile
├── Dockerfile.railway
├── Dockerfile.test
├── docs
│   ├── AUTOMATED_RELEASES.md
│   ├── BENCHMARKS.md
│   ├── bugfix-onSessionCreated-event.md
│   ├── CHANGELOG.md
│   ├── CLAUDE_CODE_SETUP.md
│   ├── CLAUDE_INTERVIEW.md
│   ├── CODECOV_SETUP.md
│   ├── CODEX_SETUP.md
│   ├── CURSOR_SETUP.md
│   ├── DEPENDENCY_UPDATES.md
│   ├── DOCKER_README.md
│   ├── DOCKER_TROUBLESHOOTING.md
│   ├── FINAL_AI_VALIDATION_SPEC.md
│   ├── FLEXIBLE_INSTANCE_CONFIGURATION.md
│   ├── HTTP_DEPLOYMENT.md
│   ├── img
│   │   ├── cc_command.png
│   │   ├── cc_connected.png
│   │   ├── codex_connected.png
│   │   ├── cursor_tut.png
│   │   ├── Railway_api.png
│   │   ├── Railway_server_address.png
│   │   ├── vsc_ghcp_chat_agent_mode.png
│   │   ├── vsc_ghcp_chat_instruction_files.png
│   │   ├── vsc_ghcp_chat_thinking_tool.png
│   │   └── windsurf_tut.png
│   ├── INSTALLATION.md
│   ├── LIBRARY_USAGE.md
│   ├── local
│   │   ├── DEEP_DIVE_ANALYSIS_2025-10-02.md
│   │   ├── DEEP_DIVE_ANALYSIS_README.md
│   │   ├── Deep_dive_p1_p2.md
│   │   ├── integration-testing-plan.md
│   │   ├── integration-tests-phase1-summary.md
│   │   ├── N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md
│   │   ├── P0_IMPLEMENTATION_PLAN.md
│   │   └── TEMPLATE_MINING_ANALYSIS.md
│   ├── MCP_ESSENTIALS_README.md
│   ├── MCP_QUICK_START_GUIDE.md
│   ├── MULTI_APP_INTEGRATION.md
│   ├── N8N_DEPLOYMENT.md
│   ├── RAILWAY_DEPLOYMENT.md
│   ├── README_CLAUDE_SETUP.md
│   ├── README.md
│   ├── tools-documentation-usage.md
│   ├── VS_CODE_PROJECT_SETUP.md
│   ├── WINDSURF_SETUP.md
│   └── workflow-diff-examples.md
├── examples
│   └── enhanced-documentation-demo.js
├── fetch_log.txt
├── IMPLEMENTATION_GUIDE.md
├── LICENSE
├── MEMORY_N8N_UPDATE.md
├── MEMORY_TEMPLATE_UPDATE.md
├── monitor_fetch.sh
├── MVP_DEPLOYMENT_PLAN.md
├── N8N_HTTP_STREAMABLE_SETUP.md
├── n8n-nodes.db
├── P0-R3-TEST-PLAN.md
├── package-lock.json
├── package.json
├── package.runtime.json
├── PRIVACY.md
├── railway.json
├── README.md
├── renovate.json
├── scripts
│   ├── analyze-optimization.sh
│   ├── audit-schema-coverage.ts
│   ├── build-optimized.sh
│   ├── compare-benchmarks.js
│   ├── demo-optimization.sh
│   ├── deploy-http.sh
│   ├── deploy-to-vm.sh
│   ├── export-webhook-workflows.ts
│   ├── extract-changelog.js
│   ├── extract-from-docker.js
│   ├── extract-nodes-docker.sh
│   ├── extract-nodes-simple.sh
│   ├── format-benchmark-results.js
│   ├── generate-benchmark-stub.js
│   ├── generate-detailed-reports.js
│   ├── generate-test-summary.js
│   ├── http-bridge.js
│   ├── mcp-http-client.js
│   ├── migrate-nodes-fts.ts
│   ├── migrate-tool-docs.ts
│   ├── n8n-docs-mcp.service
│   ├── nginx-n8n-mcp.conf
│   ├── prebuild-fts5.ts
│   ├── prepare-release.js
│   ├── publish-npm-quick.sh
│   ├── publish-npm.sh
│   ├── quick-test.ts
│   ├── run-benchmarks-ci.js
│   ├── sync-runtime-version.js
│   ├── test-ai-validation-debug.ts
│   ├── test-code-node-enhancements.ts
│   ├── test-code-node-fixes.ts
│   ├── test-docker-config.sh
│   ├── test-docker-fingerprint.ts
│   ├── test-docker-optimization.sh
│   ├── test-docker.sh
│   ├── test-empty-connection-validation.ts
│   ├── test-error-message-tracking.ts
│   ├── test-error-output-validation.ts
│   ├── test-error-validation.js
│   ├── test-essentials.ts
│   ├── test-expression-code-validation.ts
│   ├── test-expression-format-validation.js
│   ├── test-fts5-search.ts
│   ├── test-fuzzy-fix.ts
│   ├── test-fuzzy-simple.ts
│   ├── test-helpers-validation.ts
│   ├── test-http-search.ts
│   ├── test-http.sh
│   ├── test-jmespath-validation.ts
│   ├── test-multi-tenant-simple.ts
│   ├── test-multi-tenant.ts
│   ├── test-n8n-integration.sh
│   ├── test-node-info.js
│   ├── test-node-type-validation.ts
│   ├── test-nodes-base-prefix.ts
│   ├── test-operation-validation.ts
│   ├── test-optimized-docker.sh
│   ├── test-release-automation.js
│   ├── test-search-improvements.ts
│   ├── test-security.ts
│   ├── test-single-session.sh
│   ├── test-sqljs-triggers.ts
│   ├── test-telemetry-debug.ts
│   ├── test-telemetry-direct.ts
│   ├── test-telemetry-env.ts
│   ├── test-telemetry-integration.ts
│   ├── test-telemetry-no-select.ts
│   ├── test-telemetry-security.ts
│   ├── test-telemetry-simple.ts
│   ├── test-typeversion-validation.ts
│   ├── test-url-configuration.ts
│   ├── test-user-id-persistence.ts
│   ├── test-webhook-validation.ts
│   ├── test-workflow-insert.ts
│   ├── test-workflow-sanitizer.ts
│   ├── test-workflow-tracking-debug.ts
│   ├── update-and-publish-prep.sh
│   ├── update-n8n-deps.js
│   ├── update-readme-version.js
│   ├── vitest-benchmark-json-reporter.js
│   └── vitest-benchmark-reporter.ts
├── SECURITY.md
├── src
│   ├── config
│   │   └── n8n-api.ts
│   ├── data
│   │   └── canonical-ai-tool-examples.json
│   ├── database
│   │   ├── database-adapter.ts
│   │   ├── migrations
│   │   │   └── add-template-node-configs.sql
│   │   ├── node-repository.ts
│   │   ├── nodes.db
│   │   ├── schema-optimized.sql
│   │   └── schema.sql
│   ├── errors
│   │   └── validation-service-error.ts
│   ├── http-server-single-session.ts
│   ├── http-server.ts
│   ├── index.ts
│   ├── loaders
│   │   └── node-loader.ts
│   ├── mappers
│   │   └── docs-mapper.ts
│   ├── mcp
│   │   ├── handlers-n8n-manager.ts
│   │   ├── handlers-workflow-diff.ts
│   │   ├── index.ts
│   │   ├── server.ts
│   │   ├── stdio-wrapper.ts
│   │   ├── tool-docs
│   │   │   ├── configuration
│   │   │   │   ├── get-node-as-tool-info.ts
│   │   │   │   ├── get-node-documentation.ts
│   │   │   │   ├── get-node-essentials.ts
│   │   │   │   ├── get-node-info.ts
│   │   │   │   ├── get-property-dependencies.ts
│   │   │   │   ├── index.ts
│   │   │   │   └── search-node-properties.ts
│   │   │   ├── discovery
│   │   │   │   ├── get-database-statistics.ts
│   │   │   │   ├── index.ts
│   │   │   │   ├── list-ai-tools.ts
│   │   │   │   ├── list-nodes.ts
│   │   │   │   └── search-nodes.ts
│   │   │   ├── guides
│   │   │   │   ├── ai-agents-guide.ts
│   │   │   │   └── index.ts
│   │   │   ├── index.ts
│   │   │   ├── system
│   │   │   │   ├── index.ts
│   │   │   │   ├── n8n-diagnostic.ts
│   │   │   │   ├── n8n-health-check.ts
│   │   │   │   ├── n8n-list-available-tools.ts
│   │   │   │   └── tools-documentation.ts
│   │   │   ├── templates
│   │   │   │   ├── get-template.ts
│   │   │   │   ├── get-templates-for-task.ts
│   │   │   │   ├── index.ts
│   │   │   │   ├── list-node-templates.ts
│   │   │   │   ├── list-tasks.ts
│   │   │   │   ├── search-templates-by-metadata.ts
│   │   │   │   └── search-templates.ts
│   │   │   ├── types.ts
│   │   │   ├── validation
│   │   │   │   ├── index.ts
│   │   │   │   ├── validate-node-minimal.ts
│   │   │   │   ├── validate-node-operation.ts
│   │   │   │   ├── validate-workflow-connections.ts
│   │   │   │   ├── validate-workflow-expressions.ts
│   │   │   │   └── validate-workflow.ts
│   │   │   └── workflow_management
│   │   │       ├── index.ts
│   │   │       ├── n8n-autofix-workflow.ts
│   │   │       ├── n8n-create-workflow.ts
│   │   │       ├── n8n-delete-execution.ts
│   │   │       ├── n8n-delete-workflow.ts
│   │   │       ├── n8n-get-execution.ts
│   │   │       ├── n8n-get-workflow-details.ts
│   │   │       ├── n8n-get-workflow-minimal.ts
│   │   │       ├── n8n-get-workflow-structure.ts
│   │   │       ├── n8n-get-workflow.ts
│   │   │       ├── n8n-list-executions.ts
│   │   │       ├── n8n-list-workflows.ts
│   │   │       ├── n8n-trigger-webhook-workflow.ts
│   │   │       ├── n8n-update-full-workflow.ts
│   │   │       ├── n8n-update-partial-workflow.ts
│   │   │       └── n8n-validate-workflow.ts
│   │   ├── tools-documentation.ts
│   │   ├── tools-n8n-friendly.ts
│   │   ├── tools-n8n-manager.ts
│   │   ├── tools.ts
│   │   └── workflow-examples.ts
│   ├── mcp-engine.ts
│   ├── mcp-tools-engine.ts
│   ├── n8n
│   │   ├── MCPApi.credentials.ts
│   │   └── MCPNode.node.ts
│   ├── parsers
│   │   ├── node-parser.ts
│   │   ├── property-extractor.ts
│   │   └── simple-parser.ts
│   ├── scripts
│   │   ├── debug-http-search.ts
│   │   ├── extract-from-docker.ts
│   │   ├── fetch-templates-robust.ts
│   │   ├── fetch-templates.ts
│   │   ├── rebuild-database.ts
│   │   ├── rebuild-optimized.ts
│   │   ├── rebuild.ts
│   │   ├── sanitize-templates.ts
│   │   ├── seed-canonical-ai-examples.ts
│   │   ├── test-autofix-documentation.ts
│   │   ├── test-autofix-workflow.ts
│   │   ├── test-execution-filtering.ts
│   │   ├── test-node-suggestions.ts
│   │   ├── test-protocol-negotiation.ts
│   │   ├── test-summary.ts
│   │   ├── test-webhook-autofix.ts
│   │   ├── validate.ts
│   │   └── validation-summary.ts
│   ├── services
│   │   ├── ai-node-validator.ts
│   │   ├── ai-tool-validators.ts
│   │   ├── confidence-scorer.ts
│   │   ├── config-validator.ts
│   │   ├── enhanced-config-validator.ts
│   │   ├── example-generator.ts
│   │   ├── execution-processor.ts
│   │   ├── expression-format-validator.ts
│   │   ├── expression-validator.ts
│   │   ├── n8n-api-client.ts
│   │   ├── n8n-validation.ts
│   │   ├── node-documentation-service.ts
│   │   ├── node-similarity-service.ts
│   │   ├── node-specific-validators.ts
│   │   ├── operation-similarity-service.ts
│   │   ├── property-dependencies.ts
│   │   ├── property-filter.ts
│   │   ├── resource-similarity-service.ts
│   │   ├── sqlite-storage-service.ts
│   │   ├── task-templates.ts
│   │   ├── universal-expression-validator.ts
│   │   ├── workflow-auto-fixer.ts
│   │   ├── workflow-diff-engine.ts
│   │   └── workflow-validator.ts
│   ├── telemetry
│   │   ├── batch-processor.ts
│   │   ├── config-manager.ts
│   │   ├── early-error-logger.ts
│   │   ├── error-sanitization-utils.ts
│   │   ├── error-sanitizer.ts
│   │   ├── event-tracker.ts
│   │   ├── event-validator.ts
│   │   ├── index.ts
│   │   ├── performance-monitor.ts
│   │   ├── rate-limiter.ts
│   │   ├── startup-checkpoints.ts
│   │   ├── telemetry-error.ts
│   │   ├── telemetry-manager.ts
│   │   ├── telemetry-types.ts
│   │   └── workflow-sanitizer.ts
│   ├── templates
│   │   ├── batch-processor.ts
│   │   ├── metadata-generator.ts
│   │   ├── README.md
│   │   ├── template-fetcher.ts
│   │   ├── template-repository.ts
│   │   └── template-service.ts
│   ├── types
│   │   ├── index.ts
│   │   ├── instance-context.ts
│   │   ├── n8n-api.ts
│   │   ├── node-types.ts
│   │   ├── session-restoration.ts
│   │   └── workflow-diff.ts
│   └── utils
│       ├── auth.ts
│       ├── bridge.ts
│       ├── cache-utils.ts
│       ├── console-manager.ts
│       ├── documentation-fetcher.ts
│       ├── enhanced-documentation-fetcher.ts
│       ├── error-handler.ts
│       ├── example-generator.ts
│       ├── fixed-collection-validator.ts
│       ├── logger.ts
│       ├── mcp-client.ts
│       ├── n8n-errors.ts
│       ├── node-source-extractor.ts
│       ├── node-type-normalizer.ts
│       ├── node-type-utils.ts
│       ├── node-utils.ts
│       ├── npm-version-checker.ts
│       ├── protocol-version.ts
│       ├── simple-cache.ts
│       ├── ssrf-protection.ts
│       ├── template-node-resolver.ts
│       ├── template-sanitizer.ts
│       ├── url-detector.ts
│       ├── validation-schemas.ts
│       └── version.ts
├── supabase-telemetry-aggregation.sql
├── TELEMETRY_PRUNING_GUIDE.md
├── telemetry-pruning-analysis.md
├── test-output.txt
├── test-reinit-fix.sh
├── tests
│   ├── __snapshots__
│   │   └── .gitkeep
│   ├── auth.test.ts
│   ├── benchmarks
│   │   ├── database-queries.bench.ts
│   │   ├── index.ts
│   │   ├── mcp-tools.bench.ts
│   │   ├── mcp-tools.bench.ts.disabled
│   │   ├── mcp-tools.bench.ts.skip
│   │   ├── node-loading.bench.ts.disabled
│   │   ├── README.md
│   │   ├── search-operations.bench.ts.disabled
│   │   └── validation-performance.bench.ts.disabled
│   ├── bridge.test.ts
│   ├── comprehensive-extraction-test.js
│   ├── data
│   │   └── .gitkeep
│   ├── debug-slack-doc.js
│   ├── demo-enhanced-documentation.js
│   ├── docker-tests-README.md
│   ├── error-handler.test.ts
│   ├── examples
│   │   └── using-database-utils.test.ts
│   ├── extracted-nodes-db
│   │   ├── database-import.json
│   │   ├── extraction-report.json
│   │   ├── insert-nodes.sql
│   │   ├── n8n-nodes-base__Airtable.json
│   │   ├── n8n-nodes-base__Discord.json
│   │   ├── n8n-nodes-base__Function.json
│   │   ├── n8n-nodes-base__HttpRequest.json
│   │   ├── n8n-nodes-base__If.json
│   │   ├── n8n-nodes-base__Slack.json
│   │   ├── n8n-nodes-base__SplitInBatches.json
│   │   └── n8n-nodes-base__Webhook.json
│   ├── factories
│   │   ├── node-factory.ts
│   │   └── property-definition-factory.ts
│   ├── fixtures
│   │   ├── .gitkeep
│   │   ├── database
│   │   │   └── test-nodes.json
│   │   ├── factories
│   │   │   ├── node.factory.ts
│   │   │   └── parser-node.factory.ts
│   │   └── template-configs.ts
│   ├── helpers
│   │   └── env-helpers.ts
│   ├── http-server-auth.test.ts
│   ├── integration
│   │   ├── ai-validation
│   │   │   ├── ai-agent-validation.test.ts
│   │   │   ├── ai-tool-validation.test.ts
│   │   │   ├── chat-trigger-validation.test.ts
│   │   │   ├── e2e-validation.test.ts
│   │   │   ├── helpers.ts
│   │   │   ├── llm-chain-validation.test.ts
│   │   │   ├── README.md
│   │   │   └── TEST_REPORT.md
│   │   ├── ci
│   │   │   └── database-population.test.ts
│   │   ├── database
│   │   │   ├── connection-management.test.ts
│   │   │   ├── empty-database.test.ts
│   │   │   ├── fts5-search.test.ts
│   │   │   ├── node-fts5-search.test.ts
│   │   │   ├── node-repository.test.ts
│   │   │   ├── performance.test.ts
│   │   │   ├── template-node-configs.test.ts
│   │   │   ├── template-repository.test.ts
│   │   │   ├── test-utils.ts
│   │   │   └── transactions.test.ts
│   │   ├── database-integration.test.ts
│   │   ├── docker
│   │   │   ├── docker-config.test.ts
│   │   │   ├── docker-entrypoint.test.ts
│   │   │   └── test-helpers.ts
│   │   ├── flexible-instance-config.test.ts
│   │   ├── mcp
│   │   │   └── template-examples-e2e.test.ts
│   │   ├── mcp-protocol
│   │   │   ├── basic-connection.test.ts
│   │   │   ├── error-handling.test.ts
│   │   │   ├── performance.test.ts
│   │   │   ├── protocol-compliance.test.ts
│   │   │   ├── README.md
│   │   │   ├── session-management.test.ts
│   │   │   ├── test-helpers.ts
│   │   │   ├── tool-invocation.test.ts
│   │   │   └── workflow-error-validation.test.ts
│   │   ├── msw-setup.test.ts
│   │   ├── n8n-api
│   │   │   ├── executions
│   │   │   │   ├── delete-execution.test.ts
│   │   │   │   ├── get-execution.test.ts
│   │   │   │   ├── list-executions.test.ts
│   │   │   │   └── trigger-webhook.test.ts
│   │   │   ├── scripts
│   │   │   │   └── cleanup-orphans.ts
│   │   │   ├── system
│   │   │   │   ├── diagnostic.test.ts
│   │   │   │   ├── health-check.test.ts
│   │   │   │   └── list-tools.test.ts
│   │   │   ├── test-connection.ts
│   │   │   ├── types
│   │   │   │   └── mcp-responses.ts
│   │   │   ├── utils
│   │   │   │   ├── cleanup-helpers.ts
│   │   │   │   ├── credentials.ts
│   │   │   │   ├── factories.ts
│   │   │   │   ├── fixtures.ts
│   │   │   │   ├── mcp-context.ts
│   │   │   │   ├── n8n-client.ts
│   │   │   │   ├── node-repository.ts
│   │   │   │   ├── response-types.ts
│   │   │   │   ├── test-context.ts
│   │   │   │   └── webhook-workflows.ts
│   │   │   └── workflows
│   │   │       ├── autofix-workflow.test.ts
│   │   │       ├── create-workflow.test.ts
│   │   │       ├── delete-workflow.test.ts
│   │   │       ├── get-workflow-details.test.ts
│   │   │       ├── get-workflow-minimal.test.ts
│   │   │       ├── get-workflow-structure.test.ts
│   │   │       ├── get-workflow.test.ts
│   │   │       ├── list-workflows.test.ts
│   │   │       ├── smart-parameters.test.ts
│   │   │       ├── update-partial-workflow.test.ts
│   │   │       ├── update-workflow.test.ts
│   │   │       └── validate-workflow.test.ts
│   │   ├── security
│   │   │   ├── command-injection-prevention.test.ts
│   │   │   └── rate-limiting.test.ts
│   │   ├── session
│   │   │   └── test-onSessionCreated-event.ts
│   │   ├── session-lifecycle-retry.test.ts
│   │   ├── session-persistence.test.ts
│   │   ├── session-restoration-warmstart.test.ts
│   │   ├── setup
│   │   │   ├── integration-setup.ts
│   │   │   └── msw-test-server.ts
│   │   ├── telemetry
│   │   │   ├── docker-user-id-stability.test.ts
│   │   │   └── mcp-telemetry.test.ts
│   │   ├── templates
│   │   │   └── metadata-operations.test.ts
│   │   └── workflow-creation-node-type-format.test.ts
│   ├── logger.test.ts
│   ├── MOCKING_STRATEGY.md
│   ├── mocks
│   │   ├── n8n-api
│   │   │   ├── data
│   │   │   │   ├── credentials.ts
│   │   │   │   ├── executions.ts
│   │   │   │   └── workflows.ts
│   │   │   ├── handlers.ts
│   │   │   └── index.ts
│   │   └── README.md
│   ├── node-storage-export.json
│   ├── setup
│   │   ├── global-setup.ts
│   │   ├── msw-setup.ts
│   │   ├── TEST_ENV_DOCUMENTATION.md
│   │   └── test-env.ts
│   ├── test-database-extraction.js
│   ├── test-direct-extraction.js
│   ├── test-enhanced-documentation.js
│   ├── test-enhanced-integration.js
│   ├── test-mcp-extraction.js
│   ├── test-mcp-server-extraction.js
│   ├── test-mcp-tools-integration.js
│   ├── test-node-documentation-service.js
│   ├── test-node-list.js
│   ├── test-package-info.js
│   ├── test-parsing-operations.js
│   ├── test-slack-node-complete.js
│   ├── test-small-rebuild.js
│   ├── test-sqlite-search.js
│   ├── test-storage-system.js
│   ├── unit
│   │   ├── __mocks__
│   │   │   ├── n8n-nodes-base.test.ts
│   │   │   ├── n8n-nodes-base.ts
│   │   │   └── README.md
│   │   ├── database
│   │   │   ├── __mocks__
│   │   │   │   └── better-sqlite3.ts
│   │   │   ├── database-adapter-unit.test.ts
│   │   │   ├── node-repository-core.test.ts
│   │   │   ├── node-repository-operations.test.ts
│   │   │   ├── node-repository-outputs.test.ts
│   │   │   ├── README.md
│   │   │   └── template-repository-core.test.ts
│   │   ├── docker
│   │   │   ├── config-security.test.ts
│   │   │   ├── edge-cases.test.ts
│   │   │   ├── parse-config.test.ts
│   │   │   └── serve-command.test.ts
│   │   ├── errors
│   │   │   └── validation-service-error.test.ts
│   │   ├── examples
│   │   │   └── using-n8n-nodes-base-mock.test.ts
│   │   ├── flexible-instance-security-advanced.test.ts
│   │   ├── flexible-instance-security.test.ts
│   │   ├── http-server
│   │   │   └── multi-tenant-support.test.ts
│   │   ├── http-server-n8n-mode.test.ts
│   │   ├── http-server-n8n-reinit.test.ts
│   │   ├── http-server-session-management.test.ts
│   │   ├── loaders
│   │   │   └── node-loader.test.ts
│   │   ├── mappers
│   │   │   └── docs-mapper.test.ts
│   │   ├── mcp
│   │   │   ├── get-node-essentials-examples.test.ts
│   │   │   ├── handlers-n8n-manager-simple.test.ts
│   │   │   ├── handlers-n8n-manager.test.ts
│   │   │   ├── handlers-workflow-diff.test.ts
│   │   │   ├── lru-cache-behavior.test.ts
│   │   │   ├── multi-tenant-tool-listing.test.ts.disabled
│   │   │   ├── parameter-validation.test.ts
│   │   │   ├── search-nodes-examples.test.ts
│   │   │   ├── tools-documentation.test.ts
│   │   │   └── tools.test.ts
│   │   ├── monitoring
│   │   │   └── cache-metrics.test.ts
│   │   ├── MULTI_TENANT_TEST_COVERAGE.md
│   │   ├── multi-tenant-integration.test.ts
│   │   ├── parsers
│   │   │   ├── node-parser-outputs.test.ts
│   │   │   ├── node-parser.test.ts
│   │   │   ├── property-extractor.test.ts
│   │   │   └── simple-parser.test.ts
│   │   ├── scripts
│   │   │   └── fetch-templates-extraction.test.ts
│   │   ├── services
│   │   │   ├── ai-node-validator.test.ts
│   │   │   ├── ai-tool-validators.test.ts
│   │   │   ├── confidence-scorer.test.ts
│   │   │   ├── config-validator-basic.test.ts
│   │   │   ├── config-validator-edge-cases.test.ts
│   │   │   ├── config-validator-node-specific.test.ts
│   │   │   ├── config-validator-security.test.ts
│   │   │   ├── debug-validator.test.ts
│   │   │   ├── enhanced-config-validator-integration.test.ts
│   │   │   ├── enhanced-config-validator-operations.test.ts
│   │   │   ├── enhanced-config-validator.test.ts
│   │   │   ├── example-generator.test.ts
│   │   │   ├── execution-processor.test.ts
│   │   │   ├── expression-format-validator.test.ts
│   │   │   ├── expression-validator-edge-cases.test.ts
│   │   │   ├── expression-validator.test.ts
│   │   │   ├── fixed-collection-validation.test.ts
│   │   │   ├── loop-output-edge-cases.test.ts
│   │   │   ├── n8n-api-client.test.ts
│   │   │   ├── n8n-validation.test.ts
│   │   │   ├── node-similarity-service.test.ts
│   │   │   ├── node-specific-validators.test.ts
│   │   │   ├── operation-similarity-service-comprehensive.test.ts
│   │   │   ├── operation-similarity-service.test.ts
│   │   │   ├── property-dependencies.test.ts
│   │   │   ├── property-filter-edge-cases.test.ts
│   │   │   ├── property-filter.test.ts
│   │   │   ├── resource-similarity-service-comprehensive.test.ts
│   │   │   ├── resource-similarity-service.test.ts
│   │   │   ├── task-templates.test.ts
│   │   │   ├── template-service.test.ts
│   │   │   ├── universal-expression-validator.test.ts
│   │   │   ├── validation-fixes.test.ts
│   │   │   ├── workflow-auto-fixer.test.ts
│   │   │   ├── workflow-diff-engine.test.ts
│   │   │   ├── workflow-fixed-collection-validation.test.ts
│   │   │   ├── workflow-validator-comprehensive.test.ts
│   │   │   ├── workflow-validator-edge-cases.test.ts
│   │   │   ├── workflow-validator-error-outputs.test.ts
│   │   │   ├── workflow-validator-expression-format.test.ts
│   │   │   ├── workflow-validator-loops-simple.test.ts
│   │   │   ├── workflow-validator-loops.test.ts
│   │   │   ├── workflow-validator-mocks.test.ts
│   │   │   ├── workflow-validator-performance.test.ts
│   │   │   ├── workflow-validator-with-mocks.test.ts
│   │   │   └── workflow-validator.test.ts
│   │   ├── session-lifecycle-events.test.ts
│   │   ├── session-management-api.test.ts
│   │   ├── session-restoration-retry.test.ts
│   │   ├── session-restoration.test.ts
│   │   ├── telemetry
│   │   │   ├── batch-processor.test.ts
│   │   │   ├── config-manager.test.ts
│   │   │   ├── event-tracker.test.ts
│   │   │   ├── event-validator.test.ts
│   │   │   ├── rate-limiter.test.ts
│   │   │   ├── telemetry-error.test.ts
│   │   │   ├── telemetry-manager.test.ts
│   │   │   ├── v2.18.3-fixes-verification.test.ts
│   │   │   └── workflow-sanitizer.test.ts
│   │   ├── templates
│   │   │   ├── batch-processor.test.ts
│   │   │   ├── metadata-generator.test.ts
│   │   │   ├── template-repository-metadata.test.ts
│   │   │   └── template-repository-security.test.ts
│   │   ├── test-env-example.test.ts
│   │   ├── test-infrastructure.test.ts
│   │   ├── types
│   │   │   ├── instance-context-coverage.test.ts
│   │   │   └── instance-context-multi-tenant.test.ts
│   │   ├── utils
│   │   │   ├── auth-timing-safe.test.ts
│   │   │   ├── cache-utils.test.ts
│   │   │   ├── console-manager.test.ts
│   │   │   ├── database-utils.test.ts
│   │   │   ├── fixed-collection-validator.test.ts
│   │   │   ├── n8n-errors.test.ts
│   │   │   ├── node-type-normalizer.test.ts
│   │   │   ├── node-type-utils.test.ts
│   │   │   ├── node-utils.test.ts
│   │   │   ├── simple-cache-memory-leak-fix.test.ts
│   │   │   ├── ssrf-protection.test.ts
│   │   │   └── template-node-resolver.test.ts
│   │   └── validation-fixes.test.ts
│   └── utils
│       ├── assertions.ts
│       ├── builders
│       │   └── workflow.builder.ts
│       ├── data-generators.ts
│       ├── database-utils.ts
│       ├── README.md
│       └── test-helpers.ts
├── thumbnail.png
├── tsconfig.build.json
├── tsconfig.json
├── types
│   ├── mcp.d.ts
│   └── test-env.d.ts
├── verify-telemetry-fix.js
├── versioned-nodes.md
├── vitest.config.benchmark.ts
├── vitest.config.integration.ts
└── vitest.config.ts
```

# Files

--------------------------------------------------------------------------------
/src/scripts/debug-http-search.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env npx tsx

import { createDatabaseAdapter } from '../database/database-adapter';
import { NodeRepository } from '../database/node-repository';
import { NodeSimilarityService } from '../services/node-similarity-service';
import path from 'path';

async function debugHttpSearch() {
  const dbPath = path.join(process.cwd(), 'data/nodes.db');
  const db = await createDatabaseAdapter(dbPath);
  const repository = new NodeRepository(db);
  const service = new NodeSimilarityService(repository);

  console.log('Testing "http" search...\n');

  // Check if httpRequest exists
  const httpNode = repository.getNode('nodes-base.httpRequest');
  console.log('HTTP Request node exists:', httpNode ? 'Yes' : 'No');
  if (httpNode) {
    console.log('  Display name:', httpNode.displayName);
  }

  // Test the search with internal details
  const suggestions = await service.findSimilarNodes('http', 5);
  console.log('\nSuggestions for "http":', suggestions.length);
  suggestions.forEach(s => {
    console.log(`  - ${s.nodeType} (${Math.round(s.confidence * 100)}%)`);
  });

  // Manually calculate score for httpRequest
  console.log('\nManual score calculation for httpRequest:');
  const testNode = {
    nodeType: 'nodes-base.httpRequest',
    displayName: 'HTTP Request',
    category: 'Core Nodes'
  };

  const cleanInvalid = 'http';
  const cleanValid = 'nodesbasehttprequest';
  const displayNameClean = 'httprequest';

  // Check substring
  const hasSubstring = cleanValid.includes(cleanInvalid) || displayNameClean.includes(cleanInvalid);
  console.log(`  Substring match: ${hasSubstring}`);

  // This should give us pattern match score
  const patternScore = hasSubstring ? 35 : 0; // Using 35 for short searches
  console.log(`  Pattern score: ${patternScore}`);

  // Name similarity would be low
  console.log(`  Total score would need to be >= 50 to appear`);

  // Get all nodes and check which ones contain 'http'
  const allNodes = repository.getAllNodes();
  const httpNodes = allNodes.filter(n =>
    n.nodeType.toLowerCase().includes('http') ||
    (n.displayName && n.displayName.toLowerCase().includes('http'))
  );

  console.log('\n\nNodes containing "http" in name:');
  httpNodes.slice(0, 5).forEach(n => {
    console.log(`  - ${n.nodeType} (${n.displayName})`);

    // Calculate score for this node
    const normalizedSearch = 'http';
    const normalizedType = n.nodeType.toLowerCase().replace(/[^a-z0-9]/g, '');
    const normalizedDisplay = (n.displayName || '').toLowerCase().replace(/[^a-z0-9]/g, '');

    const containsInType = normalizedType.includes(normalizedSearch);
    const containsInDisplay = normalizedDisplay.includes(normalizedSearch);

    console.log(`    Type check: "${normalizedType}" includes "${normalizedSearch}" = ${containsInType}`);
    console.log(`    Display check: "${normalizedDisplay}" includes "${normalizedSearch}" = ${containsInDisplay}`);
  });
}

debugHttpSearch().catch(console.error);
```

--------------------------------------------------------------------------------
/src/mcp/tool-docs/workflow_management/n8n-delete-execution.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const n8nDeleteExecutionDoc: ToolDocumentation = {
  name: 'n8n_delete_execution',
  category: 'workflow_management',
  essentials: {
    description: 'Delete an execution record. This only removes the execution history, not any data processed.',
    keyParameters: ['id'],
    example: 'n8n_delete_execution({id: "12345"})',
    performance: 'Immediate deletion, no undo available',
    tips: [
      'Deletion is permanent - execution cannot be recovered',
      'Only removes execution history, not external data changes',
      'Use for cleanup of test executions or sensitive data'
    ]
  },
  full: {
    description: `Permanently deletes a workflow execution record from n8n's history. This removes the execution metadata, logs, and any stored input/output data. However, it does NOT undo any actions the workflow performed (API calls, database changes, file operations, etc.). Use this for cleaning up test executions, removing sensitive data, or managing storage.`,
    parameters: {
      id: {
        type: 'string',
        required: true,
        description: 'The execution ID to delete. This action cannot be undone'
      }
    },
    returns: `Confirmation of deletion or error if execution not found. No data is returned about the deleted execution.`,
    examples: [
      'n8n_delete_execution({id: "12345"}) - Delete a specific execution',
      'n8n_delete_execution({id: "test-run-567"}) - Clean up test execution',
      'n8n_delete_execution({id: "sensitive-data-890"}) - Remove execution with sensitive data',
      'n8n_delete_execution({id: "failed-execution-123"}) - Delete failed execution after debugging'
    ],
    useCases: [
      'Clean up test or development execution history',
      'Remove executions containing sensitive or personal data',
      'Manage storage by deleting old execution records',
      'Clean up after debugging failed workflows',
      'Comply with data retention policies'
    ],
    performance: `Deletion is immediate and permanent. The operation is fast (< 100ms) as it only removes database records. No external systems or data are affected.`,
    bestPractices: [
      'Verify execution ID before deletion - action cannot be undone',
      'Consider exporting execution data before deletion if needed',
      'Use list_executions to find executions to delete',
      'Document why executions were deleted for audit trails',
      'Remember deletion only affects n8n records, not external changes'
    ],
    pitfalls: [
      'Deletion is PERMANENT - no undo or recovery possible',
      'Does NOT reverse workflow actions (API calls, DB changes, etc.)',
      'Deleting executions breaks audit trails and debugging history',
      'Cannot delete currently running executions (waiting status)',
      'Bulk deletion not supported - must delete one at a time'
    ],
    relatedTools: ['n8n_list_executions', 'n8n_get_execution', 'n8n_trigger_webhook_workflow']
  }
};
```

--------------------------------------------------------------------------------
/src/mcp/tool-docs/discovery/list-ai-tools.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const listAiToolsDoc: ToolDocumentation = {
  name: 'list_ai_tools',
  category: 'discovery',
  essentials: {
    description: 'DEPRECATED: Basic list of 263 AI nodes. For comprehensive AI Agent guidance, use tools_documentation({topic: "ai_agents_guide"}). That guide covers architecture, connections, tools, validation, and best practices. Use search_nodes({query: "AI", includeExamples: true}) for AI nodes with working examples.',
    keyParameters: [],
    example: 'tools_documentation({topic: "ai_agents_guide"}) // Recommended alternative',
    performance: 'Instant (cached)',
    tips: [
      'NEW: Use ai_agents_guide for comprehensive AI workflow documentation',
      'Use search_nodes({includeExamples: true}) for AI nodes with real-world examples',
      'ANY node can be an AI tool - not limited to AI-specific nodes',
      'Use get_node_as_tool_info for guidance on any node'
    ]
  },
  full: {
    description: '**DEPRECATED in favor of ai_agents_guide**. Lists 263 nodes with built-in AI capabilities. For comprehensive documentation on building AI Agent workflows, use tools_documentation({topic: "ai_agents_guide"}) which covers architecture, the 8 AI connection types, validation, and best practices with real examples. IMPORTANT: This basic list is NOT a complete guide - use the full AI Agents guide instead.',
    parameters: {},
    returns: 'Array of 263 AI-optimized nodes. RECOMMENDED: Use ai_agents_guide for comprehensive guidance, or search_nodes({query: "AI", includeExamples: true}) for AI nodes with working configuration examples.',
    examples: [
      '// RECOMMENDED: Use the comprehensive AI Agents guide',
      'tools_documentation({topic: "ai_agents_guide"})',
      '',
      '// Or search for AI nodes with real-world examples',
      'search_nodes({query: "AI Agent", includeExamples: true})',
      '',
      '// Basic list (deprecated)',
      'list_ai_tools() - Returns 263 AI-optimized nodes'
    ],
    useCases: [
      'Discover AI model integrations (OpenAI, Anthropic, Google AI)',
      'Find vector databases for RAG applications',
      'Locate embedding generators and processors',
      'Build AI agent tool chains with ANY n8n node'
    ],
    performance: 'Instant - results are pre-cached in memory',
    bestPractices: [
      'Remember: ANY node works as an AI tool when connected to AI Agent',
      'Common non-AI nodes used as tools: Slack (messaging), Google Sheets (data), HTTP Request (APIs), Code (custom logic)',
      'For community nodes: set N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true'
    ],
    pitfalls: [
      'This list is NOT exhaustive - it only shows nodes with AI-specific features',
      'Don\'t limit yourself to this list when building AI workflows',
      'Community nodes require environment variable to work as tools'
    ],
    relatedTools: ['get_node_as_tool_info for any node usage', 'search_nodes to find specific nodes', 'get_node_essentials to configure nodes']
  }
};
```

--------------------------------------------------------------------------------
/scripts/test-sqljs-triggers.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env node
/**
 * Test script to verify trigger detection works with sql.js adapter
 */

import { createDatabaseAdapter } from '../src/database/database-adapter';
import { NodeRepository } from '../src/database/node-repository';
import { logger } from '../src/utils/logger';
import path from 'path';

async function testSqlJsTriggers() {
  logger.info('🧪 Testing trigger detection with sql.js adapter...\n');
  
  try {
    // Force sql.js by temporarily renaming better-sqlite3
    const originalRequire = require.cache[require.resolve('better-sqlite3')];
    if (originalRequire) {
      delete require.cache[require.resolve('better-sqlite3')];
    }
    
    // Mock better-sqlite3 to force sql.js usage
    const Module = require('module');
    const originalResolveFilename = Module._resolveFilename;
    Module._resolveFilename = function(request: string, parent: any, isMain: boolean) {
      if (request === 'better-sqlite3') {
        throw new Error('Forcing sql.js adapter for testing');
      }
      return originalResolveFilename.apply(this, arguments);
    };
    
    // Now create adapter - should use sql.js
    const dbPath = path.join(process.cwd(), 'data', 'nodes.db');
    logger.info(`📁 Database path: ${dbPath}`);
    
    const adapter = await createDatabaseAdapter(dbPath);
    logger.info('✅ Adapter created (should be sql.js)\n');
    
    // Test direct query
    logger.info('📊 Testing direct database query:');
    const triggerNodes = ['nodes-base.webhook', 'nodes-base.cron', 'nodes-base.interval', 'nodes-base.emailReadImap'];
    
    for (const nodeType of triggerNodes) {
      const row = adapter.prepare('SELECT * FROM nodes WHERE node_type = ?').get(nodeType);
      if (row) {
        logger.info(`${nodeType}:`);
        logger.info(`  is_trigger raw value: ${row.is_trigger} (type: ${typeof row.is_trigger})`);
        logger.info(`  !!is_trigger: ${!!row.is_trigger}`);
        logger.info(`  Number(is_trigger) === 1: ${Number(row.is_trigger) === 1}`);
      }
    }
    
    // Test through repository
    logger.info('\n📦 Testing through NodeRepository:');
    const repository = new NodeRepository(adapter);
    
    for (const nodeType of triggerNodes) {
      const node = repository.getNode(nodeType);
      if (node) {
        logger.info(`${nodeType}: isTrigger = ${node.isTrigger}`);
      }
    }
    
    // Test list query
    logger.info('\n📋 Testing list query:');
    const allTriggers = adapter.prepare(
      'SELECT node_type, is_trigger FROM nodes WHERE node_type IN (?, ?, ?, ?)'
    ).all(...triggerNodes);
    
    for (const node of allTriggers) {
      logger.info(`${node.node_type}: is_trigger = ${node.is_trigger} (type: ${typeof node.is_trigger})`);
    }
    
    adapter.close();
    logger.info('\n✅ Test complete!');
    
    // Restore original require
    Module._resolveFilename = originalResolveFilename;
    
  } catch (error) {
    logger.error('Test failed:', error);
    process.exit(1);
  }
}

// Run test
testSqlJsTriggers().catch(console.error);
```

--------------------------------------------------------------------------------
/scripts/extract-nodes-simple.sh:
--------------------------------------------------------------------------------

```bash
#!/bin/bash
set -e

echo "🐳 Simple n8n Node Extraction via Docker"
echo "======================================="

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Function to print colored output
print_status() {
    echo -e "${GREEN}[$(date +'%H:%M:%S')]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[$(date +'%H:%M:%S')]${NC} ⚠️  $1"
}

print_error() {
    echo -e "${RED}[$(date +'%H:%M:%S')]${NC} ❌ $1"
}

# Check if Docker is running
if ! docker info > /dev/null 2>&1; then
    print_error "Docker is not running. Please start Docker and try again."
    exit 1
fi

print_status "Docker is running ✅"

# Build the project first
print_status "Building the project..."
npm run build

# Create a temporary directory for extraction
TEMP_DIR=$(mktemp -d)
print_status "Created temporary directory: $TEMP_DIR"

# Run Docker container to copy node files
print_status "Running n8n container to extract nodes..."
docker run --rm -d --name n8n-temp n8nio/n8n:latest sleep 300

# Wait a bit for container to start
sleep 5

# Copy n8n modules from container
print_status "Copying n8n modules from container..."
docker cp n8n-temp:/usr/local/lib/node_modules/n8n/node_modules "$TEMP_DIR/node_modules" || {
    print_error "Failed to copy node_modules"
    docker stop n8n-temp
    rm -rf "$TEMP_DIR"
    exit 1
}

# Stop the container
docker stop n8n-temp

# Run our extraction script locally
print_status "Running extraction script..."
NODE_ENV=development \
NODE_DB_PATH=./data/nodes-fresh.db \
N8N_MODULES_PATH="$TEMP_DIR/node_modules" \
node scripts/extract-from-docker.js

# Clean up
print_status "Cleaning up temporary files..."
rm -rf "$TEMP_DIR"

# Check the results
print_status "Checking extraction results..."
if [ -f "./data/nodes-fresh.db" ]; then
    NODE_COUNT=$(sqlite3 ./data/nodes-fresh.db "SELECT COUNT(*) FROM nodes;" 2>/dev/null || echo "0")
    print_status "Extracted $NODE_COUNT nodes"
    
    # Check if we got the If node source code and look for version
    IF_SOURCE=$(sqlite3 ./data/nodes-fresh.db "SELECT source_code FROM nodes WHERE node_type='n8n-nodes-base.If' LIMIT 1;" 2>/dev/null || echo "")
    if [[ $IF_SOURCE =~ version:[[:space:]]*([0-9]+) ]]; then
        IF_CODE_VERSION="${BASH_REMATCH[1]}"
        print_status "If node version from source code: v$IF_CODE_VERSION"
        
        if [ "$IF_CODE_VERSION" -ge "2" ]; then
            print_status "✅ Successfully extracted latest If node (v$IF_CODE_VERSION)!"
        else
            print_warning "If node is still v$IF_CODE_VERSION, expected v2 or higher"
        fi
    fi
else
    print_error "Database file not found after extraction"
fi

print_status "✨ Extraction complete!"

# Offer to restart the MCP server
echo ""
read -p "Would you like to restart the MCP server with the new nodes? (y/n) " -n 1 -r
echo ""
if [[ $REPLY =~ ^[Yy]$ ]]; then
    print_status "Restarting MCP server..."
    # Kill any existing server process
    pkill -f "node.*dist/index.js" || true
    
    # Start the server
    npm start &
    print_status "MCP server restarted with fresh node database"
fi
```

--------------------------------------------------------------------------------
/tests/unit/http-server-n8n-reinit.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { SingleSessionHTTPServer } from '../../src/http-server-single-session';
import express from 'express';

describe('HTTP Server n8n Re-initialization', () => {
  let server: SingleSessionHTTPServer;
  let app: express.Application;

  beforeEach(() => {
    // Set required environment variables for testing
    process.env.AUTH_TOKEN = 'test-token-32-chars-minimum-length-for-security';
    process.env.NODE_DB_PATH = ':memory:';
  });

  afterEach(async () => {
    if (server) {
      await server.shutdown();
    }
    // Clean up environment
    delete process.env.AUTH_TOKEN;
    delete process.env.NODE_DB_PATH;
  });

  it('should handle re-initialization requests gracefully', async () => {
    // Create mock request and response
    const mockReq = {
      method: 'POST',
      url: '/mcp',
      headers: {},
      body: {
        jsonrpc: '2.0',
        id: 1,
        method: 'initialize',
        params: {
          protocolVersion: '2024-11-05',
          capabilities: { tools: {} },
          clientInfo: { name: 'n8n', version: '1.0.0' }
        }
      },
      get: (header: string) => {
        if (header === 'user-agent') return 'test-agent';
        if (header === 'content-length') return '100';
        if (header === 'content-type') return 'application/json';
        return undefined;
      },
      ip: '127.0.0.1'
    } as any;

    const mockRes = {
      headersSent: false,
      statusCode: 200,
      finished: false,
      status: (code: number) => mockRes,
      json: (data: any) => mockRes,
      setHeader: (name: string, value: string) => mockRes,
      end: () => mockRes
    } as any;

    try {
      server = new SingleSessionHTTPServer();
      
      // First request should work
      await server.handleRequest(mockReq, mockRes);
      expect(mockRes.statusCode).toBe(200);
      
      // Second request (re-initialization) should also work
      mockReq.body.id = 2;
      await server.handleRequest(mockReq, mockRes);
      expect(mockRes.statusCode).toBe(200);
      
    } catch (error) {
      // This test mainly ensures the logic doesn't throw errors
      // The actual MCP communication would need a more complex setup
      console.log('Expected error in unit test environment:', error);
      expect(error).toBeDefined(); // We expect some error due to simplified mock setup
    }
  });

  it('should identify initialize requests correctly', () => {
    const initializeRequest = {
      jsonrpc: '2.0',
      id: 1,
      method: 'initialize',
      params: {}
    };

    const nonInitializeRequest = {
      jsonrpc: '2.0',
      id: 1,
      method: 'tools/list'
    };

    // Test the logic we added for detecting initialize requests
    const isInitReq1 = initializeRequest && 
      initializeRequest.method === 'initialize' && 
      initializeRequest.jsonrpc === '2.0';
    
    const isInitReq2 = nonInitializeRequest && 
      nonInitializeRequest.method === 'initialize' && 
      nonInitializeRequest.jsonrpc === '2.0';

    expect(isInitReq1).toBe(true);
    expect(isInitReq2).toBe(false);
  });
});
```

--------------------------------------------------------------------------------
/scripts/test-nodes-base-prefix.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env tsx

/**
 * Specific test for nodes-base. prefix validation
 */

import { WorkflowValidator } from '../src/services/workflow-validator';
import { EnhancedConfigValidator } from '../src/services/enhanced-config-validator';
import { NodeRepository } from '../src/database/node-repository';
import { createDatabaseAdapter } from '../src/database/database-adapter';
import { Logger } from '../src/utils/logger';

const logger = new Logger({ prefix: '[TestNodesBasePrefix]' });

async function testValidation() {
  const adapter = await createDatabaseAdapter('./data/nodes.db');
  const repository = new NodeRepository(adapter);
  const validator = new WorkflowValidator(repository, EnhancedConfigValidator);

  logger.info('Testing nodes-base. prefix validation...\n');

  // Test various nodes-base. prefixed types
  const testCases = [
    { type: 'nodes-base.webhook', expected: 'n8n-nodes-base.webhook' },
    { type: 'nodes-base.httpRequest', expected: 'n8n-nodes-base.httpRequest' },
    { type: 'nodes-base.set', expected: 'n8n-nodes-base.set' },
    { type: 'nodes-base.code', expected: 'n8n-nodes-base.code' },
    { type: 'nodes-base.slack', expected: 'n8n-nodes-base.slack' },
  ];

  for (const testCase of testCases) {
    const workflow = {
      name: `Test ${testCase.type}`,
      nodes: [{
        id: 'test-node',
        name: 'Test Node',
        type: testCase.type,
        typeVersion: 1,
        position: [100, 100] as [number, number],
        parameters: {}
      }],
      connections: {}
    };

    logger.info(`Testing: "${testCase.type}"`);
    const result = await validator.validateWorkflow(workflow as any);
    
    const nodeTypeError = result.errors.find(err => 
      err && typeof err === 'object' && 'message' in err && 
      err.message.includes(testCase.type) && 
      err.message.includes(testCase.expected)
    );

    if (nodeTypeError) {
      logger.info(`✅ Caught and suggested: "${testCase.expected}"`);
    } else {
      logger.error(`❌ Failed to catch invalid type: "${testCase.type}"`);
      result.errors.forEach(err => {
        if (err && typeof err === 'object' && 'message' in err) {
          logger.error(`   Error: ${err.message}`);
        }
      });
    }
  }

  // Test that n8n-nodes-base. prefix still works
  const validWorkflow = {
    name: 'Valid Workflow',
    nodes: [{
      id: 'webhook',
      name: 'Webhook',
      type: 'n8n-nodes-base.webhook',
      typeVersion: 2,
      position: [100, 100] as [number, number],
      parameters: {}
    }],
    connections: {}
  };

  logger.info('\nTesting valid n8n-nodes-base.webhook:');
  const validResult = await validator.validateWorkflow(validWorkflow as any);
  
  const hasNodeTypeError = validResult.errors.some(err => 
    err && typeof err === 'object' && 'message' in err && 
    err.message.includes('node type')
  );

  if (!hasNodeTypeError) {
    logger.info('✅ Correctly accepted n8n-nodes-base.webhook');
  } else {
    logger.error('❌ Incorrectly rejected valid n8n-nodes-base.webhook');
  }

  adapter.close();
}

testValidation().catch(err => {
  logger.error('Test failed:', err);
  process.exit(1);
});
```

--------------------------------------------------------------------------------
/docs/CODECOV_SETUP.md:
--------------------------------------------------------------------------------

```markdown
# Codecov Setup Guide

This guide explains how to set up and configure Codecov for the n8n-MCP project.

## Prerequisites

1. A Codecov account (sign up at https://codecov.io)
2. Repository admin access to add the CODECOV_TOKEN secret

## Setup Steps

### 1. Get Your Codecov Token

1. Sign in to [Codecov](https://codecov.io)
2. Add your repository: `czlonkowski/n8n-mcp`
3. Copy the upload token from the repository settings

### 2. Add Token to GitHub Secrets

1. Go to your GitHub repository settings
2. Navigate to `Settings` → `Secrets and variables` → `Actions`
3. Click "New repository secret"
4. Name: `CODECOV_TOKEN`
5. Value: Paste your Codecov token
6. Click "Add secret"

### 3. Update the Badge Token

Edit the README.md file and replace `YOUR_TOKEN` in the Codecov badge with your actual token:

```markdown
[![codecov](https://codecov.io/gh/czlonkowski/n8n-mcp/graph/badge.svg?token=YOUR_ACTUAL_TOKEN)](https://codecov.io/gh/czlonkowski/n8n-mcp)
```

Note: The token in the badge URL is a read-only token and safe to commit.

## Configuration Details

### codecov.yml

The configuration file sets:
- **Target coverage**: 80% for both project and patch
- **Coverage precision**: 2 decimal places
- **Comment behavior**: Comments on all PRs with coverage changes
- **Ignored files**: Test files, scripts, node_modules, and build outputs

### GitHub Actions

The workflow:
1. Runs tests with coverage using `npm run test:coverage`
2. Generates LCOV format coverage report
3. Uploads to Codecov using the official action
4. Fails the build if upload fails

### Vitest Configuration

Coverage settings in `vitest.config.ts` (a sketch follows the list):
- **Provider**: V8 (fast and accurate)
- **Reporters**: text, json, html, and lcov
- **Thresholds**: 80% lines, 80% functions, 75% branches, 80% statements
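
For reference, here is a minimal sketch of that coverage block. It is illustrative only; the project's actual `vitest.config.ts` may contain additional options.

```typescript
// Illustrative sketch of the coverage settings described above.
// The repository's real vitest.config.ts may differ in detail.
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    coverage: {
      provider: 'v8',                             // V8 coverage provider
      reporter: ['text', 'json', 'html', 'lcov'], // lcov is what Codecov consumes
      thresholds: {
        lines: 80,
        functions: 80,
        branches: 75,
        statements: 80,
      },
    },
  },
});
```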

## Viewing Coverage

### Local Coverage

```bash
# Generate coverage report
npm run test:coverage

# View HTML report
open coverage/index.html
```

### Online Coverage

1. Visit https://codecov.io/gh/czlonkowski/n8n-mcp
2. View detailed reports, graphs, and file-by-file coverage
3. Check PR comments for coverage changes

## Troubleshooting

### Coverage Not Uploading

1. Verify CODECOV_TOKEN is set in GitHub secrets
2. Check GitHub Actions logs for errors
3. Ensure coverage/lcov.info is generated

### Badge Not Showing

1. Wait a few minutes after first upload
2. Verify the token in the badge URL is correct
3. Check that the repository's public/private visibility settings match between GitHub and Codecov

### Low Coverage Areas

Current areas with lower coverage that could be improved:
- HTTP server implementations
- MCP index files
- Some edge cases in validators

## Best Practices

1. **Write tests first**: Aim for TDD when adding features
2. **Focus on critical paths**: Prioritize testing core functionality
3. **Mock external dependencies**: Use MSW for HTTP, mock for databases
4. **Keep coverage realistic**: 80% is good, 100% isn't always practical
5. **Monitor trends**: Watch coverage over time, not just absolute numbers

## Resources

- [Codecov Documentation](https://docs.codecov.io/)
- [Vitest Coverage](https://vitest.dev/guide/coverage.html)
- [GitHub Actions + Codecov](https://github.com/codecov/codecov-action)
```

--------------------------------------------------------------------------------
/src/mcp/stdio-wrapper.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env node

/**
 * Stdio wrapper for MCP server
 * Ensures clean JSON-RPC communication by suppressing all non-JSON output
 */

// CRITICAL: Set environment BEFORE any imports to prevent any initialization logs
process.env.MCP_MODE = 'stdio';
process.env.DISABLE_CONSOLE_OUTPUT = 'true';
process.env.LOG_LEVEL = 'error';

// Suppress all console output before anything else
const originalConsoleLog = console.log;
const originalConsoleError = console.error;
const originalConsoleWarn = console.warn;
const originalConsoleInfo = console.info;
const originalConsoleDebug = console.debug;
const originalConsoleTrace = console.trace;
const originalConsoleDir = console.dir;
const originalConsoleTime = console.time;
const originalConsoleTimeEnd = console.timeEnd;

// Override ALL console methods to prevent any output
console.log = () => {};
console.error = () => {};
console.warn = () => {};
console.info = () => {};
console.debug = () => {};
console.trace = () => {};
console.dir = () => {};
console.time = () => {};
console.timeEnd = () => {};
console.timeLog = () => {};
console.group = () => {};
console.groupEnd = () => {};
console.table = () => {};
console.clear = () => {};
console.count = () => {};
console.countReset = () => {};

// Import and run the server AFTER suppressing output
import { N8NDocumentationMCPServer } from './server';

let server: N8NDocumentationMCPServer | null = null;

async function main() {
  try {
    server = new N8NDocumentationMCPServer();
    await server.run();
  } catch (error) {
    // In case of fatal error, output to stderr only
    originalConsoleError('Fatal error:', error);
    process.exit(1);
  }
}

// Handle uncaught errors silently
process.on('uncaughtException', (error) => {
  originalConsoleError('Uncaught exception:', error);
  process.exit(1);
});

process.on('unhandledRejection', (reason) => {
  originalConsoleError('Unhandled rejection:', reason);
  process.exit(1);
});

// Handle termination signals for proper cleanup
let isShuttingDown = false;

async function shutdown(signal: string) {
  if (isShuttingDown) return;
  isShuttingDown = true;
  
  // Log to stderr only (not stdout which would corrupt JSON-RPC)
  originalConsoleError(`Received ${signal}, shutting down gracefully...`);
  
  try {
    // Shutdown the server if it exists
    if (server) {
      await server.shutdown();
    }
  } catch (error) {
    originalConsoleError('Error during shutdown:', error);
  }
  
  // Close stdin to signal we're done reading
  process.stdin.pause();
  process.stdin.destroy();
  
  // Exit with timeout to ensure we don't hang
  setTimeout(() => {
    process.exit(0);
  }, 500).unref(); // unref() allows process to exit if this is the only thing keeping it alive
  
  // But also exit immediately if nothing else is pending
  process.exit(0);
}

// Register signal handlers
process.on('SIGTERM', () => void shutdown('SIGTERM'));
process.on('SIGINT', () => void shutdown('SIGINT'));
process.on('SIGHUP', () => void shutdown('SIGHUP'));

// Also handle stdin close (when Claude Desktop closes the pipe)
process.stdin.on('end', () => {
  originalConsoleError('stdin closed, shutting down...');
  void shutdown('STDIN_CLOSE');
});

main();
```

--------------------------------------------------------------------------------
/src/mcp/tool-docs/discovery/list-nodes.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const listNodesDoc: ToolDocumentation = {
  name: 'list_nodes',
  category: 'discovery',
  essentials: {
    description: 'Lists n8n nodes with filtering options. Returns up to 525 total nodes. Default limit is 50, use limit:200 to get all nodes. Filter by category to find specific node types like triggers (104 nodes) or AI nodes (263 nodes).',
    keyParameters: ['category', 'package', 'limit', 'isAITool'],
    example: 'list_nodes({limit:200})',
    performance: '<10ms for any query size',
    tips: [
      'Use limit:200 to get all 525 nodes',
      'Categories: trigger (104), transform (250+), output/input (50+)',
      'Use search_nodes for keyword search'
    ]
  },
  full: {
    description: 'Lists n8n nodes with comprehensive filtering options. Returns an array of node metadata including type, name, description, and category. Database contains 525 total nodes: 456 from n8n-nodes-base package and 69 from @n8n/n8n-nodes-langchain package.',
    parameters: {
      category: { type: 'string', description: 'Filter by category: "trigger" (104 nodes), "transform" (250+ nodes), "output", "input", or "AI"', required: false },
      package: { type: 'string', description: 'Filter by package: "n8n-nodes-base" (456 core nodes) or "@n8n/n8n-nodes-langchain" (69 AI nodes)', required: false },
      limit: { type: 'number', description: 'Maximum results to return. Default: 50. Use 200+ to get all 525 nodes', required: false },
      isAITool: { type: 'boolean', description: 'Filter to show only AI-capable nodes (263 nodes)', required: false },
      developmentStyle: { type: 'string', description: 'Filter by style: "programmatic" or "declarative". Most nodes are programmatic', required: false }
    },
    returns: 'Array of node objects, each containing: nodeType (e.g., "nodes-base.webhook"), displayName (e.g., "Webhook"), description, category, package, isAITool flag',
    examples: [
      'list_nodes({limit:200}) - Returns all 525 nodes',
      'list_nodes({category:"trigger"}) - Returns 104 trigger nodes (Webhook, Schedule, Email Trigger, etc.)',
      'list_nodes({package:"@n8n/n8n-nodes-langchain"}) - Returns 69 AI/LangChain nodes',
      'list_nodes({isAITool:true}) - Returns 263 AI-capable nodes',
      'list_nodes({category:"trigger", isAITool:true}) - Combines filters for AI-capable triggers'
    ],
    useCases: [
      'Browse all available nodes when building workflows',
      'Find all trigger nodes to start workflows',
      'Discover AI/ML nodes for intelligent automation',
      'Check available nodes in specific packages'
    ],
    performance: '<10ms for any query size. Results are cached in memory',
    bestPractices: [
      'Use limit:200 when you need the complete node inventory',
      'Filter by category for focused discovery',
      'Combine with get_node_essentials to configure selected nodes'
    ],
    pitfalls: [
      'No text search capability - use search_nodes for keyword search',
      'developmentStyle filter rarely useful - most nodes are "programmatic"'
    ],
    relatedTools: ['search_nodes for keyword search', 'list_ai_tools for AI-specific discovery', 'get_node_essentials to configure nodes']
  }
};
```
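
For reference, a small sketch of what a call like `list_nodes({limit: 200})` yields per item, following the `returns` description above; the field values are illustrative, not taken from the database.

```typescript
// Sketch of one item in the list_nodes result, matching the `returns`
// description above. Values are illustrative.
interface ListedNode {
  nodeType: string;      // e.g. "nodes-base.webhook"
  displayName: string;   // e.g. "Webhook"
  description: string;
  category: string;
  package: string;
  isAITool: boolean;
}

const exampleItem: ListedNode = {
  nodeType: 'nodes-base.webhook',
  displayName: 'Webhook',
  description: 'Starts the workflow when a webhook is called',
  category: 'trigger',
  package: 'n8n-nodes-base',
  isAITool: false,
};
```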

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
# syntax=docker/dockerfile:1.7
# Ultra-optimized Dockerfile - minimal runtime dependencies (no n8n packages)

# Stage 1: Builder (TypeScript compilation only)
FROM node:22-alpine AS builder
WORKDIR /app

# Copy tsconfig files for TypeScript compilation
COPY tsconfig*.json ./

# Create minimal package.json and install ONLY build dependencies
# Note: openai and zod are needed for TypeScript compilation of template metadata modules
RUN --mount=type=cache,target=/root/.npm \
    echo '{}' > package.json && \
    npm install --no-save typescript@^5.8.3 @types/node@^22.15.30 @types/express@^5.0.3 \
        @modelcontextprotocol/sdk@^1.12.1 dotenv@^16.5.0 express@^5.1.0 axios@^1.10.0 \
        n8n-workflow@^1.96.0 uuid@^11.0.5 @types/uuid@^10.0.0 \
        openai@^4.77.0 zod@^3.24.1 lru-cache@^11.2.1 @supabase/supabase-js@^2.57.4

# Copy source and build
COPY src ./src
# Note: src/n8n contains TypeScript types needed for compilation
# These will be compiled but not included in runtime
RUN npx tsc -p tsconfig.build.json

# Stage 2: Runtime (minimal dependencies)
FROM node:22-alpine AS runtime
WORKDIR /app

# Install only essential runtime tools
RUN apk add --no-cache curl su-exec && \
    rm -rf /var/cache/apk/*

# Copy runtime-only package.json
COPY package.runtime.json package.json

# Install runtime dependencies with cache mount
RUN --mount=type=cache,target=/root/.npm \
    npm install --production --no-audit --no-fund

# Copy built application
COPY --from=builder /app/dist ./dist

# Copy pre-built database and required files
# Cache bust: 2025-07-06-trigger-fix-v3 - includes is_trigger=true for webhook,cron,interval,emailReadImap
COPY data/nodes.db ./data/
COPY src/database/schema-optimized.sql ./src/database/
COPY .env.example ./

# Copy entrypoint script, config parser, and n8n-mcp command
COPY docker/docker-entrypoint.sh /usr/local/bin/
COPY docker/parse-config.js /app/docker/
COPY docker/n8n-mcp /usr/local/bin/
RUN chmod +x /usr/local/bin/docker-entrypoint.sh /usr/local/bin/n8n-mcp

# Add container labels
LABEL org.opencontainers.image.source="https://github.com/czlonkowski/n8n-mcp"
LABEL org.opencontainers.image.description="n8n MCP Server - Runtime Only"
LABEL org.opencontainers.image.licenses="MIT"
LABEL org.opencontainers.image.title="n8n-mcp"

# Create non-root user with unpredictable UID/GID
# Using a hash of the build time to generate unpredictable IDs
RUN BUILD_HASH=$(date +%s | sha256sum | head -c 8) && \
    UID=$((10000 + 0x${BUILD_HASH} % 50000)) && \
    GID=$((10000 + 0x${BUILD_HASH} % 50000)) && \
    addgroup -g ${GID} -S nodejs && \
    adduser -S nodejs -u ${UID} -G nodejs && \
    chown -R nodejs:nodejs /app

# Switch to non-root user
USER nodejs

# Set Docker environment flag
ENV IS_DOCKER=true

# Telemetry: Anonymous usage statistics are ENABLED by default
# To opt-out, uncomment the following line:
# ENV N8N_MCP_TELEMETRY_DISABLED=true

# Expose HTTP port
EXPOSE 3000

# Set stop signal to SIGTERM (default, but explicit is better)
STOPSIGNAL SIGTERM

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
  CMD curl -f http://127.0.0.1:3000/health || exit 1

# Optimized entrypoint
ENTRYPOINT ["/usr/local/bin/docker-entrypoint.sh"]
CMD ["node", "dist/mcp/index.js"]

```

--------------------------------------------------------------------------------
/src/mcp/tool-docs/configuration/get-node-as-tool-info.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const getNodeAsToolInfoDoc: ToolDocumentation = {
  name: 'get_node_as_tool_info',
  category: 'configuration',
  essentials: {
    description: 'Explains how to use ANY node as an AI tool with requirements and examples.',
    keyParameters: ['nodeType'],
    example: 'get_node_as_tool_info({nodeType: "nodes-base.slack"})',
    performance: 'Fast - returns guidance and examples',
    tips: [
      'ANY node can be used as AI tool, not just AI-marked ones',
      'Community nodes need N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true',
      'Provides specific use cases and connection requirements'
    ]
  },
  full: {
    description: `Shows how to use any n8n node as an AI tool in AI Agent workflows. In n8n, ANY node can be connected to an AI Agent's tool port, allowing the AI to use that node's functionality. This tool provides specific guidance, requirements, and examples for using a node as an AI tool.`,
    parameters: {
      nodeType: {
        type: 'string',
        required: true,
        description: 'Full node type WITH prefix: "nodes-base.slack", "nodes-base.googleSheets", etc.',
        examples: [
          'nodes-base.slack',
          'nodes-base.httpRequest',
          'nodes-base.googleSheets',
          'nodes-langchain.documentLoader'
        ]
      }
    },
    returns: `Object containing:
- nodeType: The node's full type identifier
- displayName: Human-readable name
- isMarkedAsAITool: Whether node has usableAsTool property
- aiToolCapabilities: Detailed AI tool usage information including:
  - canBeUsedAsTool: Always true in n8n
  - requiresEnvironmentVariable: For community nodes
  - commonUseCases: Specific AI tool use cases
  - requirements: Connection and environment setup
  - examples: Code examples for common scenarios
  - tips: Best practices for AI tool usage`,
    examples: [
      'get_node_as_tool_info({nodeType: "nodes-base.slack"}) - Get AI tool guidance for Slack',
      'get_node_as_tool_info({nodeType: "nodes-base.httpRequest"}) - Learn to use HTTP Request as AI tool',
      'get_node_as_tool_info({nodeType: "nodes-base.postgres"}) - Database queries as AI tools'
    ],
    useCases: [
      'Understanding how to connect any node to AI Agent',
      'Learning environment requirements for community nodes',
      'Getting specific use case examples for AI tool usage',
      'Checking if a node is optimized for AI usage',
      'Understanding credential requirements for AI tools'
    ],
    performance: 'Very fast - returns pre-computed guidance and examples',
    bestPractices: [
      'Use this before configuring nodes as AI tools',
      'Check environment requirements for community nodes',
      'Review common use cases to understand best applications',
      'Test nodes independently before connecting to AI Agent',
      'Give tools descriptive names in AI Agent configuration'
    ],
    pitfalls: [
      'Community nodes require environment variable to be used as tools',
      'Not all nodes make sense as AI tools (e.g., triggers)',
      'Some nodes require specific credentials configuration',
      'Tool descriptions in AI Agent must be clear and detailed'
    ],
    relatedTools: ['list_ai_tools', 'get_node_essentials', 'validate_node_operation']
  }
};
```

--------------------------------------------------------------------------------
/src/mcp/tool-docs/system/n8n-list-available-tools.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const n8nListAvailableToolsDoc: ToolDocumentation = {
  name: 'n8n_list_available_tools',
  category: 'system',
  essentials: {
    description: 'List all available n8n management tools and their capabilities',
    keyParameters: [],
    example: 'n8n_list_available_tools({})',
    performance: 'Instant - returns static tool list',
    tips: [
      'Shows only tools available with current API configuration',
      'If no n8n tools appear, run n8n_diagnostic to troubleshoot',
      'Tool availability depends on N8N_API_URL and N8N_API_KEY being set'
    ]
  },
  full: {
    description: `Lists all available n8n management tools based on current configuration.

This tool provides:
- Complete list of n8n management tools (when API is configured)
- Tool descriptions and capabilities
- Categorized tool listing (workflow, execution, system)
- Dynamic availability based on API configuration

The tool list is dynamic:
- Shows 14+ management tools when N8N_API_URL and N8N_API_KEY are configured
- Shows only documentation tools when API is not configured
- Helps discover available functionality
- Provides quick reference for tool names and purposes`,
    parameters: {},
    returns: `Object containing:
- tools: Array of available tool objects, each with:
  - name: Tool identifier (e.g., 'n8n_create_workflow')
  - description: Brief description of tool functionality
  - category: Tool category ('workflow', 'execution', 'system')
  - requiresApi: Whether tool needs API configuration
- categories: Summary count by category
- totalTools: Total number of available tools
- apiConfigured: Whether n8n API is configured`,
    examples: [
      'n8n_list_available_tools({}) - List all available tools',
      '// Check for specific tool availability\nconst tools = await n8n_list_available_tools({});\nconst hasWorkflowTools = tools.tools.some(t => t.category === "workflow");',
      '// Discover management capabilities\nconst result = await n8n_list_available_tools({});\nconsole.log(`${result.totalTools} tools available`);'
    ],
    useCases: [
      'Discovering available n8n management capabilities',
      'Checking if API configuration is working correctly',
      'Finding the right tool for a specific task',
      'Generating help documentation or command lists',
      'Verifying tool availability before automation scripts'
    ],
    performance: `Instant response:
- No API calls required
- Returns pre-defined tool list
- Filtered based on configuration
- Zero network overhead`,
    bestPractices: [
      'Check tool availability before building automation workflows',
      'Use with n8n_diagnostic if expected tools are missing',
      'Reference tool names exactly as returned by this tool',
      'Group operations by category for better organization',
      'Cache results as tool list only changes with configuration'
    ],
    pitfalls: [
      'Tool list is empty if N8N_API_URL and N8N_API_KEY are not set',
      'Does not validate if tools will actually work - just shows availability',
      'Tool names must be used exactly as returned',
      'Does not show tool parameters - use tools_documentation for details'
    ],
    relatedTools: ['n8n_diagnostic', 'n8n_health_check', 'tools_documentation']
  }
};
```

--------------------------------------------------------------------------------
/scripts/test-webhook-validation.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env npx tsx

import { EnhancedConfigValidator } from '../src/services/enhanced-config-validator.js';

console.log('🧪 Testing Webhook Data Access Validation\n');

const testCases = [
  {
    name: 'Direct webhook data access (incorrect)',
    config: {
      language: 'javaScript',
      jsCode: `// Processing data from Webhook node
const prevWebhook = $('Webhook').first();
const command = items[0].json.testCommand;
const data = items[0].json.payload;
return [{json: {command, data}}];`
    },
    expectWarning: true
  },
  {
    name: 'Correct webhook data access through body',
    config: {
      language: 'javaScript',
      jsCode: `// Processing data from Webhook node
const webhookData = items[0].json.body;
const command = webhookData.testCommand;
const data = webhookData.payload;
return [{json: {command, data}}];`
    },
    expectWarning: false
  },
  {
    name: 'Common webhook field names without body',
    config: {
      language: 'javaScript',
      jsCode: `// Processing webhook
const command = items[0].json.command;
const action = items[0].json.action;
const payload = items[0].json.payload;
return [{json: {command, action, payload}}];`
    },
    expectWarning: true
  },
  {
    name: 'Non-webhook data access (should not warn)',
    config: {
      language: 'javaScript',
      jsCode: `// Processing data from HTTP Request node
const data = items[0].json.results;
const status = items[0].json.status;
return [{json: {data, status}}];`
    },
    expectWarning: false
  },
  {
    name: 'Mixed correct and incorrect access',
    config: {
      language: 'javaScript',
      jsCode: `// Mixed access patterns
const webhookBody = items[0].json.body;  // Correct
const directAccess = items[0].json.command;  // Incorrect if webhook
return [{json: {webhookBody, directAccess}}];`
    },
    expectWarning: false  // If user already uses .body, we assume they know the pattern
  }
];

let passCount = 0;
let failCount = 0;

for (const test of testCases) {
  console.log(`Test: ${test.name}`);
  const result = EnhancedConfigValidator.validateWithMode(
    'nodes-base.code',
    test.config,
    [
      { name: 'language', type: 'options', options: ['javaScript', 'python'] },
      { name: 'jsCode', type: 'string' }
    ],
    'operation',
    'ai-friendly'
  );
  
  const hasWebhookWarning = result.warnings.some(w => 
    w.message.includes('Webhook data is nested under .body') ||
    w.message.includes('webhook data, remember it\'s nested under .body')
  );
  
  const passed = hasWebhookWarning === test.expectWarning;
  
  console.log(`  Expected warning: ${test.expectWarning}`);
  console.log(`  Has webhook warning: ${hasWebhookWarning}`);
  console.log(`  Result: ${passed ? '✅ PASS' : '❌ FAIL'}`);
  
  if (result.warnings.length > 0) {
    const relevantWarnings = result.warnings
      .filter(w => w.message.includes('webhook') || w.message.includes('Webhook'))
      .map(w => w.message);
    if (relevantWarnings.length > 0) {
      console.log(`  Webhook warnings: ${relevantWarnings.join(', ')}`);
    }
  }
  
  if (passed) passCount++;
  else failCount++;
  
  console.log();
}

console.log(`\n📊 Results: ${passCount} passed, ${failCount} failed`);
console.log(failCount === 0 ? '✅ All webhook validation tests passed!' : '❌ Some tests failed');
```
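
The cases above revolve around one n8n behavior: the Webhook node nests the incoming request payload under `json.body`. A minimal Code-node snippet of the access pattern the validator accepts (it runs inside an n8n Code node, where `items` is provided and a top-level return is allowed):

```typescript
// Inside an n8n Code node downstream of a Webhook node.
// The request payload sits under .body; headers and query are siblings.
const webhookData = items[0].json.body;       // { testCommand: "...", payload: {...} }
const command = webhookData.testCommand;      // correct access
// const wrong = items[0].json.testCommand;   // direct access, would trigger the warning above
return [{ json: { command } }];
```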

--------------------------------------------------------------------------------
/src/mcp/tool-docs/discovery/search-nodes.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const searchNodesDoc: ToolDocumentation = {
  name: 'search_nodes',
  category: 'discovery',
  essentials: {
    description: 'Text search across node names and descriptions. Returns most relevant nodes first, with frequently-used nodes (HTTP Request, Webhook, Set, Code, Slack) prioritized in results. Searches all 525 nodes in the database.',
    keyParameters: ['query', 'mode', 'limit'],
    example: 'search_nodes({query: "webhook"})',
    performance: '<20ms even for complex queries',
    tips: [
      'OR mode (default): Matches any search word',
      'AND mode: Requires all words present',
      'FUZZY mode: Handles typos and spelling errors',
      'Use quotes for exact phrases: "google sheets"'
    ]
  },
  full: {
    description: 'Full-text search engine for n8n nodes using SQLite FTS5. Searches across node names, descriptions, and aliases. Results are ranked by relevance with commonly-used nodes given priority. Common nodes include: HTTP Request, Webhook, Set, Code, IF, Switch, Merge, SplitInBatches, Slack, Google Sheets.',
    parameters: {
      query: { type: 'string', description: 'Search keywords. Use quotes for exact phrases like "google sheets"', required: true },
      limit: { type: 'number', description: 'Maximum results to return. Default: 20, Max: 100', required: false },
      mode: { type: 'string', description: 'Search mode: "OR" (any word matches, default), "AND" (all words required), "FUZZY" (typo-tolerant)', required: false }
    },
    returns: 'Array of node objects sorted by relevance score. Each object contains: nodeType, displayName, description, category, relevance score. Common nodes appear first when relevance is similar.',
    examples: [
      'search_nodes({query: "webhook"}) - Returns Webhook node as top result',
      'search_nodes({query: "database"}) - Returns MySQL, Postgres, MongoDB, Redis, etc.',
      'search_nodes({query: "google sheets", mode: "AND"}) - Requires both words',
      'search_nodes({query: "slak", mode: "FUZZY"}) - Finds Slack despite typo',
      'search_nodes({query: "http api"}) - Finds HTTP Request, GraphQL, REST nodes',
      'search_nodes({query: "transform data"}) - Finds Set, Code, Function, Item Lists nodes'
    ],
    useCases: [
      'Finding nodes when you know partial names',
      'Discovering nodes by functionality (e.g., "email", "database", "transform")',
      'Handling user typos in node names',
      'Finding all nodes related to a service (e.g., "google", "aws", "microsoft")'
    ],
    performance: '<20ms for simple queries, <50ms for complex FUZZY searches. Uses FTS5 index for speed',
    bestPractices: [
      'Start with single keywords for broadest results',
      'Use FUZZY mode when users might misspell node names',
      'AND mode works best for 2-3 word searches',
      'Combine with get_node_essentials after finding the right node'
    ],
    pitfalls: [
      'AND mode searches all fields (name, description) not just node names',
      'FUZZY mode with very short queries (1-2 chars) may return unexpected results',
      'Exact matches in quotes are case-sensitive'
    ],
    relatedTools: ['list_nodes for browsing by category', 'get_node_essentials to configure found nodes', 'list_ai_tools for AI-specific search']
  }
};
```

--------------------------------------------------------------------------------
/scripts/mcp-http-client.js:
--------------------------------------------------------------------------------

```javascript
#!/usr/bin/env node

/**
 * Minimal MCP HTTP Client for Node.js v16 compatibility
 * This bypasses mcp-remote and its TransformStream dependency
 */

const http = require('http');
const https = require('https');
const readline = require('readline');

// Get configuration from command line arguments
const url = process.argv[2];
const authToken = process.env.MCP_AUTH_TOKEN;

if (!url) {
  console.error('Usage: node mcp-http-client.js <server-url>');
  process.exit(1);
}

if (!authToken) {
  console.error('Error: MCP_AUTH_TOKEN environment variable is required');
  process.exit(1);
}

// Parse URL
const parsedUrl = new URL(url);
const isHttps = parsedUrl.protocol === 'https:';
const httpModule = isHttps ? https : http;

// Create readline interface for stdio
const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout,
  terminal: false
});

// Buffer for incomplete JSON messages
let buffer = '';

// Function to send JSON-RPC request
function sendRequest(request) {
  const requestBody = JSON.stringify(request);
  
  const options = {
    hostname: parsedUrl.hostname,
    port: parsedUrl.port || (isHttps ? 443 : 80),
    path: parsedUrl.pathname,
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Content-Length': Buffer.byteLength(requestBody),
      'Authorization': `Bearer ${authToken}`
    }
  };

  const req = httpModule.request(options, (res) => {
    let responseData = '';
    
    res.on('data', (chunk) => {
      responseData += chunk;
    });
    
    res.on('end', () => {
      try {
        const response = JSON.parse(responseData);
        // Ensure the response has the correct structure
        if (response.jsonrpc && (response.result !== undefined || response.error !== undefined)) {
          console.log(JSON.stringify(response));
        } else {
          // Wrap non-JSON-RPC responses
          console.log(JSON.stringify({
            jsonrpc: '2.0',
            id: request.id || null,
            error: {
              code: -32603,
              message: 'Internal error',
              data: response
            }
          }));
        }
      } catch (err) {
        console.log(JSON.stringify({
          jsonrpc: '2.0',
          id: request.id || null,
          error: {
            code: -32700,
            message: 'Parse error',
            data: err.message
          }
        }));
      }
    });
  });

  req.on('error', (err) => {
    console.log(JSON.stringify({
      jsonrpc: '2.0',
      id: request.id || null,
      error: {
        code: -32000,
        message: 'Transport error',
        data: err.message
      }
    }));
  });

  req.write(requestBody);
  req.end();
}

// Process incoming JSON-RPC messages from stdin
rl.on('line', (line) => {
  // Try to parse each line as a complete JSON-RPC message
  try {
    const request = JSON.parse(line);
    
    // Forward the request to the HTTP server
    sendRequest(request);
  } catch (err) {
    // Log parse errors to stdout in JSON-RPC format
    console.log(JSON.stringify({
      jsonrpc: '2.0',
      id: null,
      error: {
        code: -32700,
        message: 'Parse error',
        data: err.message
      }
    }));
  }
});

// Handle process termination
process.on('SIGINT', () => {
  process.exit(0);
});

process.on('SIGTERM', () => {
  process.exit(0);
});
```
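
A hedged usage sketch for this bridge: spawn it with the server URL as the first argument and `MCP_AUTH_TOKEN` in the environment, write one JSON-RPC request per line to stdin, and read responses line by line from stdout. The URL and token below are placeholders.

```typescript
// Hedged sketch: driving scripts/mcp-http-client.js programmatically.
// The endpoint and token are placeholders, not real values.
import { spawn } from 'node:child_process';

const child = spawn('node', ['scripts/mcp-http-client.js', 'https://example.com/mcp'], {
  env: { ...process.env, MCP_AUTH_TOKEN: 'replace-with-your-token' },
  stdio: ['pipe', 'pipe', 'inherit'],
});

// JSON-RPC responses come back one per line on stdout.
child.stdout?.on('data', (chunk) => process.stdout.write(chunk));

// One JSON-RPC request per line on stdin.
child.stdin?.write(
  JSON.stringify({ jsonrpc: '2.0', id: 1, method: 'tools/list', params: {} }) + '\n'
);
```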

--------------------------------------------------------------------------------
/tests/test-node-documentation-service.js:
--------------------------------------------------------------------------------

```javascript
#!/usr/bin/env node

const { NodeDocumentationService } = require('../dist/services/node-documentation-service');

async function testService() {
  console.log('=== Testing Node Documentation Service ===\n');
  
  // Use the main database
  const service = new NodeDocumentationService('./data/nodes.db');
  
  try {
    // Test 1: List nodes
    console.log('1️⃣ Testing list nodes...');
    const nodes = await service.listNodes();
    console.log(`   Found ${nodes.length} nodes in database`);
    
    if (nodes.length === 0) {
      console.log('\n⚠️  No nodes found. Running rebuild...');
      const stats = await service.rebuildDatabase();
      console.log(`   Rebuild complete: ${stats.successful} nodes stored`);
    }
    
    // Test 2: Get specific node info (IF node)
    console.log('\n2️⃣ Testing get node info for "If" node...');
    const ifNode = await service.getNodeInfo('n8n-nodes-base.if');
    
    if (ifNode) {
      console.log('   ✅ Found IF node:');
      console.log(`      Name: ${ifNode.displayName}`);
      console.log(`      Description: ${ifNode.description}`);
      console.log(`      Has source code: ${!!ifNode.sourceCode}`);
      console.log(`      Source code length: ${ifNode.sourceCode?.length || 0} bytes`);
      console.log(`      Has documentation: ${!!ifNode.documentation}`);
      console.log(`      Has example: ${!!ifNode.exampleWorkflow}`);
      
      if (ifNode.exampleWorkflow) {
        console.log('\n   📋 Example workflow:');
        console.log(JSON.stringify(ifNode.exampleWorkflow, null, 2).substring(0, 500) + '...');
      }
    } else {
      console.log('   ❌ IF node not found');
    }
    
    // Test 3: Search nodes
    console.log('\n3️⃣ Testing search functionality...');
    
    // Search for webhook nodes
    const webhookNodes = await service.searchNodes({ query: 'webhook' });
    console.log(`\n   🔍 Search for "webhook": ${webhookNodes.length} results`);
    webhookNodes.slice(0, 3).forEach(node => {
      console.log(`      - ${node.displayName} (${node.nodeType})`);
    });
    
    // Search for HTTP nodes
    const httpNodes = await service.searchNodes({ query: 'http' });
    console.log(`\n   🔍 Search for "http": ${httpNodes.length} results`);
    httpNodes.slice(0, 3).forEach(node => {
      console.log(`      - ${node.displayName} (${node.nodeType})`);
    });
    
    // Test 4: Get statistics
    console.log('\n4️⃣ Testing database statistics...');
    const stats = service.getStatistics();
    console.log('   📊 Database stats:');
    console.log(`      Total nodes: ${stats.totalNodes}`);
    console.log(`      Nodes with docs: ${stats.nodesWithDocs}`);
    console.log(`      Nodes with examples: ${stats.nodesWithExamples}`);
    console.log(`      Trigger nodes: ${stats.triggerNodes}`);
    console.log(`      Webhook nodes: ${stats.webhookNodes}`);
    console.log(`      Total packages: ${stats.totalPackages}`);
    
    // Test 5: Category filtering
    console.log('\n5️⃣ Testing category filtering...');
    const coreNodes = await service.searchNodes({ category: 'Core Nodes' });
    console.log(`   Found ${coreNodes.length} core nodes`);
    
    console.log('\n✅ All tests completed!');
    
  } catch (error) {
    console.error('\n❌ Test failed:', error);
    process.exit(1);
  } finally {
    service.close();
  }
}

// Run tests
testService().catch(console.error);
```

--------------------------------------------------------------------------------
/scripts/prebuild-fts5.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env npx tsx
/**
 * Pre-build FTS5 indexes for the database
 * This ensures FTS5 tables are created before the database is deployed to Docker
 */
import { createDatabaseAdapter } from '../src/database/database-adapter';
import { logger } from '../src/utils/logger';
import * as fs from 'fs';

async function prebuildFTS5() {
  console.log('🔍 Pre-building FTS5 indexes...\n');
  
  const dbPath = './data/nodes.db';
  
  if (!fs.existsSync(dbPath)) {
    console.error('❌ Database not found at', dbPath);
    console.error('   Please run npm run rebuild first');
    process.exit(1);
  }
  
  const db = await createDatabaseAdapter(dbPath);
  
  // Check FTS5 support
  const hasFTS5 = db.checkFTS5Support();
  
  if (!hasFTS5) {
    console.log('ℹ️  FTS5 not supported in this SQLite build');
    console.log('   Skipping FTS5 pre-build');
    db.close();
    return;
  }
  
  console.log('✅ FTS5 is supported');
  
  try {
    // Create FTS5 virtual table for templates
    console.log('\n📋 Creating FTS5 table for templates...');
    db.exec(`
      CREATE VIRTUAL TABLE IF NOT EXISTS templates_fts USING fts5(
        name, description, content=templates
      );
    `);
    
    // Create triggers to keep FTS5 in sync
    console.log('🔗 Creating synchronization triggers...');
    
    db.exec(`
      CREATE TRIGGER IF NOT EXISTS templates_ai AFTER INSERT ON templates BEGIN
        INSERT INTO templates_fts(rowid, name, description)
        VALUES (new.id, new.name, new.description);
      END;
    `);
    
    db.exec(`
      CREATE TRIGGER IF NOT EXISTS templates_au AFTER UPDATE ON templates BEGIN
        UPDATE templates_fts SET name = new.name, description = new.description
        WHERE rowid = new.id;
      END;
    `);
    
    db.exec(`
      CREATE TRIGGER IF NOT EXISTS templates_ad AFTER DELETE ON templates BEGIN
        DELETE FROM templates_fts WHERE rowid = old.id;
      END;
    `);
    
    // Rebuild FTS5 index from existing data
    console.log('🔄 Rebuilding FTS5 index from existing templates...');
    
    // Clear existing FTS data
    db.exec('DELETE FROM templates_fts');
    
    // Repopulate from templates table
    db.exec(`
      INSERT INTO templates_fts(rowid, name, description)
      SELECT id, name, description FROM templates
    `);
    
    // Get counts
    const templateCount = db.prepare('SELECT COUNT(*) as count FROM templates').get() as { count: number };
    const ftsCount = db.prepare('SELECT COUNT(*) as count FROM templates_fts').get() as { count: number };
    
    console.log(`\n✅ FTS5 pre-build complete!`);
    console.log(`   Templates: ${templateCount.count}`);
    console.log(`   FTS5 entries: ${ftsCount.count}`);
    
    // Test FTS5 search
    console.log('\n🧪 Testing FTS5 search...');
    const testResults = db.prepare(`
      SELECT COUNT(*) as count FROM templates t
      JOIN templates_fts ON t.id = templates_fts.rowid
      WHERE templates_fts MATCH 'webhook'
    `).get() as { count: number };
    
    console.log(`   Found ${testResults.count} templates matching "webhook"`);
    
  } catch (error) {
    console.error('❌ Error pre-building FTS5:', error);
    process.exit(1);
  }
  
  db.close();
  console.log('\n✅ Database is ready for Docker deployment!');
}

// Run if called directly
if (require.main === module) {
  prebuildFTS5().catch(console.error);
}

export { prebuildFTS5 };
```

--------------------------------------------------------------------------------
/scripts/test-telemetry-debug.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env npx tsx
/**
 * Debug script for telemetry integration
 * Tests direct Supabase connection
 */

import { createClient } from '@supabase/supabase-js';
import dotenv from 'dotenv';

// Load environment variables
dotenv.config();

async function debugTelemetry() {
  console.log('🔍 Debugging Telemetry Integration\n');

  const supabaseUrl = process.env.SUPABASE_URL;
  const supabaseAnonKey = process.env.SUPABASE_ANON_KEY;

  if (!supabaseUrl || !supabaseAnonKey) {
    console.error('❌ Missing SUPABASE_URL or SUPABASE_ANON_KEY');
    process.exit(1);
  }

  console.log('Environment:');
  console.log('  URL:', supabaseUrl);
  console.log('  Key:', supabaseAnonKey.substring(0, 30) + '...');

  // Create Supabase client
  const supabase = createClient(supabaseUrl, supabaseAnonKey, {
    auth: {
      persistSession: false,
      autoRefreshToken: false,
    }
  });

  // Test 1: Direct insert to telemetry_events
  console.log('\n📝 Test 1: Direct insert to telemetry_events...');
  const testEvent = {
    user_id: 'test-user-123',
    event: 'test_event',
    properties: {
      test: true,
      timestamp: new Date().toISOString()
    }
  };

  const { data: eventData, error: eventError } = await supabase
    .from('telemetry_events')
    .insert([testEvent])
    .select();

  if (eventError) {
    console.error('❌ Event insert failed:', eventError);
  } else {
    console.log('✅ Event inserted successfully:', eventData);
  }

  // Test 2: Direct insert to telemetry_workflows
  console.log('\n📝 Test 2: Direct insert to telemetry_workflows...');
  const testWorkflow = {
    user_id: 'test-user-123',
    workflow_hash: 'test-hash-' + Date.now(),
    node_count: 3,
    node_types: ['webhook', 'http', 'slack'],
    has_trigger: true,
    has_webhook: true,
    complexity: 'simple',
    sanitized_workflow: {
      nodes: [],
      connections: {}
    }
  };

  const { data: workflowData, error: workflowError } = await supabase
    .from('telemetry_workflows')
    .insert([testWorkflow])
    .select();

  if (workflowError) {
    console.error('❌ Workflow insert failed:', workflowError);
  } else {
    console.log('✅ Workflow inserted successfully:', workflowData);
  }

  // Test 3: Try to read data (should fail with anon key due to RLS)
  console.log('\n📖 Test 3: Attempting to read data (should fail due to RLS)...');
  const { data: readData, error: readError } = await supabase
    .from('telemetry_events')
    .select('*')
    .limit(1);

  if (readError) {
    console.log('✅ Read correctly blocked by RLS:', readError.message);
  } else {
    console.log('⚠️  Unexpected: Read succeeded (RLS may not be working):', readData);
  }

  // Test 4: Check table existence
  console.log('\n🔍 Test 4: Verifying tables exist...');
  const { data: tables, error: tablesError } = await supabase
    .rpc('get_tables', { schema_name: 'public' })
    .select('*');

  if (tablesError) {
    // This is expected - the RPC function might not exist
    console.log('ℹ️  Cannot list tables (RPC function not available)');
  } else {
    console.log('Tables found:', tables);
  }

  console.log('\n✨ Debug completed! Check your Supabase dashboard for the test data.');
  console.log('Dashboard: https://supabase.com/dashboard/project/ydyufsohxdfpopqbubwk/editor');
}

debugTelemetry().catch(error => {
  console.error('❌ Debug failed:', error);
  process.exit(1);
});
```

--------------------------------------------------------------------------------
/src/loaders/node-loader.ts:
--------------------------------------------------------------------------------

```typescript
import path from 'path';

export interface LoadedNode {
  packageName: string;
  nodeName: string;
  NodeClass: any;
}

export class N8nNodeLoader {
  private readonly CORE_PACKAGES = [
    { name: 'n8n-nodes-base', path: 'n8n-nodes-base' },
    { name: '@n8n/n8n-nodes-langchain', path: '@n8n/n8n-nodes-langchain' }
  ];

  async loadAllNodes(): Promise<LoadedNode[]> {
    const results: LoadedNode[] = [];
    
    for (const pkg of this.CORE_PACKAGES) {
      try {
        console.log(`\n📦 Loading package: ${pkg.name} from ${pkg.path}`);
        // Use the path property to locate the package
        const packageJson = require(`${pkg.path}/package.json`);
        console.log(`  Found ${Object.keys(packageJson.n8n?.nodes || {}).length} nodes in package.json`);
        const nodes = await this.loadPackageNodes(pkg.name, pkg.path, packageJson);
        results.push(...nodes);
      } catch (error) {
        console.error(`Failed to load ${pkg.name}:`, error);
      }
    }
    
    return results;
  }

  private async loadPackageNodes(packageName: string, packagePath: string, packageJson: any): Promise<LoadedNode[]> {
    const n8nConfig = packageJson.n8n || {};
    const nodes: LoadedNode[] = [];
    
    // Check if nodes is an array or object
    const nodesList = n8nConfig.nodes || [];
    
    if (Array.isArray(nodesList)) {
      // Handle array format (n8n-nodes-base uses this)
      for (const nodePath of nodesList) {
        try {
          const fullPath = require.resolve(`${packagePath}/${nodePath}`);
          const nodeModule = require(fullPath);
          
          // Extract node name from path (e.g., "dist/nodes/Slack/Slack.node.js" -> "Slack")
          const nodeNameMatch = nodePath.match(/\/([^\/]+)\.node\.(js|ts)$/);
          const nodeName = nodeNameMatch ? nodeNameMatch[1] : path.basename(nodePath, '.node.js');
          
          // Handle default export and various export patterns
          const NodeClass = nodeModule.default || nodeModule[nodeName] || Object.values(nodeModule)[0];
          if (NodeClass) {
            nodes.push({ packageName, nodeName, NodeClass });
            console.log(`  ✓ Loaded ${nodeName} from ${packageName}`);
          } else {
            console.warn(`  ⚠ No valid export found for ${nodeName} in ${packageName}`);
          }
        } catch (error) {
          console.error(`  ✗ Failed to load node from ${packageName}/${nodePath}:`, (error as Error).message);
        }
      }
    } else {
      // Handle object format (for other packages)
      for (const [nodeName, nodePath] of Object.entries(nodesList)) {
        try {
          const fullPath = require.resolve(`${packagePath}/${nodePath as string}`);
          const nodeModule = require(fullPath);
          
          // Handle default export and various export patterns
          const NodeClass = nodeModule.default || nodeModule[nodeName] || Object.values(nodeModule)[0];
          if (NodeClass) {
            nodes.push({ packageName, nodeName, NodeClass });
            console.log(`  ✓ Loaded ${nodeName} from ${packageName}`);
          } else {
            console.warn(`  ⚠ No valid export found for ${nodeName} in ${packageName}`);
          }
        } catch (error) {
          console.error(`  ✗ Failed to load node ${nodeName} from ${packageName}:`, (error as Error).message);
        }
      }
    }
    
    return nodes;
  }
}
```
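
A short consumption sketch for the loader, assuming each `NodeClass` follows n8n's `INodeType` convention of exposing a `description` object on instances (versioned node types wrap this differently, so treat the property access as best-effort):

```typescript
// Hedged sketch: consuming the loader output from a sibling module.
import { N8nNodeLoader } from './node-loader';

async function summarize(): Promise<void> {
  const loader = new N8nNodeLoader();
  const loaded = await loader.loadAllNodes();

  for (const { packageName, nodeName, NodeClass } of loaded) {
    try {
      const instance = new NodeClass();
      // Assumes the INodeType convention; versioned nodes may differ.
      const displayName = instance.description?.displayName ?? nodeName;
      console.log(`${packageName} -> ${displayName}`);
    } catch {
      console.log(`${packageName} -> ${nodeName} (could not instantiate)`);
    }
  }
}

summarize().catch(console.error);
```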

--------------------------------------------------------------------------------
/src/mcp/tool-docs/workflow_management/n8n-create-workflow.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const n8nCreateWorkflowDoc: ToolDocumentation = {
  name: 'n8n_create_workflow',
  category: 'workflow_management',
  essentials: {
    description: 'Create workflow. Requires: name, nodes[], connections{}. Created inactive. Returns workflow with ID.',
    keyParameters: ['name', 'nodes', 'connections'],
    example: 'n8n_create_workflow({name: "My Flow", nodes: [...], connections: {...}})',
    performance: 'Network-dependent',
    tips: [
      'Workflow created inactive',
      'Returns ID for future updates',
      'Validate first with validate_workflow'
    ]
  },
  full: {
    description: 'Creates a new workflow in n8n with specified nodes and connections. Workflow is created in inactive state. Each node requires: id, name, type, typeVersion, position, and parameters.',
    parameters: {
      name: { type: 'string', required: true, description: 'Workflow name' },
      nodes: { type: 'array', required: true, description: 'Array of nodes with id, name, type, typeVersion, position, parameters' },
      connections: { type: 'object', required: true, description: 'Node connections. Keys are source node IDs' },
      settings: { type: 'object', description: 'Optional workflow settings (timezone, error handling, etc.)' }
    },
    returns: 'Created workflow object with id, name, nodes, connections, active status',
    examples: [
      `// Basic webhook to Slack workflow
n8n_create_workflow({
  name: "Webhook to Slack",
  nodes: [
    {
      id: "webhook_1",
      name: "Webhook",
      type: "n8n-nodes-base.webhook",
      typeVersion: 1,
      position: [250, 300],
      parameters: {
        httpMethod: "POST",
        path: "slack-notify"
      }
    },
    {
      id: "slack_1",
      name: "Slack",
      type: "n8n-nodes-base.slack",
      typeVersion: 1,
      position: [450, 300],
      parameters: {
        resource: "message",
        operation: "post",
        channel: "#general",
        text: "={{$json.message}}"
      }
    }
  ],
  connections: {
    "webhook_1": {
      "main": [[{node: "slack_1", type: "main", index: 0}]]
    }
  }
})`,
      `// Workflow with settings and error handling
n8n_create_workflow({
  name: "Data Processing",
  nodes: [...],
  connections: {...},
  settings: {
    timezone: "America/New_York",
    errorWorkflow: "error_handler_workflow_id",
    saveDataSuccessExecution: "all",
    saveDataErrorExecution: "all"
  }
})`
    ],
    useCases: [
      'Deploy validated workflows',
      'Automate workflow creation',
      'Clone workflow structures',
      'Template deployment'
    ],
    performance: 'Network-dependent - Typically 100-500ms depending on workflow size',
    bestPractices: [
      'Validate with validate_workflow first',
      'Use unique node IDs',
      'Position nodes for readability',
      'Test with n8n_trigger_webhook_workflow'
    ],
    pitfalls: [
      '**REQUIRES N8N_API_URL and N8N_API_KEY environment variables** - tool unavailable without n8n API access',
      'Workflows created in INACTIVE state - must activate separately',
      'Node IDs must be unique within workflow',
      'Credentials must be configured separately in n8n',
      'Node type names must include package prefix (e.g., "n8n-nodes-base.slack")'
    ],
    relatedTools: ['validate_workflow', 'n8n_update_partial_workflow', 'n8n_trigger_webhook_workflow']
  }
};
```

--------------------------------------------------------------------------------
/src/utils/url-detector.ts:
--------------------------------------------------------------------------------

```typescript
import { Request } from 'express';
import { logger } from './logger';

/**
 * Validates a hostname to prevent header injection attacks
 */
function isValidHostname(host: string): boolean {
  // Allow alphanumeric, dots, hyphens, and optional port
  return /^[a-zA-Z0-9.-]+(:[0-9]+)?$/.test(host) && host.length < 256;
}

/**
 * Validates a URL string
 */
function isValidUrl(url: string): boolean {
  try {
    const parsed = new URL(url);
    // Only allow http and https protocols
    return parsed.protocol === 'http:' || parsed.protocol === 'https:';
  } catch {
    return false;
  }
}

/**
 * Detects the base URL for the server, considering:
 * 1. Explicitly configured BASE_URL or PUBLIC_URL
 * 2. Proxy headers (X-Forwarded-Proto, X-Forwarded-Host)
 * 3. Host and port configuration
 */
export function detectBaseUrl(req: Request | null, host: string, port: number): string {
  try {
    // 1. Check for explicitly configured URL
    const configuredUrl = process.env.BASE_URL || process.env.PUBLIC_URL;
    if (configuredUrl) {
      if (isValidUrl(configuredUrl)) {
        logger.debug('Using configured BASE_URL/PUBLIC_URL', { url: configuredUrl });
        return configuredUrl.replace(/\/$/, ''); // Remove trailing slash
      } else {
        logger.warn('Invalid BASE_URL/PUBLIC_URL configured, falling back to auto-detection', { url: configuredUrl });
      }
    }

    // 2. If we have a request, try to detect from proxy headers
    if (req && process.env.TRUST_PROXY && Number(process.env.TRUST_PROXY) > 0) {
      const proto = req.get('X-Forwarded-Proto') || req.protocol || 'http';
      const forwardedHost = req.get('X-Forwarded-Host');
      const hostHeader = req.get('Host');
      
      const detectedHost = forwardedHost || hostHeader;
      if (detectedHost && isValidHostname(detectedHost)) {
        const baseUrl = `${proto}://${detectedHost}`;
        logger.debug('Detected URL from proxy headers', { 
          proto, 
          forwardedHost, 
          hostHeader,
          baseUrl 
        });
        return baseUrl;
      } else if (detectedHost) {
        logger.warn('Invalid hostname detected in proxy headers, using fallback', { detectedHost });
      }
    }

    // 3. Fall back to configured host and port
    const displayHost = host === '0.0.0.0' ? 'localhost' : host;
    const protocol = 'http'; // Default to http for local bindings
    
    // Don't show standard ports (for http only in this fallback case)
    const needsPort = port !== 80;
    const baseUrl = needsPort ? 
      `${protocol}://${displayHost}:${port}` : 
      `${protocol}://${displayHost}`;
    
    logger.debug('Using fallback URL from host/port', { 
      host, 
      displayHost, 
      port, 
      baseUrl 
    });
    
    return baseUrl;
  } catch (error) {
    logger.error('Error detecting base URL, using fallback', error);
    // Safe fallback
    return `http://localhost:${port}`;
  }
}

/**
 * Gets the base URL for console display during startup
 * This is used when we don't have a request object yet
 */
export function getStartupBaseUrl(host: string, port: number): string {
  return detectBaseUrl(null, host, port);
}

/**
 * Formats endpoint URLs for display
 */
export function formatEndpointUrls(baseUrl: string): {
  health: string;
  mcp: string;
  root: string;
} {
  return {
    health: `${baseUrl}/health`,
    mcp: `${baseUrl}/mcp`,
    root: baseUrl
  };
}
```
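
A usage sketch for the detector from an Express handler; `BASE_URL`/`PUBLIC_URL` and `TRUST_PROXY` are read inside `detectBaseUrl` as shown above, and the host/port values here are placeholders:

```typescript
// Hedged sketch: wiring the URL detector into an Express app.
import express from 'express';
import { detectBaseUrl, formatEndpointUrls, getStartupBaseUrl } from './url-detector';

const app = express();
const HOST = process.env.HOST || '0.0.0.0';
const PORT = Number(process.env.PORT) || 3000;

app.get('/', (req, res) => {
  const baseUrl = detectBaseUrl(req, HOST, PORT);  // honors proxy headers when TRUST_PROXY > 0
  res.json(formatEndpointUrls(baseUrl));           // { health, mcp, root }
});

app.listen(PORT, HOST, () => {
  // No request object yet, so the host/port fallback logic applies here.
  console.log(`Listening at ${getStartupBaseUrl(HOST, PORT)}`);
});
```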

--------------------------------------------------------------------------------
/src/mcp/tool-docs/system/tools-documentation.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const toolsDocumentationDoc: ToolDocumentation = {
  name: 'tools_documentation',
  category: 'system',
  essentials: {
    description: 'The meta-documentation tool. Returns documentation for any MCP tool, including itself. Call without parameters for a comprehensive overview of all available tools. This is your starting point for discovering n8n MCP capabilities.',
    keyParameters: ['topic', 'depth'],
    example: 'tools_documentation({topic: "search_nodes"})',
    performance: 'Instant (static content)',
    tips: [
      'Call without parameters first to see all tools',
      'Can document itself: tools_documentation({topic: "tools_documentation"})',
      'Use depth:"full" for comprehensive details'
    ]
  },
  full: {
    description: 'The self-referential documentation system for all MCP tools. This tool can document any other tool, including itself. It\'s the primary discovery mechanism for understanding what tools are available and how to use them. Returns utilitarian documentation optimized for AI agent consumption.',
    parameters: {
      topic: { type: 'string', description: 'Tool name (e.g., "search_nodes"), special topic ("javascript_code_node_guide", "python_code_node_guide"), or "overview". Leave empty for quick reference.', required: false },
      depth: { type: 'string', description: 'Level of detail: "essentials" (default, concise) or "full" (comprehensive with examples)', required: false }
    },
    returns: 'Markdown-formatted documentation tailored for the requested tool and depth. For essentials: key info, parameters, example, tips. For full: complete details, all examples, use cases, best practices.',
    examples: [
      '// Get started - see all available tools',
      'tools_documentation()',
      '',
      '// Learn about a specific tool',
      'tools_documentation({topic: "search_nodes"})',
      '',
      '// Get comprehensive details',
      'tools_documentation({topic: "validate_workflow", depth: "full"})',
      '',
      '// Self-referential example - document this tool',
      'tools_documentation({topic: "tools_documentation", depth: "full"})',
      '',
      '// Code node guides',
      'tools_documentation({topic: "javascript_code_node_guide"})',
      'tools_documentation({topic: "python_code_node_guide"})'
    ],
    useCases: [
      'Initial discovery of available MCP tools',
      'Learning how to use specific tools',
      'Finding required and optional parameters',
      'Getting working examples to copy',
      'Understanding tool performance characteristics',
      'Discovering related tools for workflows'
    ],
    performance: 'Instant - all documentation is pre-loaded in memory',
    bestPractices: [
      'Always start with tools_documentation() to see available tools',
      'Use essentials for quick parameter reference during coding',
      'Switch to full depth when debugging or learning new tools',
      'Check Code node guides when working with Code nodes'
    ],
    pitfalls: [
      'Tool names must match exactly - use the overview to find correct names',
      'Not all internal functions are documented',
      'Special topics (code guides) require exact names'
    ],
    relatedTools: ['n8n_list_available_tools for dynamic tool discovery', 'list_tasks for common configurations', 'get_database_statistics to verify MCP connection']
  }
};
```

--------------------------------------------------------------------------------
/src/mcp/tool-docs/templates/get-template.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const getTemplateDoc: ToolDocumentation = {
  name: 'get_template',
  category: 'templates',
  essentials: {
    description: 'Get complete workflow JSON by ID. Ready to import. IDs from list_node_templates or search_templates.',
    keyParameters: ['templateId'],
    example: 'get_template({templateId: 1234})',
    performance: 'Fast (<100ms) - single database lookup',
    tips: [
      'Get template IDs from list_node_templates or search_templates first',
      'Returns complete workflow JSON ready for import into n8n',
      'Includes all nodes, connections, and settings'
    ]
  },
  full: {
    description: `Retrieves the complete workflow JSON for a specific template by its ID. The returned workflow can be directly imported into n8n through the UI or API. This tool fetches pre-built workflows from the community template library containing 399+ curated workflows.`,
    parameters: {
      templateId: {
        type: 'number',
        required: true,
        description: 'The numeric ID of the template to retrieve. Get IDs from list_node_templates or search_templates'
      }
    },
    returns: `Returns an object containing:
- template: Complete template information including workflow JSON
  - id: Template ID
  - name: Template name
  - description: What the workflow does
  - author: Creator information (name, username, verified status)
  - nodes: Array of node types used
  - views: Number of times viewed
  - created: Creation date
  - url: Link to template on n8n.io
  - workflow: Complete workflow JSON with structure:
    - nodes: Array of node objects (id, name, type, typeVersion, position, parameters)
    - connections: Object mapping source nodes to targets
    - settings: Workflow configuration (timezone, error handling, etc.)
- usage: Instructions for using the workflow`,
    examples: [
      'get_template({templateId: 1234}) - Get Slack notification workflow',
      'get_template({templateId: 5678}) - Get data sync workflow',
      'get_template({templateId: 9012}) - Get AI chatbot workflow'
    ],
    useCases: [
      'Download workflows for direct import into n8n',
      'Study workflow patterns and best practices',
      'Get complete workflow JSON for customization',
      'Clone popular workflows for your use case',
      'Learn how complex automations are built'
    ],
    performance: `Fast performance with single database lookup:
- Query time: <10ms for template retrieval
- Workflow JSON parsing: <50ms
- Total response time: <100ms
- No network calls (uses local cache)`,
    bestPractices: [
      'Always check if template exists before attempting modifications',
      'Review workflow nodes before importing to ensure compatibility',
      'Save template JSON locally if planning multiple customizations',
      'Check template creation date for most recent patterns',
      'Verify all required credentials are configured before import'
    ],
    pitfalls: [
      'Template IDs change when database is refreshed',
      'Some templates may use deprecated node versions',
      'Credentials in templates are placeholders - configure your own',
      'Not all templates work with all n8n versions',
      'Template may reference external services you don\'t have access to'
    ],
    relatedTools: ['list_node_templates', 'search_templates', 'get_templates_for_task', 'n8n_create_workflow']
  }
};
```

--------------------------------------------------------------------------------
/tests/auth.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { AuthManager } from '../src/utils/auth';

describe('AuthManager', () => {
  let authManager: AuthManager;

  beforeEach(() => {
    authManager = new AuthManager();
  });

  describe('validateToken', () => {
    it('should return true when no authentication is required', () => {
      expect(authManager.validateToken('any-token')).toBe(true);
      expect(authManager.validateToken(undefined)).toBe(true);
    });

    it('should validate static token correctly', () => {
      const expectedToken = 'secret-token';
      
      expect(authManager.validateToken('secret-token', expectedToken)).toBe(true);
      expect(authManager.validateToken('wrong-token', expectedToken)).toBe(false);
      expect(authManager.validateToken(undefined, expectedToken)).toBe(false);
    });

    it('should validate generated tokens', () => {
      const token = authManager.generateToken(1);
      
      expect(authManager.validateToken(token, 'expected-token')).toBe(true);
    });

    it('should reject expired tokens', () => {
      vi.useFakeTimers();
      
      const token = authManager.generateToken(1); // 1 hour expiry
      
      // Token should be valid initially
      expect(authManager.validateToken(token, 'expected-token')).toBe(true);
      
      // Fast forward 2 hours
      vi.advanceTimersByTime(2 * 60 * 60 * 1000);
      
      // Token should be expired
      expect(authManager.validateToken(token, 'expected-token')).toBe(false);
      
      vi.useRealTimers();
    });
  });

  describe('generateToken', () => {
    it('should generate unique tokens', () => {
      const token1 = authManager.generateToken();
      const token2 = authManager.generateToken();
      
      expect(token1).not.toBe(token2);
      expect(token1).toHaveLength(64); // 32 bytes hex = 64 chars
    });

    it('should set custom expiry time', () => {
      vi.useFakeTimers();
      
      const token = authManager.generateToken(24); // 24 hours
      
      // Token should be valid after 23 hours
      vi.advanceTimersByTime(23 * 60 * 60 * 1000);
      expect(authManager.validateToken(token, 'expected')).toBe(true);
      
      // Token should expire after 25 hours
      vi.advanceTimersByTime(2 * 60 * 60 * 1000);
      expect(authManager.validateToken(token, 'expected')).toBe(false);
      
      vi.useRealTimers();
    });
  });

  describe('revokeToken', () => {
    it('should revoke a generated token', () => {
      const token = authManager.generateToken();
      
      expect(authManager.validateToken(token, 'expected')).toBe(true);
      
      authManager.revokeToken(token);
      
      expect(authManager.validateToken(token, 'expected')).toBe(false);
    });
  });

  describe('static methods', () => {
    it('should hash tokens consistently', () => {
      const token = 'my-secret-token';
      const hash1 = AuthManager.hashToken(token);
      const hash2 = AuthManager.hashToken(token);
      
      expect(hash1).toBe(hash2);
      expect(hash1).toHaveLength(64); // SHA256 hex = 64 chars
    });

    it('should compare tokens securely', () => {
      const token = 'my-secret-token';
      const hashedToken = AuthManager.hashToken(token);
      
      expect(AuthManager.compareTokens(token, hashedToken)).toBe(true);
      expect(AuthManager.compareTokens('wrong-token', hashedToken)).toBe(false);
    });
  });
});
```
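
The "compare tokens securely" expectation above points at a constant-time comparison. A minimal sketch of that technique using Node's `crypto.timingSafeEqual`, shown as an illustration of the pattern rather than the actual `AuthManager` implementation:

```typescript
// Sketch of a constant-time token comparison (the pattern the test implies).
// Not the project's AuthManager source.
import { createHash, timingSafeEqual } from 'node:crypto';

function hashToken(token: string): string {
  return createHash('sha256').update(token).digest('hex'); // 64 hex chars, as the test expects
}

function compareTokens(plainToken: string, hashedToken: string): boolean {
  const a = Buffer.from(hashToken(plainToken), 'hex');
  const b = Buffer.from(hashedToken, 'hex');
  if (a.length !== b.length) return false;  // timingSafeEqual throws on length mismatch
  return timingSafeEqual(a, b);
}
```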

--------------------------------------------------------------------------------
/tests/unit/utils/simple-cache-memory-leak-fix.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { SimpleCache } from '../../../src/utils/simple-cache';

describe('SimpleCache Memory Leak Fix', () => {
  let cache: SimpleCache;
  
  beforeEach(() => {
    vi.useFakeTimers();
  });
  
  afterEach(() => {
    if (cache && typeof cache.destroy === 'function') {
      cache.destroy();
    }
    vi.restoreAllMocks();
  });
  
  it('should track cleanup timer', () => {
    cache = new SimpleCache();
    // Access private property for testing
    expect((cache as any).cleanupTimer).toBeDefined();
    expect((cache as any).cleanupTimer).not.toBeNull();
  });
  
  it('should clear timer on destroy', () => {
    cache = new SimpleCache();
    const timer = (cache as any).cleanupTimer;
    
    cache.destroy();
    
    expect((cache as any).cleanupTimer).toBeNull();
    // Verify timer was cleared
    expect(() => clearInterval(timer)).not.toThrow();
  });
  
  it('should clear cache on destroy', () => {
    cache = new SimpleCache();
    cache.set('test-key', 'test-value', 300);
    
    expect(cache.get('test-key')).toBe('test-value');
    
    cache.destroy();
    
    expect(cache.get('test-key')).toBeNull();
  });
  
  it('should handle multiple destroy calls safely', () => {
    cache = new SimpleCache();
    
    expect(() => {
      cache.destroy();
      cache.destroy();
      cache.destroy();
    }).not.toThrow();
    
    expect((cache as any).cleanupTimer).toBeNull();
  });
  
  it('should not create new timers after destroy', () => {
    cache = new SimpleCache();
    const originalTimer = (cache as any).cleanupTimer;
    
    cache.destroy();
    
    // Try to use the cache after destroy
    cache.set('key', 'value');
    cache.get('key');
    cache.clear();
    
    // Timer should still be null
    expect((cache as any).cleanupTimer).toBeNull();
    expect((cache as any).cleanupTimer).not.toBe(originalTimer);
  });
  
  it('should clean up expired entries periodically', () => {
    cache = new SimpleCache();
    
    // Set items with different TTLs
    cache.set('short', 'value1', 1); // 1 second
    cache.set('long', 'value2', 300); // 300 seconds
    
    // Advance time by 2 seconds
    vi.advanceTimersByTime(2000);
    
    // Advance time to trigger cleanup (60 seconds)
    vi.advanceTimersByTime(58000);
    
    // Short-lived item should be gone
    expect(cache.get('short')).toBeNull();
    // Long-lived item should still exist
    expect(cache.get('long')).toBe('value2');
  });
  
  it('should prevent memory leak by clearing timer', () => {
    const timers: NodeJS.Timeout[] = [];
    const originalSetInterval = global.setInterval;
    
    // Mock setInterval to track created timers
    global.setInterval = vi.fn((callback, delay) => {
      const timer = originalSetInterval(callback, delay);
      timers.push(timer);
      return timer;
    });
    
    // Create and destroy multiple caches
    for (let i = 0; i < 5; i++) {
      const tempCache = new SimpleCache();
      tempCache.set(`key${i}`, `value${i}`);
      tempCache.destroy();
    }
    
    // All timers should have been cleared
    expect(timers.length).toBe(5);
    
    // Restore original setInterval
    global.setInterval = originalSetInterval;
  });
  
  it('should have destroy method defined', () => {
    cache = new SimpleCache();
    expect(typeof cache.destroy).toBe('function');
  });
});
```
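
A minimal sketch of the timer-cleanup pattern these tests exercise, assuming a Map-backed cache with a periodic sweep; the actual `src/utils/simple-cache.ts` may differ in details:

```typescript
// Sketch only: shows why destroy() must clear the interval to avoid leaking timers.
class SimpleCacheSketch {
  private cache = new Map<string, { value: any; expires: number }>();
  private cleanupTimer: NodeJS.Timeout | null = null;

  constructor() {
    // Periodic sweep of expired entries every 60 seconds.
    this.cleanupTimer = setInterval(() => this.sweep(), 60_000);
  }

  set(key: string, value: any, ttlSeconds = 300): void {
    this.cache.set(key, { value, expires: Date.now() + ttlSeconds * 1000 });
  }

  get(key: string): any {
    const entry = this.cache.get(key);
    if (!entry) return null;
    if (Date.now() > entry.expires) {
      this.cache.delete(key);
      return null;
    }
    return entry.value;
  }

  clear(): void {
    this.cache.clear();
  }

  /** Clearing the interval is what prevents the memory leak guarded against above. */
  destroy(): void {
    if (this.cleanupTimer) {
      clearInterval(this.cleanupTimer);
      this.cleanupTimer = null;
    }
    this.cache.clear();
  }

  private sweep(): void {
    const now = Date.now();
    for (const [key, entry] of this.cache) {
      if (now > entry.expires) this.cache.delete(key);
    }
  }
}
```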

--------------------------------------------------------------------------------
/src/telemetry/telemetry-types.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Telemetry Types and Interfaces
 * Centralized type definitions for the telemetry system
 */

import { StartupCheckpoint } from './startup-checkpoints';

export interface TelemetryEvent {
  user_id: string;
  event: string;
  properties: Record<string, any>;
  created_at?: string;
}

/**
 * Startup error event - captures pre-handshake failures
 */
export interface StartupErrorEvent extends TelemetryEvent {
  event: 'startup_error';
  properties: {
    checkpoint: StartupCheckpoint;
    errorMessage: string;
    errorType: string;
    checkpointsPassed: StartupCheckpoint[];
    checkpointsPassedCount: number;
    startupDuration: number;
    platform: string;
    arch: string;
    nodeVersion: string;
    isDocker: boolean;
  };
}

/**
 * Startup completed event - confirms server is functional
 */
export interface StartupCompletedEvent extends TelemetryEvent {
  event: 'startup_completed';
  properties: {
    version: string;
  };
}

/**
 * Enhanced session start properties with startup tracking
 */
export interface SessionStartProperties {
  version: string;
  platform: string;
  arch: string;
  nodeVersion: string;
  isDocker: boolean;
  cloudPlatform: string | null;
  // NEW: Startup tracking fields (v2.18.2)
  startupDurationMs?: number;
  checkpointsPassed?: StartupCheckpoint[];
  startupErrorCount?: number;
}

export interface WorkflowTelemetry {
  user_id: string;
  workflow_hash: string;
  node_count: number;
  node_types: string[];
  has_trigger: boolean;
  has_webhook: boolean;
  complexity: 'simple' | 'medium' | 'complex';
  sanitized_workflow: any;
  created_at?: string;
}

export interface SanitizedWorkflow {
  nodes: any[];
  connections: any;
  nodeCount: number;
  nodeTypes: string[];
  hasTrigger: boolean;
  hasWebhook: boolean;
  complexity: 'simple' | 'medium' | 'complex';
  workflowHash: string;
}

export const TELEMETRY_CONFIG = {
  // Batch processing
  BATCH_FLUSH_INTERVAL: 5000, // 5 seconds
  EVENT_QUEUE_THRESHOLD: 10, // Batch events for efficiency
  WORKFLOW_QUEUE_THRESHOLD: 5, // Batch workflows

  // Retry logic
  MAX_RETRIES: 3,
  RETRY_DELAY: 1000, // 1 second base delay
  OPERATION_TIMEOUT: 5000, // 5 seconds

  // Rate limiting
  RATE_LIMIT_WINDOW: 60000, // 1 minute
  RATE_LIMIT_MAX_EVENTS: 100, // Max events per window

  // Queue limits
  MAX_QUEUE_SIZE: 1000, // Maximum events to queue
  MAX_BATCH_SIZE: 50, // Maximum events per batch
} as const;

export const TELEMETRY_BACKEND = {
  URL: 'https://ydyufsohxdfpopqbubwk.supabase.co',
  ANON_KEY: 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InlkeXVmc29oeGRmcG9wcWJ1YndrIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NTg3OTYyMDAsImV4cCI6MjA3NDM3MjIwMH0.xESphg6h5ozaDsm4Vla3QnDJGc6Nc_cpfoqTHRynkCk'
} as const;

export interface TelemetryMetrics {
  eventsTracked: number;
  eventsDropped: number;
  eventsFailed: number;
  batchesSent: number;
  batchesFailed: number;
  averageFlushTime: number;
  lastFlushTime?: number;
  rateLimitHits: number;
}

export enum TelemetryErrorType {
  VALIDATION_ERROR = 'VALIDATION_ERROR',
  NETWORK_ERROR = 'NETWORK_ERROR',
  RATE_LIMIT_ERROR = 'RATE_LIMIT_ERROR',
  QUEUE_OVERFLOW_ERROR = 'QUEUE_OVERFLOW_ERROR',
  INITIALIZATION_ERROR = 'INITIALIZATION_ERROR',
  UNKNOWN_ERROR = 'UNKNOWN_ERROR'
}

export interface TelemetryErrorContext {
  type: TelemetryErrorType;
  message: string;
  context?: Record<string, any>;
  timestamp: number;
  retryable: boolean;
}
```
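
For illustration, an object conforming to `StartupCompletedEvent` (all values are placeholders, and the import path assumes a sibling module):

```typescript
import type { StartupCompletedEvent } from './telemetry-types';

// Placeholder values only; real user IDs and versions are produced elsewhere in the telemetry pipeline.
const exampleEvent: StartupCompletedEvent = {
  user_id: 'anonymous-user-hash',
  event: 'startup_completed',
  properties: { version: '2.18.2' },
};
```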

--------------------------------------------------------------------------------
/examples/enhanced-documentation-demo.js:
--------------------------------------------------------------------------------

```javascript
#!/usr/bin/env node

const { DocumentationFetcher } = require('../dist/utils/documentation-fetcher');

async function demonstrateEnhancedDocumentation() {
  console.log('🎯 Enhanced Documentation Demo\n');
  
  const fetcher = new DocumentationFetcher();
  const nodeType = 'n8n-nodes-base.slack';
  
  console.log(`Fetching enhanced documentation for: ${nodeType}\n`);
  
  try {
    const doc = await fetcher.getEnhancedNodeDocumentation(nodeType);
    
    if (!doc) {
      console.log('No documentation found for this node.');
      return;
    }
    
    // Display title and description
    console.log('📄 Basic Information:');
    console.log(`Title: ${doc.title || 'N/A'}`);
    console.log(`URL: ${doc.url}`);
    console.log(`Description: ${doc.description || 'See documentation for details'}\n`);
    
    // Display operations
    if (doc.operations && doc.operations.length > 0) {
      console.log('⚙️  Available Operations:');
      // Group by resource
      const resourceMap = new Map();
      doc.operations.forEach(op => {
        if (!resourceMap.has(op.resource)) {
          resourceMap.set(op.resource, []);
        }
        resourceMap.get(op.resource).push(op);
      });
      
      resourceMap.forEach((ops, resource) => {
        console.log(`\n  ${resource}:`);
        ops.forEach(op => {
          console.log(`    - ${op.operation}: ${op.description}`);
        });
      });
      console.log('');
    }
    
    // Display API methods
    if (doc.apiMethods && doc.apiMethods.length > 0) {
      console.log('🔌 API Method Mappings (first 5):');
      doc.apiMethods.slice(0, 5).forEach(method => {
        console.log(`  ${method.resource}.${method.operation} → ${method.apiMethod}`);
        if (method.apiUrl) {
          console.log(`    Documentation: ${method.apiUrl}`);
        }
      });
      const remaining = doc.apiMethods.length - 5;
      if (remaining > 0) {
        console.log(`  ... and ${remaining} more`);
      }
      console.log('');
    }
    
    // Display templates
    if (doc.templates && doc.templates.length > 0) {
      console.log('📋 Available Templates:');
      doc.templates.forEach(template => {
        console.log(`  - ${template.name}`);
        if (template.description) {
          console.log(`    ${template.description}`);
        }
      });
      console.log('');
    }
    
    // Display related resources
    if (doc.relatedResources && doc.relatedResources.length > 0) {
      console.log('🔗 Related Resources:');
      doc.relatedResources.forEach(resource => {
        console.log(`  - ${resource.title} (${resource.type})`);
        console.log(`    ${resource.url}`);
      });
      console.log('');
    }
    
    // Display required scopes
    if (doc.requiredScopes && doc.requiredScopes.length > 0) {
      console.log('🔐 Required Scopes:');
      doc.requiredScopes.forEach(scope => {
        console.log(`  - ${scope}`);
      });
      console.log('');
    }
    
    // Display summary
    console.log('📊 Summary:');
    console.log(`  - Total operations: ${doc.operations?.length || 0}`);
    console.log(`  - Total API methods: ${doc.apiMethods?.length || 0}`);
    console.log(`  - Code examples: ${doc.examples?.length || 0}`);
    console.log(`  - Templates: ${doc.templates?.length || 0}`);
    console.log(`  - Related resources: ${doc.relatedResources?.length || 0}`);
    
  } catch (error) {
    console.error('Error:', error.message);
  } finally {
    await fetcher.cleanup();
  }
}

// Run demo
demonstrateEnhancedDocumentation().catch(console.error);
```

--------------------------------------------------------------------------------
/scripts/publish-npm.sh:
--------------------------------------------------------------------------------

```bash
#!/bin/bash
# Script to publish n8n-mcp with runtime-only dependencies

set -e

# Color codes for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo "🚀 Preparing n8n-mcp for npm publish..."

# Skip tests - they already run in CI before merge/publish
echo "⏭️  Skipping tests (already verified in CI)"

# Sync version to runtime package first
echo "🔄 Syncing version to package.runtime.json..."
npm run sync:runtime-version

# Get version from main package.json
VERSION=$(node -e "console.log(require('./package.json').version)")
echo -e "${GREEN}📌 Version: $VERSION${NC}"

# Check if dist directory exists
if [ ! -d "dist" ]; then
    echo -e "${RED}❌ Error: dist directory not found. Run 'npm run build' first.${NC}"
    exit 1
fi

# Check if database exists
if [ ! -f "data/nodes.db" ]; then
    echo -e "${RED}❌ Error: data/nodes.db not found. Run 'npm run rebuild' first.${NC}"
    exit 1
fi

# Create a temporary publish directory
PUBLISH_DIR="npm-publish-temp"
rm -rf $PUBLISH_DIR
mkdir -p $PUBLISH_DIR

# Copy necessary files
echo "📦 Copying files..."
cp -r dist $PUBLISH_DIR/
cp -r data $PUBLISH_DIR/
cp README.md $PUBLISH_DIR/
cp LICENSE $PUBLISH_DIR/
cp .env.example $PUBLISH_DIR/
cp .npmignore $PUBLISH_DIR/ 2>/dev/null || true

# Use runtime package.json (already has correct version from sync)
echo "📋 Using runtime-only dependencies..."
cp package.runtime.json $PUBLISH_DIR/package.json

cd $PUBLISH_DIR

# Add required fields from main package.json
node -e "
const pkg = require('./package.json');
pkg.name = 'n8n-mcp';
pkg.description = 'Integration between n8n workflow automation and Model Context Protocol (MCP)';
pkg.main = 'dist/index.js';
pkg.types = 'dist/index.d.ts';
pkg.exports = {
  '.': {
    types: './dist/index.d.ts',
    require: './dist/index.js',
    import: './dist/index.js'
  }
};
pkg.bin = { 'n8n-mcp': './dist/mcp/index.js' };
pkg.repository = { type: 'git', url: 'git+https://github.com/czlonkowski/n8n-mcp.git' };
pkg.keywords = ['n8n', 'mcp', 'model-context-protocol', 'ai', 'workflow', 'automation'];
pkg.author = 'Romuald Czlonkowski @ www.aiadvisors.pl/en';
pkg.license = 'MIT';
pkg.bugs = { url: 'https://github.com/czlonkowski/n8n-mcp/issues' };
pkg.homepage = 'https://github.com/czlonkowski/n8n-mcp#readme';
pkg.files = ['dist/**/*', 'data/nodes.db', '.env.example', 'README.md', 'LICENSE'];
// Note: node_modules are automatically included for dependencies
delete pkg.private; // Remove private field so we can publish
require('fs').writeFileSync('./package.json', JSON.stringify(pkg, null, 2));
"

echo ""
echo "📋 Package details:"
echo -e "${GREEN}Name:${NC} $(node -e "console.log(require('./package.json').name)")"
echo -e "${GREEN}Version:${NC} $(node -e "console.log(require('./package.json').version)")"
echo -e "${GREEN}Size:${NC} ~50MB (vs 1GB+ with dev dependencies)"
echo -e "${GREEN}Runtime deps:${NC} 8 packages"

echo ""
echo "✅ Ready to publish!"
echo ""
echo -e "${YELLOW}⚠️  Important: npm publishing requires OTP authentication${NC}"
echo ""
echo "To publish, run:"
echo -e "  ${GREEN}cd $PUBLISH_DIR${NC}"
echo -e "  ${GREEN}npm publish --otp=YOUR_OTP_CODE${NC}"
echo ""
echo "After publishing, clean up with:"
echo -e "  ${GREEN}cd ..${NC}"
echo -e "  ${GREEN}rm -rf $PUBLISH_DIR${NC}"
echo ""
echo "📝 Notes:"
echo "  - Get your OTP from your authenticator app"
echo "  - The package will be available at https://www.npmjs.com/package/n8n-mcp"
echo "  - Users can run 'npx n8n-mcp' immediately after publish"
```

--------------------------------------------------------------------------------
/scripts/extract-nodes-docker.sh:
--------------------------------------------------------------------------------

```bash
#!/bin/bash
set -e

echo "🐳 n8n Node Extraction via Docker"
echo "================================="

# Colors for output
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Function to print colored output
print_status() {
    echo -e "${GREEN}[$(date +'%H:%M:%S')]${NC} $1"
}

print_warning() {
    echo -e "${YELLOW}[$(date +'%H:%M:%S')]${NC} ⚠️  $1"
}

print_error() {
    echo -e "${RED}[$(date +'%H:%M:%S')]${NC} ❌ $1"
}

# Check if Docker is running
if ! docker info > /dev/null 2>&1; then
    print_error "Docker is not running. Please start Docker and try again."
    exit 1
fi

print_status "Docker is running ✅"

# Clean up any existing containers
print_status "Cleaning up existing containers..."
docker-compose -f docker-compose.extract.yml down -v 2>/dev/null || true

# Build the project first
print_status "Building the project..."
npm run build

# Start the extraction process
print_status "Starting n8n container to extract latest nodes..."
docker-compose -f docker-compose.extract.yml up -d n8n-latest

# Wait for n8n container to be healthy
print_status "Waiting for n8n container to initialize..."
ATTEMPTS=0
MAX_ATTEMPTS=60

while [ $ATTEMPTS -lt $MAX_ATTEMPTS ]; do
    if docker-compose -f docker-compose.extract.yml ps | grep -q "healthy"; then
        print_status "n8n container is ready ✅"
        break
    fi
    
    ATTEMPTS=$((ATTEMPTS + 1))
    echo -n "."
    sleep 2
done

if [ $ATTEMPTS -eq $MAX_ATTEMPTS ]; then
    print_error "n8n container failed to become healthy"
    docker-compose -f docker-compose.extract.yml logs n8n-latest
    docker-compose -f docker-compose.extract.yml down -v
    exit 1
fi

# Run the extraction
print_status "Running node extraction..."
docker-compose -f docker-compose.extract.yml run --rm node-extractor

# Check the results
print_status "Checking extraction results..."
if [ -f "./data/nodes-fresh.db" ]; then
    NODE_COUNT=$(sqlite3 ./data/nodes-fresh.db "SELECT COUNT(*) FROM nodes;" 2>/dev/null || echo "0")
    IF_VERSION=$(sqlite3 ./data/nodes-fresh.db "SELECT version FROM nodes WHERE name='n8n-nodes-base.If' LIMIT 1;" 2>/dev/null || echo "not found")
    
    print_status "Extracted $NODE_COUNT nodes"
    print_status "If node version: $IF_VERSION"
    
    # Check if we got the If node source code and look for version
    IF_SOURCE=$(sqlite3 ./data/nodes-fresh.db "SELECT source_code FROM nodes WHERE name='n8n-nodes-base.If' LIMIT 1;" 2>/dev/null || echo "")
    if [[ $IF_SOURCE =~ version:[[:space:]]*([0-9]+) ]]; then
        IF_CODE_VERSION="${BASH_REMATCH[1]}"
        print_status "If node version from source code: v$IF_CODE_VERSION"
        
        if [ "$IF_CODE_VERSION" -ge "2" ]; then
            print_status "✅ Successfully extracted latest If node (v$IF_CODE_VERSION)!"
        else
            print_warning "If node is still v$IF_CODE_VERSION, expected v2 or higher"
        fi
    fi
else
    print_error "Database file not found after extraction"
fi

# Clean up
print_status "Cleaning up Docker containers..."
docker-compose -f docker-compose.extract.yml down -v

print_status "✨ Extraction complete!"

# Offer to restart the MCP server
echo ""
read -p "Would you like to restart the MCP server with the new nodes? (y/n) " -n 1 -r
echo ""
if [[ $REPLY =~ ^[Yy]$ ]]; then
    print_status "Restarting MCP server..."
    # Kill any existing server process
    pkill -f "node.*dist/index.js" || true
    
    # Start the server
    npm start &
    print_status "MCP server restarted with fresh node database"
fi
```

--------------------------------------------------------------------------------
/src/mcp/tool-docs/templates/get-templates-for-task.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const getTemplatesForTaskDoc: ToolDocumentation = {
  name: 'get_templates_for_task',
  category: 'templates',
  essentials: {
    description: 'Curated templates by task: ai_automation, data_sync, webhook_processing, email_automation, slack_integration, data_transformation, file_processing, scheduling, api_integration, database_operations.',
    keyParameters: ['task'],
    example: 'get_templates_for_task({task: "slack_integration"})',
    performance: 'Fast (<100ms) - pre-categorized results',
    tips: [
      'Returns hand-picked templates for specific automation tasks',
      'Use list_tasks to see all available task categories',
      'Templates are curated for quality and relevance'
    ]
  },
  full: {
    description: `Retrieves curated workflow templates for specific automation tasks. This tool provides hand-picked templates organized by common use cases, making it easy to find the right workflow for your needs. Each task category contains the most popular and effective templates for that particular automation scenario.`,
    parameters: {
      task: {
        type: 'string',
        required: true,
        description: 'The type of task to get templates for. Options: ai_automation, data_sync, webhook_processing, email_automation, slack_integration, data_transformation, file_processing, scheduling, api_integration, database_operations'
      }
    },
    returns: `Returns an object containing:
- task: The requested task type
- templates: Array of curated templates
  - id: Template ID
  - name: Template name
  - description: What the workflow does
  - author: Creator information
  - nodes: Array of node types used
  - views: Popularity metric
  - created: Creation date
  - url: Link to template
- totalFound: Number of templates in this category
- availableTasks: List of all task categories (if no templates found)`,
    examples: [
      'get_templates_for_task({task: "slack_integration"}) - Get Slack automation workflows',
      'get_templates_for_task({task: "ai_automation"}) - Get AI-powered workflows',
      'get_templates_for_task({task: "data_sync"}) - Get data synchronization workflows',
      'get_templates_for_task({task: "webhook_processing"}) - Get webhook handler workflows',
      'get_templates_for_task({task: "email_automation"}) - Get email automation workflows'
    ],
    useCases: [
      'Find workflows for specific business needs',
      'Discover best practices for common automations',
      'Get started quickly with pre-built solutions',
      'Learn patterns for specific integration types',
      'Browse curated collections of quality workflows'
    ],
    performance: `Excellent performance with pre-categorized templates:
- Query time: <10ms (indexed by task)
- No filtering needed (pre-curated)
- Returns 5-20 templates per category
- Total response time: <100ms`,
    bestPractices: [
      'Start with task-based search for faster results',
      'Review multiple templates to find best patterns',
      'Check template age for most current approaches',
      'Combine templates from same category for complex workflows',
      'Use returned node lists to understand requirements'
    ],
    pitfalls: [
      'Not all tasks have many templates available',
      'Task categories are predefined - no custom categories',
      'Some templates may overlap between categories',
      'Curation is subjective - browse all results',
      'Templates may need updates for latest n8n features'
    ],
    relatedTools: ['search_templates', 'list_node_templates', 'get_template', 'list_tasks']
  }
};
```

--------------------------------------------------------------------------------
/scripts/test-jmespath-validation.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env npx tsx

import { EnhancedConfigValidator } from '../src/services/enhanced-config-validator.js';

console.log('🧪 Testing JMESPath Validation\n');

const testCases = [
  {
    name: 'JMESPath with unquoted numeric literal',
    config: {
      language: 'javaScript',
      jsCode: `const data = { users: [{ name: 'John', age: 30 }, { name: 'Jane', age: 25 }] };
const adults = $jmespath(data, 'users[?age >= 18]');
return [{json: {adults}}];`
    },
    expectError: true
  },
  {
    name: 'JMESPath with properly quoted numeric literal',
    config: {
      language: 'javaScript',
      jsCode: `const data = { users: [{ name: 'John', age: 30 }, { name: 'Jane', age: 25 }] };
const adults = $jmespath(data, 'users[?age >= \`18\`]');
return [{json: {adults}}];`
    },
    expectError: false
  },
  {
    name: 'Multiple JMESPath filters with unquoted numbers',
    config: {
      language: 'javaScript',
      jsCode: `const products = items.map(item => item.json);
const expensive = $jmespath(products, '[?price > 100]');
const lowStock = $jmespath(products, '[?quantity < 10]');
const highPriority = $jmespath(products, '[?priority == 1]');
return [{json: {expensive, lowStock, highPriority}}];`
    },
    expectError: true
  },
  {
    name: 'JMESPath with string comparison (no backticks needed)',
    config: {
      language: 'javaScript',
      jsCode: `const data = { users: [{ name: 'John', status: 'active' }, { name: 'Jane', status: 'inactive' }] };
const activeUsers = $jmespath(data, 'users[?status == "active"]');
return [{json: {activeUsers}}];`
    },
    expectError: false
  },
  {
    name: 'Python JMESPath with unquoted numeric literal',
    config: {
      language: 'python',
      pythonCode: `data = { 'users': [{ 'name': 'John', 'age': 30 }, { 'name': 'Jane', 'age': 25 }] }
adults = _jmespath(data, 'users[?age >= 18]')
return [{'json': {'adults': adults}}]`
    },
    expectError: true
  },
  {
    name: 'Complex filter with decimal numbers',
    config: {
      language: 'javaScript',
      jsCode: `const items = [{ price: 99.99 }, { price: 150.50 }, { price: 200 }];
const expensive = $jmespath(items, '[?price >= 99.95]');
return [{json: {expensive}}];`
    },
    expectError: true
  }
];

let passCount = 0;
let failCount = 0;

for (const test of testCases) {
  console.log(`Test: ${test.name}`);
  const result = EnhancedConfigValidator.validateWithMode(
    'nodes-base.code',
    test.config,
    [
      { name: 'language', type: 'options', options: ['javaScript', 'python'] },
      { name: 'jsCode', type: 'string' },
      { name: 'pythonCode', type: 'string' }
    ],
    'operation',
    'strict'
  );
  
  const hasJMESPathError = result.errors.some(e => 
    e.message.includes('JMESPath numeric literal') || 
    e.message.includes('must be wrapped in backticks')
  );
  
  const passed = hasJMESPathError === test.expectError;
  
  console.log(`  Expected error: ${test.expectError}`);
  console.log(`  Has JMESPath error: ${hasJMESPathError}`);
  console.log(`  Result: ${passed ? '✅ PASS' : '❌ FAIL'}`);
  
  if (result.errors.length > 0) {
    console.log(`  Errors: ${result.errors.map(e => e.message).join(', ')}`);
  }
  if (result.warnings.length > 0) {
    console.log(`  Warnings: ${result.warnings.slice(0, 2).map(w => w.message).join(', ')}`);
  }
  
  if (passed) passCount++;
  else failCount++;
  
  console.log();
}

console.log(`\n📊 Results: ${passCount} passed, ${failCount} failed`);
console.log(failCount === 0 ? '✅ All JMESPath validation tests passed!' : '❌ Some tests failed');
```
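
The rule these cases exercise: in JMESPath filter expressions, numeric literals must be wrapped in backticks, while string literals use quotes. A short illustration (the `declare` only makes the sketch self-contained; in an n8n Code node `$jmespath` is provided at runtime):

```typescript
declare function $jmespath(data: unknown, expression: string): unknown;

const data = { users: [{ name: 'John', age: 30, status: 'active' }] };

const adults = $jmespath(data, 'users[?age >= `18`]');        // OK: numeric literal wrapped in backticks
const active = $jmespath(data, 'users[?status == "active"]'); // OK: string literal in quotes, no backticks
// $jmespath(data, 'users[?age >= 18]');                      // flagged: unquoted numeric literal
console.log(adults, active);
```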

--------------------------------------------------------------------------------
/tests/mocks/n8n-api/data/executions.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Mock execution data for MSW handlers
 */

export interface MockExecution {
  id: string;
  workflowId: string;
  status: 'success' | 'error' | 'waiting' | 'running';
  mode: 'manual' | 'trigger' | 'webhook' | 'internal';
  startedAt: string;
  stoppedAt?: string;
  data?: any;
  error?: any;
}

export const mockExecutions: MockExecution[] = [
  {
    id: 'exec_1',
    workflowId: 'workflow_1',
    status: 'success',
    mode: 'manual',
    startedAt: '2024-01-01T10:00:00.000Z',
    stoppedAt: '2024-01-01T10:00:05.000Z',
    data: {
      resultData: {
        runData: {
          'node_2': [
            {
              startTime: 1704106800000,
              executionTime: 234,
              data: {
                main: [[{
                  json: {
                    status: 200,
                    data: { message: 'Success' }
                  }
                }]]
              }
            }
          ]
        }
      }
    }
  },
  {
    id: 'exec_2',
    workflowId: 'workflow_2',
    status: 'error',
    mode: 'webhook',
    startedAt: '2024-01-01T11:00:00.000Z',
    stoppedAt: '2024-01-01T11:00:02.000Z',
    error: {
      message: 'Could not send message to Slack',
      stack: 'Error: Could not send message to Slack\n    at SlackNode.execute',
      node: 'slack_1'
    },
    data: {
      resultData: {
        runData: {
          'webhook_1': [
            {
              startTime: 1704110400000,
              executionTime: 10,
              data: {
                main: [[{
                  json: {
                    headers: { 'content-type': 'application/json' },
                    body: { message: 'Test webhook' }
                  }
                }]]
              }
            }
          ]
        }
      }
    }
  },
  {
    id: 'exec_3',
    workflowId: 'workflow_3',
    status: 'waiting',
    mode: 'trigger',
    startedAt: '2024-01-01T12:00:00.000Z',
    data: {
      resultData: {
        runData: {}
      },
      waitingExecutions: {
        'agent_1': {
          reason: 'Waiting for user input'
        }
      }
    }
  }
];

/**
 * Factory functions for creating mock executions
 */
export const executionFactory = {
  /**
   * Create a successful execution
   */
  success: (workflowId: string, data?: any): MockExecution => ({
    id: `exec_${Date.now()}`,
    workflowId,
    status: 'success',
    mode: 'manual',
    startedAt: new Date().toISOString(),
    stoppedAt: new Date(Date.now() + 5000).toISOString(),
    data: data || {
      resultData: {
        runData: {
          'node_1': [{
            startTime: Date.now(),
            executionTime: 100,
            data: {
              main: [[{ json: { success: true } }]]
            }
          }]
        }
      }
    }
  }),

  /**
   * Create a failed execution
   */
  error: (workflowId: string, error: { message: string; node?: string }): MockExecution => ({
    id: `exec_${Date.now()}`,
    workflowId,
    status: 'error',
    mode: 'manual',
    startedAt: new Date().toISOString(),
    stoppedAt: new Date(Date.now() + 2000).toISOString(),
    error: {
      message: error.message,
      stack: `Error: ${error.message}\n    at Node.execute`,
      node: error.node
    },
    data: {
      resultData: {
        runData: {}
      }
    }
  }),

  /**
   * Create a custom execution
   */
  custom: (config: Partial<MockExecution>): MockExecution => ({
    id: `exec_${Date.now()}`,
    workflowId: 'workflow_1',
    status: 'success',
    mode: 'manual',
    startedAt: new Date().toISOString(),
    ...config
  })
};
```
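
A brief usage sketch showing how these factories might be used in a test (the import path and assertions are illustrative):

```typescript
import { describe, it, expect } from 'vitest';
import { executionFactory } from './executions';

describe('execution factory sketch', () => {
  it('builds a failed execution with the error attached to a node', () => {
    const exec = executionFactory.error('workflow_2', { message: 'Timeout', node: 'http_1' });

    expect(exec.status).toBe('error');
    expect(exec.error.node).toBe('http_1');
    expect(new Date(exec.stoppedAt!).getTime()).toBeGreaterThan(new Date(exec.startedAt).getTime());
  });
});
```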

--------------------------------------------------------------------------------
/src/mcp/tool-docs/index.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from './types';

// Import all tool documentations
import { searchNodesDoc, listNodesDoc, listAiToolsDoc, getDatabaseStatisticsDoc } from './discovery';
import { 
  getNodeEssentialsDoc, 
  getNodeInfoDoc, 
  getNodeDocumentationDoc,
  searchNodePropertiesDoc,
  getNodeAsToolInfoDoc,
  getPropertyDependenciesDoc
} from './configuration';
import { 
  validateNodeMinimalDoc, 
  validateNodeOperationDoc,
  validateWorkflowDoc,
  validateWorkflowConnectionsDoc,
  validateWorkflowExpressionsDoc
} from './validation';
import {
  listTasksDoc,
  listNodeTemplatesDoc,
  getTemplateDoc,
  searchTemplatesDoc,
  searchTemplatesByMetadataDoc,
  getTemplatesForTaskDoc
} from './templates';
import {
  toolsDocumentationDoc,
  n8nDiagnosticDoc,
  n8nHealthCheckDoc,
  n8nListAvailableToolsDoc
} from './system';
import {
  aiAgentsGuide
} from './guides';
import {
  n8nCreateWorkflowDoc,
  n8nGetWorkflowDoc,
  n8nGetWorkflowDetailsDoc,
  n8nGetWorkflowStructureDoc,
  n8nGetWorkflowMinimalDoc,
  n8nUpdateFullWorkflowDoc,
  n8nUpdatePartialWorkflowDoc,
  n8nDeleteWorkflowDoc,
  n8nListWorkflowsDoc,
  n8nValidateWorkflowDoc,
  n8nAutofixWorkflowDoc,
  n8nTriggerWebhookWorkflowDoc,
  n8nGetExecutionDoc,
  n8nListExecutionsDoc,
  n8nDeleteExecutionDoc
} from './workflow_management';

// Combine all tool documentations into a single object
export const toolsDocumentation: Record<string, ToolDocumentation> = {
  // System tools
  tools_documentation: toolsDocumentationDoc,
  n8n_diagnostic: n8nDiagnosticDoc,
  n8n_health_check: n8nHealthCheckDoc,
  n8n_list_available_tools: n8nListAvailableToolsDoc,

  // Guides
  ai_agents_guide: aiAgentsGuide,

  // Discovery tools
  search_nodes: searchNodesDoc,
  list_nodes: listNodesDoc,
  list_ai_tools: listAiToolsDoc,
  get_database_statistics: getDatabaseStatisticsDoc,
  
  // Configuration tools
  get_node_essentials: getNodeEssentialsDoc,
  get_node_info: getNodeInfoDoc,
  get_node_documentation: getNodeDocumentationDoc,
  search_node_properties: searchNodePropertiesDoc,
  get_node_as_tool_info: getNodeAsToolInfoDoc,
  get_property_dependencies: getPropertyDependenciesDoc,
  
  // Validation tools
  validate_node_minimal: validateNodeMinimalDoc,
  validate_node_operation: validateNodeOperationDoc,
  validate_workflow: validateWorkflowDoc,
  validate_workflow_connections: validateWorkflowConnectionsDoc,
  validate_workflow_expressions: validateWorkflowExpressionsDoc,
  
  // Template tools
  list_tasks: listTasksDoc,
  list_node_templates: listNodeTemplatesDoc,
  get_template: getTemplateDoc,
  search_templates: searchTemplatesDoc,
  search_templates_by_metadata: searchTemplatesByMetadataDoc,
  get_templates_for_task: getTemplatesForTaskDoc,
  
  // Workflow Management tools (n8n API)
  n8n_create_workflow: n8nCreateWorkflowDoc,
  n8n_get_workflow: n8nGetWorkflowDoc,
  n8n_get_workflow_details: n8nGetWorkflowDetailsDoc,
  n8n_get_workflow_structure: n8nGetWorkflowStructureDoc,
  n8n_get_workflow_minimal: n8nGetWorkflowMinimalDoc,
  n8n_update_full_workflow: n8nUpdateFullWorkflowDoc,
  n8n_update_partial_workflow: n8nUpdatePartialWorkflowDoc,
  n8n_delete_workflow: n8nDeleteWorkflowDoc,
  n8n_list_workflows: n8nListWorkflowsDoc,
  n8n_validate_workflow: n8nValidateWorkflowDoc,
  n8n_autofix_workflow: n8nAutofixWorkflowDoc,
  n8n_trigger_webhook_workflow: n8nTriggerWebhookWorkflowDoc,
  n8n_get_execution: n8nGetExecutionDoc,
  n8n_list_executions: n8nListExecutionsDoc,
  n8n_delete_execution: n8nDeleteExecutionDoc
};

// Re-export types
export type { ToolDocumentation } from './types';
```
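
Because the export is a plain `Record` keyed by tool name, consumers can look entries up directly; a minimal sketch (import path is illustrative):

```typescript
import { toolsDocumentation } from '../tool-docs';

const doc = toolsDocumentation['n8n_validate_workflow'];
if (doc) {
  console.log(doc.essentials.description);
  console.log(doc.essentials.example);
}
```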

--------------------------------------------------------------------------------
/src/utils/mcp-client.ts:
--------------------------------------------------------------------------------

```typescript
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';
import { WebSocketClientTransport } from '@modelcontextprotocol/sdk/client/websocket.js';
import {
  CallToolRequest,
  ListToolsRequest,
  ListResourcesRequest,
  ReadResourceRequest,
  ListPromptsRequest,
  GetPromptRequest,
  CallToolResultSchema,
  ListToolsResultSchema,
  ListResourcesResultSchema,
  ReadResourceResultSchema,
  ListPromptsResultSchema,
  GetPromptResultSchema,
} from '@modelcontextprotocol/sdk/types.js';

export interface MCPClientConfig {
  serverUrl: string;
  authToken?: string;
  connectionType: 'http' | 'websocket' | 'stdio';
}

export class MCPClient {
  private client: Client;
  private config: MCPClientConfig;
  private connected: boolean = false;

  constructor(config: MCPClientConfig) {
    this.config = config;
    this.client = new Client(
      {
        name: 'n8n-mcp-client',
        version: '1.0.0',
      },
      {
        capabilities: {},
      }
    );
  }

  async connect(): Promise<void> {
    if (this.connected) {
      return;
    }

    let transport;
    
    switch (this.config.connectionType) {
      case 'websocket':
        const wsUrl = this.config.serverUrl.replace(/^http/, 'ws');
        transport = new WebSocketClientTransport(new URL(wsUrl));
        break;
      
      case 'stdio':
        // For stdio, the serverUrl should be the command to execute
        const [command, ...args] = this.config.serverUrl.split(' ');
        transport = new StdioClientTransport({
          command,
          args,
        });
        break;
      
      default:
        throw new Error(`HTTP transport is not yet supported for MCP clients`);
    }

    await this.client.connect(transport);
    this.connected = true;
  }

  async disconnect(): Promise<void> {
    if (this.connected) {
      await this.client.close();
      this.connected = false;
    }
  }

  async listTools(): Promise<any> {
    await this.ensureConnected();
    return await this.client.request(
      { method: 'tools/list' } as ListToolsRequest,
      ListToolsResultSchema
    );
  }

  async callTool(name: string, args: any): Promise<any> {
    await this.ensureConnected();
    return await this.client.request(
      {
        method: 'tools/call',
        params: {
          name,
          arguments: args,
        },
      } as CallToolRequest,
      CallToolResultSchema
    );
  }

  async listResources(): Promise<any> {
    await this.ensureConnected();
    return await this.client.request(
      { method: 'resources/list' } as ListResourcesRequest,
      ListResourcesResultSchema
    );
  }

  async readResource(uri: string): Promise<any> {
    await this.ensureConnected();
    return await this.client.request(
      {
        method: 'resources/read',
        params: {
          uri,
        },
      } as ReadResourceRequest,
      ReadResourceResultSchema
    );
  }

  async listPrompts(): Promise<any> {
    await this.ensureConnected();
    return await this.client.request(
      { method: 'prompts/list' } as ListPromptsRequest,
      ListPromptsResultSchema
    );
  }

  async getPrompt(name: string, args?: any): Promise<any> {
    await this.ensureConnected();
    return await this.client.request(
      {
        method: 'prompts/get',
        params: {
          name,
          arguments: args,
        },
      } as GetPromptRequest,
      GetPromptResultSchema
    );
  }

  private async ensureConnected(): Promise<void> {
    if (!this.connected) {
      await this.connect();
    }
  }
}
```
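
A usage sketch for the client above over stdio, where `serverUrl` is the command line that launches the server (the command and tool arguments are illustrative):

```typescript
import { MCPClient } from './mcp-client';

async function main() {
  const client = new MCPClient({
    serverUrl: 'node dist/mcp/index.js', // stdio transport: the command to execute
    connectionType: 'stdio',
  });

  try {
    const tools = await client.listTools(); // connects lazily via ensureConnected()
    console.log('Available tools:', tools);

    const result = await client.callTool('get_node_essentials', {
      nodeType: 'nodes-base.httpRequest',
    });
    console.log('Tool result:', result);
  } finally {
    await client.disconnect();
  }
}

main().catch(console.error);
```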

--------------------------------------------------------------------------------
/src/mcp/tool-docs/workflow_management/n8n-validate-workflow.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const n8nValidateWorkflowDoc: ToolDocumentation = {
  name: 'n8n_validate_workflow',
  category: 'workflow_management',
  essentials: {
    description: 'Validate workflow from n8n instance by ID - checks nodes, connections, expressions, and returns errors/warnings',
    keyParameters: ['id'],
    example: 'n8n_validate_workflow({id: "wf_abc123"})',
    performance: 'Network-dependent (100-500ms) - fetches and validates workflow',
    tips: [
      'Use options.profile to control validation strictness (minimal/runtime/ai-friendly/strict)',
      'Validation includes node configs, connections, and n8n expression syntax',
      'Returns categorized errors, warnings, and actionable fix suggestions'
    ]
  },
  full: {
    description: `Validates a workflow stored in your n8n instance by fetching it via API and running comprehensive validation checks. This tool:

- Fetches the workflow from n8n using the workflow ID
- Validates all node configurations based on their schemas
- Checks workflow connections and data flow
- Validates n8n expression syntax in all fields
- Returns categorized issues with fix suggestions

The validation uses the same engine as validate_workflow but works with workflows already in n8n, making it perfect for validating existing workflows before execution.

Requires N8N_API_URL and N8N_API_KEY environment variables to be configured.`,
    parameters: {
      id: {
        type: 'string',
        required: true,
        description: 'The workflow ID to validate from your n8n instance'
      },
      options: {
        type: 'object',
        required: false,
        description: 'Validation options: {validateNodes: bool (default true), validateConnections: bool (default true), validateExpressions: bool (default true), profile: "minimal"|"runtime"|"ai-friendly"|"strict" (default "runtime")}'
      }
    },
    returns: 'ValidationResult object containing isValid boolean, arrays of errors/warnings, and suggestions for fixes',
    examples: [
      'n8n_validate_workflow({id: "wf_abc123"}) - Validate with default settings',
      'n8n_validate_workflow({id: "wf_abc123", options: {profile: "strict"}}) - Strict validation',
      'n8n_validate_workflow({id: "wf_abc123", options: {validateExpressions: false}}) - Skip expression validation'
    ],
    useCases: [
      'Validating workflows before running them in production',
      'Checking imported workflows for compatibility',
      'Debugging workflow execution failures',
      'Ensuring workflows follow best practices',
      'Pre-deployment validation in CI/CD pipelines'
    ],
    performance: 'Depends on workflow size and API latency. Typically 100-500ms for medium workflows.',
    bestPractices: [
      'Run validation before activating workflows in production',
      'Use "runtime" profile for pre-execution checks',
      'Use "strict" profile for code review and best practices',
      'Fix errors before warnings - errors will likely cause execution failures',
      'Pay attention to expression validation - syntax errors are common'
    ],
    pitfalls: [
      'Requires valid API credentials - check n8n_health_check first',
      'Large workflows may take longer to validate',
      'Some warnings may be intentional (e.g., optional parameters)',
      'Profile affects validation time - strict is slower but more thorough',
      'Expression validation may flag working but non-standard syntax'
    ],
    relatedTools: ['validate_workflow', 'n8n_get_workflow', 'validate_workflow_expressions', 'n8n_health_check', 'n8n_autofix_workflow']
  }
};
```

--------------------------------------------------------------------------------
/N8N_HTTP_STREAMABLE_SETUP.md:
--------------------------------------------------------------------------------

```markdown
# n8n MCP HTTP Streamable Configuration Guide

## Overview

This guide shows how to configure the n8n-nodes-mcp community node to connect to n8n-mcp using the **recommended HTTP Streamable transport**.

## Prerequisites

1. Install n8n-nodes-mcp community node:
   - Go to n8n Settings → Community Nodes
   - Install: `n8n-nodes-mcp`
   - Restart n8n if prompted

2. Ensure environment variable is set:
   ```bash
   N8N_COMMUNITY_PACKAGES_ALLOW_TOOL_USAGE=true
   ```

## Quick Start

### Step 1: Start Services

```bash
# Stop any existing containers
docker stop n8n n8n-mcp && docker rm n8n n8n-mcp

# Start with HTTP Streamable configuration
docker-compose -f docker-compose.n8n.yml up -d

# Services will be available at:
# - n8n: http://localhost:5678
# - n8n-mcp: http://localhost:3000
```

### Step 2: Create MCP Credentials in n8n

1. Open n8n at http://localhost:5678
2. Go to Credentials → Add credential
3. Search for "MCP" and select "MCP API"
4. Configure the fields as follows:
   - **Credential Name**: `n8n MCP Server`
   - **HTTP Stream URL**: `http://n8n-mcp:3000/mcp`
   - **Messages Post Endpoint**: (leave empty)
   - **Additional Headers**: 
     ```json
     {
       "Authorization": "Bearer test-secure-token-123456789"
     }
     ```
5. Save the credential

### Step 3: Configure MCP Client Node

Add an MCP Client node to your workflow with these settings:

- **Connection Type**: `HTTP Streamable`
- **HTTP Streamable URL**: `http://n8n-mcp:3000/mcp`
- **Authentication**: `Bearer Auth`
- **Credentials**: Select the credential you created
- **Operation**: Choose your operation (e.g., "List Tools", "Call Tool")

### Step 4: Test the Connection

1. Execute the workflow
2. The MCP Client should successfully connect and return results

## Available Operations

### List Tools
Shows all available MCP tools:
- `tools_documentation`
- `list_nodes`
- `get_node_info`
- `search_nodes`
- `get_node_essentials`
- `validate_node_operation`
- And many more...

### Call Tool
Execute specific tools with arguments:

**Example: Get Node Info**
- Tool Name: `get_node_info`
- Arguments: `{ "nodeType": "n8n-nodes-base.httpRequest" }`

**Example: Search Nodes**
- Tool Name: `search_nodes`
- Arguments: `{ "query": "webhook", "limit": 5 }`

## Import Example Workflow

Import the pre-configured workflow:
1. Go to Workflows → Add workflow → Import from File
2. Select: `examples/n8n-mcp-streamable-workflow.json`
3. Update the credentials with your bearer token

## Troubleshooting

### Connection Refused
- Verify services are running: `docker ps`
- Check logs: `docker logs n8n-mcp`
- Ensure you're using `http://n8n-mcp:3000/mcp` (container name) not `localhost`

### Authentication Failed
- Verify bearer token matches exactly
- Check CORS settings allow n8n origin

### Test Endpoint Manually
```bash
# Test health check
curl http://localhost:3000/health

# Test MCP endpoint (should return error without proper JSON-RPC body)
curl -X POST http://localhost:3000/mcp \
  -H "Authorization: Bearer test-secure-token-123456789" \
  -H "Content-Type: application/json"
```
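
A request with a proper JSON-RPC 2.0 body should get past that error (a sketch — depending on the MCP SDK version you may need to send an `initialize` request first and include an `Accept: application/json, text/event-stream` header):

```bash
curl -X POST http://localhost:3000/mcp \
  -H "Authorization: Bearer test-secure-token-123456789" \
  -H "Content-Type: application/json" \
  -H "Accept: application/json, text/event-stream" \
  -d '{"jsonrpc":"2.0","id":1,"method":"tools/list"}'
```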

## Architecture Notes

- **Transport**: HTTP Streamable (StreamableHTTPServerTransport)
- **Protocol**: JSON-RPC 2.0 over HTTP POST
- **Authentication**: Bearer token in Authorization header
- **Endpoint**: Single `/mcp` endpoint handles all operations
- **Stateless**: Each request creates a new MCP server instance

## Why HTTP Streamable?

1. **Recommended by MCP**: The transport method recommended by the MCP specification
2. **Better Performance**: More efficient than SSE
3. **Simpler Implementation**: A single POST endpoint handles all operations
4. **Future Proof**: SSE is deprecated in the MCP spec
```

--------------------------------------------------------------------------------
/src/scripts/sanitize-templates.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env node
import { createDatabaseAdapter } from '../database/database-adapter';
import { logger } from '../utils/logger';
import { TemplateSanitizer } from '../utils/template-sanitizer';
import { gunzipSync, gzipSync } from 'zlib';

async function sanitizeTemplates() {
  console.log('🧹 Sanitizing workflow templates in database...\n');

  const db = await createDatabaseAdapter('./data/nodes.db');
  const sanitizer = new TemplateSanitizer();

  try {
    // Get all templates - check both old and new format
    const templates = db.prepare('SELECT id, name, workflow_json, workflow_json_compressed FROM templates').all() as any[];
    console.log(`Found ${templates.length} templates to check\n`);

    let sanitizedCount = 0;
    const problematicTemplates: any[] = [];

    for (const template of templates) {
      let originalWorkflow: any = null;
      let useCompressed = false;

      // Try compressed format first (newer format)
      if (template.workflow_json_compressed) {
        try {
          const buffer = Buffer.from(template.workflow_json_compressed, 'base64');
          const decompressed = gunzipSync(buffer).toString('utf-8');
          originalWorkflow = JSON.parse(decompressed);
          useCompressed = true;
        } catch (e) {
          console.log(`⚠️ Failed to decompress template ${template.id}, trying uncompressed`);
        }
      }

      // Fall back to uncompressed format (deprecated)
      if (!originalWorkflow && template.workflow_json) {
        try {
          originalWorkflow = JSON.parse(template.workflow_json);
        } catch (e) {
          console.log(`⚠️ Skipping template ${template.id}: Invalid JSON in both formats`);
          continue;
        }
      }

      if (!originalWorkflow) {
        continue; // Skip templates without workflow data
      }

      const { sanitized: sanitizedWorkflow, wasModified } = sanitizer.sanitizeWorkflow(originalWorkflow);

      if (wasModified) {
        // Get detected tokens for reporting
        const detectedTokens = sanitizer.detectTokens(originalWorkflow);

        // Update the template with sanitized version in the same format
        if (useCompressed) {
          const compressed = gzipSync(JSON.stringify(sanitizedWorkflow)).toString('base64');
          const stmt = db.prepare('UPDATE templates SET workflow_json_compressed = ? WHERE id = ?');
          stmt.run(compressed, template.id);
        } else {
          const stmt = db.prepare('UPDATE templates SET workflow_json = ? WHERE id = ?');
          stmt.run(JSON.stringify(sanitizedWorkflow), template.id);
        }

        sanitizedCount++;
        problematicTemplates.push({
          id: template.id,
          name: template.name,
          tokens: detectedTokens
        });

        console.log(`✅ Sanitized template ${template.id}: ${template.name}`);
        detectedTokens.forEach(token => {
          console.log(`   - Found: ${token.substring(0, 20)}...`);
        });
      }
    }
    
    console.log(`\n📊 Summary:`);
    console.log(`   Total templates: ${templates.length}`);
    console.log(`   Sanitized: ${sanitizedCount}`);
    
    if (problematicTemplates.length > 0) {
      console.log(`\n⚠️  Templates that contained API tokens:`);
      problematicTemplates.forEach(t => {
        console.log(`   - ${t.id}: ${t.name}`);
      });
    }
    
    console.log('\n✨ Sanitization complete!');
  } catch (error) {
    console.error('❌ Error sanitizing templates:', error);
    process.exit(1);
  } finally {
    db.close();
  }
}

// Run if called directly
if (require.main === module) {
  sanitizeTemplates().catch(console.error);
}
```

--------------------------------------------------------------------------------
/src/mcp/tool-docs/configuration/get-property-dependencies.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const getPropertyDependenciesDoc: ToolDocumentation = {
  name: 'get_property_dependencies',
  category: 'configuration',
  essentials: {
    description: 'Shows property dependencies and visibility rules - which fields appear when.',
    keyParameters: ['nodeType', 'config?'],
    example: 'get_property_dependencies({nodeType: "nodes-base.httpRequest"})',
    performance: 'Fast - analyzes property conditions',
    tips: [
      'Shows which properties depend on other property values',
      'Test visibility impact with optional config parameter',
      'Helps understand complex conditional property displays'
    ]
  },
  full: {
    description: `Analyzes property dependencies and visibility conditions for a node. Shows which properties control the visibility of other properties (e.g., sendBody=true reveals body-related fields). Optionally test how a specific configuration affects property visibility.`,
    parameters: {
      nodeType: {
        type: 'string',
        required: true,
        description: 'The node type to analyze (e.g., "nodes-base.httpRequest")',
        examples: [
          'nodes-base.httpRequest',
          'nodes-base.slack',
          'nodes-base.if',
          'nodes-base.switch'
        ]
      },
      config: {
        type: 'object',
        required: false,
        description: 'Optional partial configuration to check visibility impact',
        examples: [
          '{ method: "POST", sendBody: true }',
          '{ operation: "create", resource: "contact" }',
          '{ mode: "rules" }'
        ]
      }
    },
    returns: `Object containing:
- nodeType: The analyzed node type
- displayName: Human-readable node name
- controllingProperties: Properties that control visibility of others
- dependentProperties: Properties whose visibility depends on others
- complexDependencies: Multi-condition dependencies
- currentConfig: If config provided, shows:
  - providedValues: The configuration you passed
  - visibilityImpact: Which properties are visible/hidden`,
    examples: [
      'get_property_dependencies({nodeType: "nodes-base.httpRequest"}) - Analyze HTTP Request dependencies',
      'get_property_dependencies({nodeType: "nodes-base.httpRequest", config: {sendBody: true}}) - Test visibility with sendBody enabled',
      'get_property_dependencies({nodeType: "nodes-base.if", config: {mode: "rules"}}) - Check If node in rules mode'
    ],
    useCases: [
      'Understanding which properties control others',
      'Debugging why certain fields are not visible',
      'Building dynamic UIs that match n8n behavior',
      'Testing configurations before applying them',
      'Understanding complex node property relationships'
    ],
    performance: 'Fast - analyzes property metadata without database queries',
    bestPractices: [
      'Use before configuring complex nodes with many conditional fields',
      'Test different config values to understand visibility rules',
      'Check dependencies when properties seem to be missing',
      'Use for nodes with multiple operation modes (Slack, Google Sheets)',
      'Combine with search_node_properties to find specific fields'
    ],
    pitfalls: [
      'Some properties have complex multi-condition dependencies',
      'Visibility rules can be nested (property A controls B which controls C)',
      'Not all hidden properties are due to dependencies (some are deprecated)',
      'Config parameter only tests visibility, does not validate values'
    ],
    relatedTools: ['search_node_properties', 'get_node_essentials', 'validate_node_operation']
  }
};
```

--------------------------------------------------------------------------------
/.claude/agents/code-reviewer.md:
--------------------------------------------------------------------------------

```markdown
---
name: code-reviewer
description: Use this agent when you need to review recently written or modified code for quality, security, and maintainability. This agent should be invoked immediately after code changes are made to ensure high standards are maintained. Examples:\n\n<example>\nContext: The user has created a code-review agent that should be called after writing code.\nuser: "Please write a function that validates email addresses"\nassistant: "I'll create an email validation function for you:"\n<function implementation>\nassistant: "Now let me use the code-reviewer agent to review this code for quality and security"\n<commentary>\nSince new code was just written, the code-reviewer agent should be invoked to ensure it meets quality standards.\n</commentary>\n</example>\n\n<example>\nContext: The user has modified existing code and wants it reviewed.\nuser: "I've updated the authentication logic, can you check if it's secure?"\nassistant: "I'll use the code-reviewer agent to analyze your authentication changes for security and best practices"\n<commentary>\nThe user has made changes to security-critical code, so the code-reviewer agent is the appropriate tool to ensure the modifications are secure and well-implemented.\n</commentary>\n</example>
---

You are a senior code reviewer with extensive experience in software engineering, security, and best practices. Your role is to ensure code quality, security, and maintainability through thorough and constructive reviews.

When invoked, you will:

1. **Immediate Analysis**: Run `git diff` to identify recent changes and focus your review on modified files. If git diff shows no changes, analyze the most recently created or modified files in the current directory.

2. **Comprehensive Review**: Evaluate code against these critical criteria:
   - **Readability**: Code is simple, clear, and self-documenting
   - **Naming**: Functions, variables, and classes have descriptive, meaningful names
   - **DRY Principle**: No duplicated code; common logic is properly abstracted
   - **Error Handling**: All edge cases handled; errors are caught and logged appropriately
   - **Security**: No hardcoded secrets, API keys, or sensitive data; proper authentication/authorization
   - **Input Validation**: All user inputs are validated and sanitized
   - **Testing**: Adequate test coverage for critical paths and edge cases
   - **Performance**: No obvious bottlenecks; efficient algorithms and data structures used

3. **Structured Feedback**: Organize your review into three priority levels:
   - **🚨 Critical Issues (Must Fix)**: Security vulnerabilities, bugs that will cause failures, or severe performance problems
   - **⚠️ Warnings (Should Fix)**: Code smells, missing error handling, or practices that could lead to future issues
   - **💡 Suggestions (Consider Improving)**: Opportunities for better readability, performance optimizations, or architectural improvements

4. **Actionable Recommendations**: For each issue identified:
   - Explain why it's a problem
   - Provide a specific code example showing how to fix it
   - Reference relevant best practices or documentation when applicable

5. **Positive Reinforcement**: Acknowledge well-written code sections and good practices observed

Your review style should be:
- Constructive and educational, not critical or harsh
- Specific with line numbers and code snippets
- Focused on the most impactful improvements
- Considerate of the project's context and constraints

Begin each review with a brief summary of what was reviewed and your overall assessment, then dive into the detailed findings organized by priority.

```

--------------------------------------------------------------------------------
/scripts/deploy-to-vm.sh:
--------------------------------------------------------------------------------

```bash
#!/bin/bash

# Deployment script for n8n Documentation MCP Server
# Target: n8ndocumentation.aiservices.pl

set -e

echo "🚀 n8n Documentation MCP Server - VM Deployment"
echo "=============================================="

# Configuration
SERVER_USER=${SERVER_USER:-root}
SERVER_HOST=${SERVER_HOST:-n8ndocumentation.aiservices.pl}
APP_DIR="/opt/n8n-mcp"
SERVICE_NAME="n8n-docs-mcp"

# Colors
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

# Check if .env exists
if [ ! -f .env ]; then
    echo -e "${RED}❌ .env file not found. Please create it from .env.example${NC}"
    exit 1
fi

# Check required environment variables
source .env
if [ "$MCP_DOMAIN" != "n8ndocumentation.aiservices.pl" ]; then
    echo -e "${YELLOW}⚠️  Warning: MCP_DOMAIN is not set to n8ndocumentation.aiservices.pl${NC}"
    read -p "Continue anyway? (y/N) " -n 1 -r
    echo
    if [[ ! $REPLY =~ ^[Yy]$ ]]; then
        exit 1
    fi
fi

if [ -z "$MCP_AUTH_TOKEN" ] || [ "$MCP_AUTH_TOKEN" == "your-secure-auth-token-here" ]; then
    echo -e "${RED}❌ MCP_AUTH_TOKEN not set or using default value${NC}"
    echo "Generate a secure token with: openssl rand -hex 32"
    exit 1
fi

echo -e "${GREEN}✅ Configuration validated${NC}"

# Build the project locally
echo -e "\n${YELLOW}Building project...${NC}"
npm run build

# Create deployment package
echo -e "\n${YELLOW}Creating deployment package...${NC}"
rm -rf deploy-package
mkdir -p deploy-package

# Copy necessary files
cp -r dist deploy-package/
cp -r data deploy-package/
cp package*.json deploy-package/
cp .env deploy-package/
cp ecosystem.config.js deploy-package/ 2>/dev/null || true

# Create tarball
tar -czf deploy-package.tar.gz deploy-package

echo -e "${GREEN}✅ Deployment package created${NC}"

# Upload to server
echo -e "\n${YELLOW}Uploading to server...${NC}"
scp deploy-package.tar.gz $SERVER_USER@$SERVER_HOST:/tmp/

# Deploy on server
echo -e "\n${YELLOW}Deploying on server...${NC}"
ssh $SERVER_USER@$SERVER_HOST << 'ENDSSH'
set -e

# Create app directory
mkdir -p /opt/n8n-mcp
cd /opt/n8n-mcp

# Stop existing service if running
pm2 stop n8n-docs-mcp 2>/dev/null || true

# Extract deployment package
tar -xzf /tmp/deploy-package.tar.gz --strip-components=1
rm /tmp/deploy-package.tar.gz

# Install production dependencies
npm ci --only=production

# Create PM2 ecosystem file if not exists
if [ ! -f ecosystem.config.js ]; then
    cat > ecosystem.config.js << 'EOF'
module.exports = {
  apps: [{
    name: 'n8n-docs-mcp',
    script: './dist/index-http.js',
    instances: 1,
    autorestart: true,
    watch: false,
    max_memory_restart: '1G',
    env: {
      NODE_ENV: 'production'
    },
    error_file: './logs/error.log',
    out_file: './logs/out.log',
    log_file: './logs/combined.log',
    time: true
  }]
};
EOF
fi

# Create logs directory
mkdir -p logs

# Start with PM2
pm2 start ecosystem.config.js
pm2 save

echo "✅ Deployment complete!"
echo ""
echo "Service status:"
pm2 status n8n-docs-mcp
ENDSSH

# Clean up local files
rm -rf deploy-package deploy-package.tar.gz

echo -e "\n${GREEN}🎉 Deployment successful!${NC}"
echo -e "\nServer endpoints:"
echo -e "  Health: https://$SERVER_HOST/health"
echo -e "  Stats:  https://$SERVER_HOST/stats"
echo -e "  MCP:    https://$SERVER_HOST/mcp"
echo -e "\nClaude Desktop configuration:"
echo -e "  {
    \"mcpServers\": {
      \"n8n-nodes-remote\": {
        \"command\": \"npx\",
        \"args\": [
          \"-y\",
          \"@modelcontextprotocol/client-http\",
          \"https://$SERVER_HOST/mcp\"
        ],
        \"env\": {
          \"MCP_AUTH_TOKEN\": \"$MCP_AUTH_TOKEN\"
        }
      }
    }
  }"
```

--------------------------------------------------------------------------------
/scripts/migrate-nodes-fts.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env node

import * as path from 'path';
import { createDatabaseAdapter } from '../src/database/database-adapter';
import { logger } from '../src/utils/logger';

/**
 * Migrate existing database to add FTS5 support for nodes
 */
async function migrateNodesFTS() {
  logger.info('Starting nodes FTS5 migration...');
  
  const dbPath = path.join(process.cwd(), 'data', 'nodes.db');
  const db = await createDatabaseAdapter(dbPath);
  
  try {
    // Check if nodes_fts already exists
    const tableExists = db.prepare(`
      SELECT name FROM sqlite_master 
      WHERE type='table' AND name='nodes_fts'
    `).get();
    
    if (tableExists) {
      logger.info('nodes_fts table already exists, skipping migration');
      return;
    }
    
    logger.info('Creating nodes_fts virtual table...');
    
    // Create the FTS5 virtual table
    db.prepare(`
      CREATE VIRTUAL TABLE IF NOT EXISTS nodes_fts USING fts5(
        node_type,
        display_name,
        description,
        documentation,
        operations,
        content=nodes,
        content_rowid=rowid,
        tokenize='porter'
      )
    `).run();
    
    // Populate the FTS table with existing data
    logger.info('Populating nodes_fts with existing data...');
    
    const nodes = db.prepare('SELECT rowid, * FROM nodes').all() as any[];
    logger.info(`Migrating ${nodes.length} nodes to FTS index...`);
    
    const insertStmt = db.prepare(`
      INSERT INTO nodes_fts(rowid, node_type, display_name, description, documentation, operations)
      VALUES (?, ?, ?, ?, ?, ?)
    `);
    
    for (const node of nodes) {
      insertStmt.run(
        node.rowid,
        node.node_type,
        node.display_name,
        node.description || '',
        node.documentation || '',
        node.operations || ''
      );
    }
    
    // Create triggers to keep FTS in sync
    logger.info('Creating synchronization triggers...');
    
    db.prepare(`
      CREATE TRIGGER IF NOT EXISTS nodes_fts_insert AFTER INSERT ON nodes
      BEGIN
        INSERT INTO nodes_fts(rowid, node_type, display_name, description, documentation, operations)
        VALUES (new.rowid, new.node_type, new.display_name, new.description, new.documentation, new.operations);
      END
    `).run();
    
    db.prepare(`
      CREATE TRIGGER IF NOT EXISTS nodes_fts_update AFTER UPDATE ON nodes
      BEGIN
        UPDATE nodes_fts 
        SET node_type = new.node_type,
            display_name = new.display_name,
            description = new.description,
            documentation = new.documentation,
            operations = new.operations
        WHERE rowid = new.rowid;
      END
    `).run();
    
    db.prepare(`
      CREATE TRIGGER IF NOT EXISTS nodes_fts_delete AFTER DELETE ON nodes
      BEGIN
        DELETE FROM nodes_fts WHERE rowid = old.rowid;
      END
    `).run();
    
    // Test the FTS search
    logger.info('Testing FTS search...');
    
    const testResults = db.prepare(`
      SELECT n.* FROM nodes n
      JOIN nodes_fts ON n.rowid = nodes_fts.rowid
      WHERE nodes_fts MATCH 'webhook'
      ORDER BY rank
      LIMIT 5
    `).all();
    
    logger.info(`FTS test search found ${testResults.length} results for 'webhook'`);
    
    // Persist if using sql.js
    if ('persist' in db) {
      logger.info('Persisting database changes...');
      (db as any).persist();
    }
    
    logger.info('✅ FTS5 migration completed successfully!');
    
  } catch (error) {
    logger.error('Migration failed:', error);
    throw error;
  } finally {
    db.close();
  }
}

// Run migration
migrateNodesFTS().catch(error => {
  logger.error('Migration error:', error);
  process.exit(1);
});
```

--------------------------------------------------------------------------------
/src/utils/example-generator.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Generates example workflows and parameters for n8n nodes
 */
export class ExampleGenerator {
  /**
   * Generate an example workflow from node definition
   */
  static generateFromNodeDefinition(nodeDefinition: any): any {
    const nodeName = nodeDefinition.displayName || 'Example Node';
    const nodeType = nodeDefinition.name || 'n8n-nodes-base.exampleNode';
    
    return {
      name: `${nodeName} Example Workflow`,
      nodes: [
        {
          parameters: this.generateExampleParameters(nodeDefinition),
          id: this.generateNodeId(),
          name: nodeName,
          type: nodeType,
          typeVersion: nodeDefinition.version || 1,
          position: [250, 300],
        },
      ],
      connections: {},
      active: false,
      settings: {},
      tags: ['example', 'generated'],
    };
  }

  /**
   * Generate example parameters based on node properties
   */
  static generateExampleParameters(nodeDefinition: any): any {
    const params: any = {};
    
    // If properties are available, generate examples based on them
    if (Array.isArray(nodeDefinition.properties)) {
      for (const prop of nodeDefinition.properties) {
        if (prop.name && prop.type) {
          params[prop.name] = this.generateExampleValue(prop);
        }
      }
    }
    
    // Add common parameters based on node type
    if (nodeDefinition.displayName?.toLowerCase().includes('trigger')) {
      params.pollTimes = {
        item: [
          {
            mode: 'everyMinute',
          },
        ],
      };
    }
    
    return params;
  }

  /**
   * Generate example value based on property definition
   */
  private static generateExampleValue(property: any): any {
    switch (property.type) {
      case 'string':
        if (property.name.toLowerCase().includes('url')) {
          return 'https://example.com';
        }
        if (property.name.toLowerCase().includes('email')) {
          return 'user@example.com';
        }
        if (property.name.toLowerCase().includes('name')) {
          return 'Example Name';
        }
        return property.default || 'example-value';
        
      case 'number':
        return property.default || 10;
        
      case 'boolean':
        return property.default !== undefined ? property.default : true;
        
      case 'options':
        if (property.options && property.options.length > 0) {
          return property.options[0].value;
        }
        return property.default || '';
        
      case 'collection':
      case 'fixedCollection':
        return {};
        
      default:
        return property.default || null;
    }
  }

  /**
   * Generate a unique node ID
   */
  private static generateNodeId(): string {
    return Math.random().toString(36).substring(2, 15) + 
           Math.random().toString(36).substring(2, 15);
  }

  /**
   * Generate example based on node operations
   */
  static generateFromOperations(operations: any[]): any {
    const examples: any[] = [];
    
    if (!operations || operations.length === 0) {
      return examples;
    }
    
    // Group operations by resource
    const resourceMap = new Map<string, any[]>();
    for (const op of operations) {
      if (!resourceMap.has(op.resource)) {
        resourceMap.set(op.resource, []);
      }
      resourceMap.get(op.resource)!.push(op);
    }
    
    // Generate example for each resource
    for (const [resource, ops] of resourceMap) {
      examples.push({
        resource,
        operation: ops[0].operation,
        description: `Example: ${ops[0].description}`,
        parameters: {
          resource,
          operation: ops[0].operation,
        },
      });
    }
    
    return examples;
  }
}
```
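
A minimal usage sketch based only on the static methods above; the node definition below is invented for illustration, while real definitions come from the node database:

```typescript
import { ExampleGenerator } from './src/utils/example-generator';

// Hypothetical node definition with the fields the generator reads:
// displayName, name, version, and a properties array.
const httpRequestNode = {
  displayName: 'HTTP Request',
  name: 'n8n-nodes-base.httpRequest',
  version: 3,
  properties: [
    { name: 'url', type: 'string' },
    { name: 'method', type: 'options', options: [{ value: 'GET' }, { value: 'POST' }] },
  ],
};

const exampleWorkflow = ExampleGenerator.generateFromNodeDefinition(httpRequestNode);
// The 'url' property matches the URL heuristic, so parameters.url becomes
// 'https://example.com'; 'method' falls back to its first option ('GET').
console.log(JSON.stringify(exampleWorkflow, null, 2));
```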

--------------------------------------------------------------------------------
/verify-telemetry-fix.js:
--------------------------------------------------------------------------------

```javascript
#!/usr/bin/env node

/**
 * Verification script to test that telemetry permissions are fixed
 * Run this AFTER applying the GRANT permissions fix
 */

const { createClient } = require('@supabase/supabase-js');
const crypto = require('crypto');

const TELEMETRY_BACKEND = {
  URL: 'https://ydyufsohxdfpopqbubwk.supabase.co',
  ANON_KEY: 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6InlkeXVmc29oeGRmcG9wcWJ1YndrIiwicm9sZSI6ImFub24iLCJpYXQiOjE3NTg3OTYyMDAsImV4cCI6MjA3NDM3MjIwMH0.xESphg6h5ozaDsm4Vla3QnDJGc6Nc_cpfoqTHRynkCk'
};

async function verifyTelemetryFix() {
  console.log('🔍 VERIFYING TELEMETRY PERMISSIONS FIX');
  console.log('====================================\n');

  const supabase = createClient(TELEMETRY_BACKEND.URL, TELEMETRY_BACKEND.ANON_KEY, {
    auth: {
      persistSession: false,
      autoRefreshToken: false,
    }
  });

  const testUserId = 'verify-' + crypto.randomBytes(4).toString('hex');

  // Test 1: Event insert
  console.log('📝 Test 1: Event insert');
  try {
    const { data, error } = await supabase
      .from('telemetry_events')
      .insert([{
        user_id: testUserId,
        event: 'verification_test',
        properties: { fixed: true }
      }]);

    if (error) {
      console.error('❌ Event insert failed:', error.message);
      return false;
    } else {
      console.log('✅ Event insert successful');
    }
  } catch (e) {
    console.error('❌ Event insert exception:', e.message);
    return false;
  }

  // Test 2: Workflow insert
  console.log('📝 Test 2: Workflow insert');
  try {
    const { data, error } = await supabase
      .from('telemetry_workflows')
      .insert([{
        user_id: testUserId,
        workflow_hash: 'verify-' + crypto.randomBytes(4).toString('hex'),
        node_count: 2,
        node_types: ['n8n-nodes-base.webhook', 'n8n-nodes-base.set'],
        has_trigger: true,
        has_webhook: true,
        complexity: 'simple',
        sanitized_workflow: {
          nodes: [{
            id: 'test-node',
            type: 'n8n-nodes-base.webhook',
            position: [100, 100],
            parameters: {}
          }],
          connections: {}
        }
      }]);

    if (error) {
      console.error('❌ Workflow insert failed:', error.message);
      return false;
    } else {
      console.log('✅ Workflow insert successful');
    }
  } catch (e) {
    console.error('❌ Workflow insert exception:', e.message);
    return false;
  }

  // Test 3: Upsert operation (like real telemetry)
  console.log('📝 Test 3: Upsert operation');
  try {
    const workflowHash = 'upsert-verify-' + crypto.randomBytes(4).toString('hex');

    const { data, error } = await supabase
      .from('telemetry_workflows')
      .upsert([{
        user_id: testUserId,
        workflow_hash: workflowHash,
        node_count: 3,
        node_types: ['n8n-nodes-base.webhook', 'n8n-nodes-base.set', 'n8n-nodes-base.if'],
        has_trigger: true,
        has_webhook: true,
        complexity: 'medium',
        sanitized_workflow: {
          nodes: [],
          connections: {}
        }
      }], {
        onConflict: 'workflow_hash',
        ignoreDuplicates: true,
      });

    if (error) {
      console.error('❌ Upsert failed:', error.message);
      return false;
    } else {
      console.log('✅ Upsert successful');
    }
  } catch (e) {
    console.error('❌ Upsert exception:', e.message);
    return false;
  }

  console.log('\n🎉 All tests passed! Telemetry permissions are fixed.');
  console.log('👍 Workflow telemetry should now work in the actual application.');

  return true;
}

async function main() {
  const success = await verifyTelemetryFix();
  process.exit(success ? 0 : 1);
}

main().catch(console.error);
```

--------------------------------------------------------------------------------
/src/mcp/tool-docs/templates/list-node-templates.ts:
--------------------------------------------------------------------------------

```typescript
import { ToolDocumentation } from '../types';

export const listNodeTemplatesDoc: ToolDocumentation = {
  name: 'list_node_templates',
  category: 'templates',
  essentials: {
    description: 'Find templates using specific nodes. 399 community workflows. Use FULL types: "n8n-nodes-base.httpRequest".',
    keyParameters: ['nodeTypes', 'limit'],
    example: 'list_node_templates({nodeTypes: ["n8n-nodes-base.slack"]})',
    performance: 'Fast (<100ms) - indexed node search',
    tips: [
      'Must use FULL node type with package prefix: "n8n-nodes-base.slack"',
      'Can search for multiple nodes to find workflows using all of them',
      'Returns templates sorted by popularity (view count)'
    ]
  },
  full: {
    description: `Finds workflow templates that use specific n8n nodes. This is the best way to discover how particular nodes are used in real workflows. Search the community library of 399+ templates by specifying which nodes you want to see in action. Templates are sorted by popularity to show the most useful examples first.`,
    parameters: {
      nodeTypes: {
        type: 'array',
        required: true,
        description: 'Array of node types to search for. Must use full type names with package prefix (e.g., ["n8n-nodes-base.httpRequest", "n8n-nodes-base.openAi"])'
      },
      limit: {
        type: 'number',
        required: false,
        description: 'Maximum number of templates to return. Default 10, max 100'
      }
    },
    returns: `Returns an object containing:
- templates: Array of matching templates
  - id: Template ID for retrieval
  - name: Template name
  - description: What the workflow does
  - author: Creator details (name, username, verified)
  - nodes: Complete list of nodes used
  - views: View count (popularity metric)
  - created: Creation date
  - url: Link to template on n8n.io
- totalFound: Total number of matching templates
- tip: Usage hints if no results`,
    examples: [
      'list_node_templates({nodeTypes: ["n8n-nodes-base.slack"]}) - Find all Slack workflows',
      'list_node_templates({nodeTypes: ["n8n-nodes-base.httpRequest", "n8n-nodes-base.postgres"]}) - Find workflows using both HTTP and Postgres',
      'list_node_templates({nodeTypes: ["@n8n/n8n-nodes-langchain.openAi"], limit: 20}) - Find AI workflows with OpenAI',
      'list_node_templates({nodeTypes: ["n8n-nodes-base.webhook", "n8n-nodes-base.respondToWebhook"]}) - Find webhook examples'
    ],
    useCases: [
      'Learn how to use specific nodes through examples',
      'Find workflows combining particular integrations',
      'Discover patterns for node combinations',
      'See real-world usage of complex nodes',
      'Find templates for your exact tech stack'
    ],
    performance: `Optimized for node-based searches:
- Indexed by node type for fast lookups
- Query time: <50ms for single node
- Multiple nodes: <100ms (uses AND logic)
- Returns pre-sorted by popularity
- No full-text search needed`,
    bestPractices: [
      'Always use full node type with package prefix',
      'Search for core nodes that define the workflow purpose',
      'Start with single node searches, then refine',
      'Check node types with list_nodes if unsure of names',
      'Review multiple templates to learn different approaches'
    ],
    pitfalls: [
      'Node types must match exactly - no partial matches',
      'Package prefix required: "slack" won\'t work, use "n8n-nodes-base.slack"',
      'Some nodes have version numbers: "n8n-nodes-base.httpRequestV3"',
      'Templates may use old node versions not in current n8n',
      'AND logic means all specified nodes must be present'
    ],
    relatedTools: ['get_template', 'search_templates', 'get_templates_for_task', 'list_nodes']
  }
};
```

--------------------------------------------------------------------------------
/src/mcp-tools-engine.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * MCPEngine - A simplified interface for benchmarking MCP tool execution
 * This directly implements the MCP tool functionality without server dependencies
 */
import { NodeRepository } from './database/node-repository';
import { PropertyFilter } from './services/property-filter';
import { TaskTemplates } from './services/task-templates';
import { ConfigValidator } from './services/config-validator';
import { EnhancedConfigValidator } from './services/enhanced-config-validator';
import { WorkflowValidator, WorkflowValidationResult } from './services/workflow-validator';

export class MCPEngine {
  private workflowValidator: WorkflowValidator;

  constructor(private repository: NodeRepository) {
    this.workflowValidator = new WorkflowValidator(repository, EnhancedConfigValidator);
  }

  async listNodes(args: any = {}) {
    return this.repository.getAllNodes(args.limit);
  }

  async searchNodes(args: any) {
    return this.repository.searchNodes(args.query, args.mode || 'OR', args.limit || 20);
  }

  async getNodeInfo(args: any) {
    return this.repository.getNodeByType(args.nodeType);
  }

  async getNodeEssentials(args: any) {
    const node = await this.repository.getNodeByType(args.nodeType);
    if (!node) return null;
    
    // Filter to essentials using static method
    const essentials = PropertyFilter.getEssentials(node.properties || [], args.nodeType);
    return {
      nodeType: node.nodeType,
      displayName: node.displayName,
      description: node.description,
      category: node.category,
      required: essentials.required,
      common: essentials.common
    };
  }

  async getNodeDocumentation(args: any) {
    const node = await this.repository.getNodeByType(args.nodeType);
    return node?.documentation || null;
  }

  async validateNodeOperation(args: any) {
    // Get node properties and validate
    const node = await this.repository.getNodeByType(args.nodeType);
    if (!node) {
      return {
        valid: false,
        errors: [{ type: 'invalid_configuration', property: '', message: 'Node type not found' }],
        warnings: [],
        suggestions: [],
        visibleProperties: [],
        hiddenProperties: []
      };
    }

    // CRITICAL FIX: Extract user-provided keys before validation
    // This prevents false warnings about default values
    const userProvidedKeys = new Set(Object.keys(args.config || {}));

    return ConfigValidator.validate(args.nodeType, args.config, node.properties || [], userProvidedKeys);
  }

  async validateNodeMinimal(args: any) {
    // Get node and check minimal requirements
    const node = await this.repository.getNodeByType(args.nodeType);
    if (!node) {
      return { missingFields: [], error: 'Node type not found' };
    }
    
    const missingFields: string[] = [];
    const requiredFields = PropertyFilter.getEssentials(node.properties || [], args.nodeType).required;
    
    for (const field of requiredFields) {
      if (!args.config[field.name]) {
        missingFields.push(field.name);
      }
    }
    
    return { missingFields };
  }

  async searchNodeProperties(args: any) {
    return this.repository.searchNodeProperties(args.nodeType, args.query, args.maxResults || 20);
  }

  async listAITools(args: any) {
    return this.repository.getAIToolNodes();
  }

  async getDatabaseStatistics(args: any) {
    const count = await this.repository.getNodeCount();
    const aiTools = await this.repository.getAIToolNodes();
    return {
      totalNodes: count,
      aiToolsCount: aiTools.length,
      categories: ['trigger', 'transform', 'output', 'input']
    };
  }

  async validateWorkflow(args: any): Promise<WorkflowValidationResult> {
    return this.workflowValidator.validateWorkflow(args.workflow, args.options);
  }
}
```
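
A usage sketch under the assumption that a `NodeRepository` instance is already available (its construction and the database adapter wiring are outside this file):

```typescript
import { MCPEngine } from './src/mcp-tools-engine';
import { NodeRepository } from './src/database/node-repository';

// `repository` is assumed to be constructed elsewhere (database adapter not shown).
async function runBenchmarkQueries(repository: NodeRepository) {
  const engine = new MCPEngine(repository);

  // Mirrors the signatures above: mode defaults to 'OR', limit defaults to 20.
  const matches = await engine.searchNodes({ query: 'webhook', limit: 5 });

  // Essentials are a filtered view of the node's properties.
  const essentials = await engine.getNodeEssentials({
    nodeType: 'n8n-nodes-base.httpRequest',
  });

  return { matches, essentials };
}
```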

--------------------------------------------------------------------------------
/docs/tools-documentation-usage.md:
--------------------------------------------------------------------------------

```markdown
# MCP Tools Documentation Usage Guide

The `tools_documentation` tool provides comprehensive documentation for all MCP tools, making it easy for LLMs to understand how to use the tools effectively.

## Basic Usage

### 1. Get Documentation for Specific Tools

```json
{
  "name": "tools_documentation",
  "arguments": {
    "tools": ["search_nodes", "get_node_essentials"]
  }
}
```

Returns detailed documentation including parameters, examples, and best practices for the specified tools.

### 2. Search Tools by Keyword

```json
{
  "name": "tools_documentation",
  "arguments": {
    "search": "validation"
  }
}
```

Finds all tools related to validation, including their descriptions and use cases.

### 3. Browse Tools by Category

```json
{
  "name": "tools_documentation",
  "arguments": {
    "category": "workflow_management"
  }
}
```

Available categories:
- **discovery**: Tools for finding and exploring nodes
- **configuration**: Tools for configuring nodes
- **validation**: Tools for validating configurations
- **workflow_management**: Tools for creating and updating workflows
- **execution**: Tools for running workflows
- **templates**: Tools for working with workflow templates

### 4. Get All Categories

```json
{
  "name": "tools_documentation",
  "arguments": {}
}
```

Returns a list of all categories and the tools in each category.

### 5. Include Quick Reference Guide

```json
{
  "name": "tools_documentation",
  "arguments": {
    "tools": ["n8n_create_workflow"],
    "includeQuickReference": true
  }
}
```

Includes a quick reference guide with workflow building process, performance tips, and common patterns.

## Response Format

The tool returns structured documentation with:

- **Parameters**: Complete parameter descriptions with types, requirements, and defaults
- **Return Format**: Example of what the tool returns
- **Common Use Cases**: Real-world scenarios where the tool is useful
- **Examples**: Working examples with input and expected output
- **Performance Notes**: Speed and efficiency considerations
- **Best Practices**: Recommended usage patterns
- **Common Pitfalls**: Mistakes to avoid
- **Related Tools**: Other tools that work well together

## Example: Learning About search_nodes

Request:
```json
{
  "name": "tools_documentation",
  "arguments": {
    "tools": ["search_nodes"]
  }
}
```

Response includes:
- How to search effectively (single words work best)
- Performance characteristics (fast, cached)
- Common searches (http, webhook, email, database, slack)
- Pitfalls to avoid (multi-word searches use OR logic)
- Related tools for next steps

## Tips for LLMs

1. **Start with categories**: Browse available tools by category to understand what's possible
2. **Search by task**: Use search to find tools for specific tasks like "validation" or "workflow"
3. **Learn tool combinations**: Check "Related Tools" to understand workflow patterns
4. **Check examples**: Every tool has working examples to copy and modify
5. **Avoid pitfalls**: Pay attention to "Common Pitfalls" to prevent errors

## Integration with Workflow Building

The documentation helps build workflows efficiently:

1. **Discovery Phase**: Use `search_nodes` and `list_nodes` documentation
2. **Configuration Phase**: Learn from `get_node_essentials` examples
3. **Validation Phase**: Understand validation tool options and profiles
4. **Creation Phase**: Follow `n8n_create_workflow` best practices
5. **Update Phase**: Master `n8n_update_partial_workflow` operations
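
For example, one request can fetch documentation for tools spanning several of these phases (tool names as listed above):

```json
{
  "name": "tools_documentation",
  "arguments": {
    "tools": ["search_nodes", "get_node_essentials", "n8n_create_workflow", "n8n_update_partial_workflow"]
  }
}
```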

## Performance Optimization

The documentation emphasizes performance:
- Which tools are fast (essentials) vs slow (full info)
- Optimal parameters (e.g., limit: 200+ for list_nodes)
- Caching behavior
- Token savings with partial updates
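
For instance, the optimal-parameters tip above translates to calling `list_nodes` once with a high limit:

```json
{
  "name": "list_nodes",
  "arguments": {
    "limit": 200
  }
}
```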

This documentation system ensures LLMs can use the MCP tools effectively without trial and error.
```

--------------------------------------------------------------------------------
/tests/test-storage-system.js:
--------------------------------------------------------------------------------

```javascript
#!/usr/bin/env node

/**
 * Test the node storage and search system
 */

const { NodeSourceExtractor } = require('../dist/utils/node-source-extractor');
const { NodeStorageService } = require('../dist/services/node-storage-service');
const path = require('path');

async function testStorageSystem() {
  console.log('=== Node Storage System Test ===\n');
  
  const extractor = new NodeSourceExtractor();
  const storage = new NodeStorageService();
  
  // 1. Extract and store some nodes
  console.log('1. Extracting and storing nodes...\n');
  
  const testNodes = [
    'n8n-nodes-base.Function',
    'n8n-nodes-base.Webhook',
    'n8n-nodes-base.HttpRequest',
    '@n8n/n8n-nodes-langchain.Agent'
  ];
  
  let stored = 0;
  for (const nodeType of testNodes) {
    try {
      console.log(`  Extracting ${nodeType}...`);
      const nodeInfo = await extractor.extractNodeSource(nodeType);
      await storage.storeNode(nodeInfo);
      stored++;
      console.log(`  ✅ Stored successfully`);
    } catch (error) {
      console.log(`  ❌ Failed: ${error.message}`);
    }
  }
  
  console.log(`\n  Total stored: ${stored}/${testNodes.length}\n`);
  
  // 2. Test search functionality
  console.log('2. Testing search functionality...\n');
  
  const searchTests = [
    { query: 'function', desc: 'Search for "function"' },
    { query: 'webhook', desc: 'Search for "webhook"' },
    { packageName: 'n8n-nodes-base', desc: 'Filter by package' },
    { hasCredentials: false, desc: 'Nodes without credentials' }
  ];
  
  for (const test of searchTests) {
    console.log(`  ${test.desc}:`);
    const results = await storage.searchNodes(test);
    console.log(`    Found ${results.length} nodes`);
    if (results.length > 0) {
      console.log(`    First result: ${results[0].nodeType}`);
    }
  }
  
  // 3. Get statistics
  console.log('\n3. Storage statistics:\n');
  
  const stats = await storage.getStatistics();
  console.log(`  Total nodes: ${stats.totalNodes}`);
  console.log(`  Total packages: ${stats.totalPackages}`);
  console.log(`  Total code size: ${(stats.totalCodeSize / 1024).toFixed(2)} KB`);
  console.log(`  Average node size: ${(stats.averageNodeSize / 1024).toFixed(2)} KB`);
  console.log(`  Nodes with credentials: ${stats.nodesWithCredentials}`);
  
  console.log('\n  Package distribution:');
  stats.packageDistribution.forEach(pkg => {
    console.log(`    ${pkg.package}: ${pkg.count} nodes`);
  });
  
  // 4. Test bulk extraction
  console.log('\n4. Testing bulk extraction (first 10 nodes)...\n');
  
  const allNodes = await extractor.listAvailableNodes();
  const nodesToExtract = allNodes.slice(0, 10);
  
  const nodeInfos = [];
  for (const node of nodesToExtract) {
    try {
      const nodeType = node.packageName ? `${node.packageName}.${node.name}` : node.name;
      const nodeInfo = await extractor.extractNodeSource(nodeType);
      nodeInfos.push(nodeInfo);
    } catch (error) {
      // Skip failed extractions
    }
  }
  
  if (nodeInfos.length > 0) {
    const bulkResult = await storage.bulkStoreNodes(nodeInfos);
    console.log(`  Bulk stored: ${bulkResult.stored}`);
    console.log(`  Failed: ${bulkResult.failed}`);
  }
  
  // 5. Export for database
  console.log('\n5. Exporting for database...\n');
  
  const dbExport = await storage.exportForDatabase();
  console.log(`  Exported ${dbExport.nodes.length} nodes`);
  console.log(`  Total packages: ${dbExport.metadata.totalPackages}`);
  console.log(`  Export timestamp: ${dbExport.metadata.exportedAt}`);
  
  // Save export to file
  const fs = require('fs').promises;
  const exportFile = path.join(__dirname, 'node-storage-export.json');
  await fs.writeFile(exportFile, JSON.stringify(dbExport, null, 2));
  console.log(`  Saved to: ${exportFile}`);
  
  console.log('\n✅ Storage system test completed!');
}

testStorageSystem().catch(console.error);
```

--------------------------------------------------------------------------------
/src/utils/auth.ts:
--------------------------------------------------------------------------------

```typescript
import crypto from 'crypto';

export class AuthManager {
  private validTokens: Set<string>;
  private tokenExpiry: Map<string, number>;

  constructor() {
    this.validTokens = new Set();
    this.tokenExpiry = new Map();
  }

  /**
   * Validate an authentication token
   */
  validateToken(token: string | undefined, expectedToken?: string): boolean {
    if (!expectedToken) {
      // No authentication required
      return true;
    }

    if (!token) {
      return false;
    }

    // SECURITY: Use timing-safe comparison for static token
    // See: https://github.com/czlonkowski/n8n-mcp/issues/265 (CRITICAL-02)
    if (AuthManager.timingSafeCompare(token, expectedToken)) {
      return true;
    }

    // Check dynamic tokens
    if (this.validTokens.has(token)) {
      const expiry = this.tokenExpiry.get(token);
      if (expiry && expiry > Date.now()) {
        return true;
      } else {
        // Token expired
        this.validTokens.delete(token);
        this.tokenExpiry.delete(token);
        return false;
      }
    }

    return false;
  }

  /**
   * Generate a new authentication token
   */
  generateToken(expiryHours: number = 24): string {
    const token = crypto.randomBytes(32).toString('hex');
    const expiryTime = Date.now() + (expiryHours * 60 * 60 * 1000);

    this.validTokens.add(token);
    this.tokenExpiry.set(token, expiryTime);

    // Clean up expired tokens
    this.cleanupExpiredTokens();

    return token;
  }

  /**
   * Revoke a token
   */
  revokeToken(token: string): void {
    this.validTokens.delete(token);
    this.tokenExpiry.delete(token);
  }

  /**
   * Clean up expired tokens
   */
  private cleanupExpiredTokens(): void {
    const now = Date.now();
    for (const [token, expiry] of this.tokenExpiry.entries()) {
      if (expiry <= now) {
        this.validTokens.delete(token);
        this.tokenExpiry.delete(token);
      }
    }
  }

  /**
   * Hash a password or token for secure storage
   */
  static hashToken(token: string): string {
    return crypto.createHash('sha256').update(token).digest('hex');
  }

  /**
   * Compare a plain token with a hashed token
   */
  static compareTokens(plainToken: string, hashedToken: string): boolean {
    const hashedPlainToken = AuthManager.hashToken(plainToken);
    return crypto.timingSafeEqual(
      Buffer.from(hashedPlainToken),
      Buffer.from(hashedToken)
    );
  }

  /**
   * Compare two tokens using constant-time algorithm to prevent timing attacks
   *
   * @param plainToken - Token from request
   * @param expectedToken - Expected token value
   * @returns true if tokens match, false otherwise
   *
   * @security This uses crypto.timingSafeEqual to prevent timing attack vulnerabilities.
   * Never use === or !== for token comparison as it allows attackers to discover
   * tokens character-by-character through timing analysis.
   *
   * @example
   * const isValid = AuthManager.timingSafeCompare(requestToken, serverToken);
   * if (!isValid) {
   *   return res.status(401).json({ error: 'Unauthorized' });
   * }
   *
   * @see https://github.com/czlonkowski/n8n-mcp/issues/265 (CRITICAL-02)
   */
  static timingSafeCompare(plainToken: string, expectedToken: string): boolean {
    try {
      // Tokens must be non-empty
      if (!plainToken || !expectedToken) {
        return false;
      }

      // Convert to buffers
      const plainBuffer = Buffer.from(plainToken, 'utf8');
      const expectedBuffer = Buffer.from(expectedToken, 'utf8');

      // Check length first (constant time not needed for length comparison)
      if (plainBuffer.length !== expectedBuffer.length) {
        return false;
      }

      // Constant-time comparison
      return crypto.timingSafeEqual(plainBuffer, expectedBuffer);
    } catch (error) {
      // Buffer conversion or comparison failed
      return false;
    }
  }
}
```
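
A usage sketch showing the intended call pattern; the `AUTH_TOKEN` variable name and the Bearer-prefix handling are assumptions for illustration, not part of this module:

```typescript
import { AuthManager } from './src/utils/auth';

const auth = new AuthManager();

// Assumed source of the expected token; if it is unset, validateToken treats
// authentication as disabled and returns true.
const expectedToken = process.env.AUTH_TOKEN;

export function isAuthorized(authorizationHeader: string | undefined): boolean {
  // Strip an optional "Bearer " prefix before the timing-safe comparison.
  const token = authorizationHeader?.replace(/^Bearer\s+/i, '');
  return auth.validateToken(token, expectedToken);
}

// Dynamic tokens can also be issued and later revoked; this one expires in 1 hour.
const temporaryToken = auth.generateToken(1);
```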