#
tokens: 48066/50000 18/614 files (page 10/45)
lines: off (toggle) GitHub
raw markdown copy
This is page 10 of 45. Use http://codebase.md/czlonkowski/n8n-mcp?lines=false&page={x} to view the full context.

# Directory Structure

```
├── _config.yml
├── .claude
│   └── agents
│       ├── code-reviewer.md
│       ├── context-manager.md
│       ├── debugger.md
│       ├── deployment-engineer.md
│       ├── mcp-backend-engineer.md
│       ├── n8n-mcp-tester.md
│       ├── technical-researcher.md
│       └── test-automator.md
├── .dockerignore
├── .env.docker
├── .env.example
├── .env.n8n.example
├── .env.test
├── .env.test.example
├── .github
│   ├── ABOUT.md
│   ├── BENCHMARK_THRESHOLDS.md
│   ├── FUNDING.yml
│   ├── gh-pages.yml
│   ├── secret_scanning.yml
│   └── workflows
│       ├── benchmark-pr.yml
│       ├── benchmark.yml
│       ├── docker-build-fast.yml
│       ├── docker-build-n8n.yml
│       ├── docker-build.yml
│       ├── release.yml
│       ├── test.yml
│       └── update-n8n-deps.yml
├── .gitignore
├── .npmignore
├── ATTRIBUTION.md
├── CHANGELOG.md
├── CLAUDE.md
├── codecov.yml
├── coverage.json
├── data
│   ├── .gitkeep
│   ├── nodes.db
│   ├── nodes.db-shm
│   ├── nodes.db-wal
│   └── templates.db
├── deploy
│   └── quick-deploy-n8n.sh
├── docker
│   ├── docker-entrypoint.sh
│   ├── n8n-mcp
│   ├── parse-config.js
│   └── README.md
├── docker-compose.buildkit.yml
├── docker-compose.extract.yml
├── docker-compose.n8n.yml
├── docker-compose.override.yml.example
├── docker-compose.test-n8n.yml
├── docker-compose.yml
├── Dockerfile
├── Dockerfile.railway
├── Dockerfile.test
├── docs
│   ├── AUTOMATED_RELEASES.md
│   ├── BENCHMARKS.md
│   ├── CHANGELOG.md
│   ├── CLAUDE_CODE_SETUP.md
│   ├── CLAUDE_INTERVIEW.md
│   ├── CODECOV_SETUP.md
│   ├── CODEX_SETUP.md
│   ├── CURSOR_SETUP.md
│   ├── DEPENDENCY_UPDATES.md
│   ├── DOCKER_README.md
│   ├── DOCKER_TROUBLESHOOTING.md
│   ├── FINAL_AI_VALIDATION_SPEC.md
│   ├── FLEXIBLE_INSTANCE_CONFIGURATION.md
│   ├── HTTP_DEPLOYMENT.md
│   ├── img
│   │   ├── cc_command.png
│   │   ├── cc_connected.png
│   │   ├── codex_connected.png
│   │   ├── cursor_tut.png
│   │   ├── Railway_api.png
│   │   ├── Railway_server_address.png
│   │   ├── vsc_ghcp_chat_agent_mode.png
│   │   ├── vsc_ghcp_chat_instruction_files.png
│   │   ├── vsc_ghcp_chat_thinking_tool.png
│   │   └── windsurf_tut.png
│   ├── INSTALLATION.md
│   ├── LIBRARY_USAGE.md
│   ├── local
│   │   ├── DEEP_DIVE_ANALYSIS_2025-10-02.md
│   │   ├── DEEP_DIVE_ANALYSIS_README.md
│   │   ├── Deep_dive_p1_p2.md
│   │   ├── integration-testing-plan.md
│   │   ├── integration-tests-phase1-summary.md
│   │   ├── N8N_AI_WORKFLOW_BUILDER_ANALYSIS.md
│   │   ├── P0_IMPLEMENTATION_PLAN.md
│   │   └── TEMPLATE_MINING_ANALYSIS.md
│   ├── MCP_ESSENTIALS_README.md
│   ├── MCP_QUICK_START_GUIDE.md
│   ├── N8N_DEPLOYMENT.md
│   ├── RAILWAY_DEPLOYMENT.md
│   ├── README_CLAUDE_SETUP.md
│   ├── README.md
│   ├── tools-documentation-usage.md
│   ├── VS_CODE_PROJECT_SETUP.md
│   ├── WINDSURF_SETUP.md
│   └── workflow-diff-examples.md
├── examples
│   └── enhanced-documentation-demo.js
├── fetch_log.txt
├── LICENSE
├── MEMORY_N8N_UPDATE.md
├── MEMORY_TEMPLATE_UPDATE.md
├── monitor_fetch.sh
├── N8N_HTTP_STREAMABLE_SETUP.md
├── n8n-nodes.db
├── P0-R3-TEST-PLAN.md
├── package-lock.json
├── package.json
├── package.runtime.json
├── PRIVACY.md
├── railway.json
├── README.md
├── renovate.json
├── scripts
│   ├── analyze-optimization.sh
│   ├── audit-schema-coverage.ts
│   ├── build-optimized.sh
│   ├── compare-benchmarks.js
│   ├── demo-optimization.sh
│   ├── deploy-http.sh
│   ├── deploy-to-vm.sh
│   ├── export-webhook-workflows.ts
│   ├── extract-changelog.js
│   ├── extract-from-docker.js
│   ├── extract-nodes-docker.sh
│   ├── extract-nodes-simple.sh
│   ├── format-benchmark-results.js
│   ├── generate-benchmark-stub.js
│   ├── generate-detailed-reports.js
│   ├── generate-test-summary.js
│   ├── http-bridge.js
│   ├── mcp-http-client.js
│   ├── migrate-nodes-fts.ts
│   ├── migrate-tool-docs.ts
│   ├── n8n-docs-mcp.service
│   ├── nginx-n8n-mcp.conf
│   ├── prebuild-fts5.ts
│   ├── prepare-release.js
│   ├── publish-npm-quick.sh
│   ├── publish-npm.sh
│   ├── quick-test.ts
│   ├── run-benchmarks-ci.js
│   ├── sync-runtime-version.js
│   ├── test-ai-validation-debug.ts
│   ├── test-code-node-enhancements.ts
│   ├── test-code-node-fixes.ts
│   ├── test-docker-config.sh
│   ├── test-docker-fingerprint.ts
│   ├── test-docker-optimization.sh
│   ├── test-docker.sh
│   ├── test-empty-connection-validation.ts
│   ├── test-error-message-tracking.ts
│   ├── test-error-output-validation.ts
│   ├── test-error-validation.js
│   ├── test-essentials.ts
│   ├── test-expression-code-validation.ts
│   ├── test-expression-format-validation.js
│   ├── test-fts5-search.ts
│   ├── test-fuzzy-fix.ts
│   ├── test-fuzzy-simple.ts
│   ├── test-helpers-validation.ts
│   ├── test-http-search.ts
│   ├── test-http.sh
│   ├── test-jmespath-validation.ts
│   ├── test-multi-tenant-simple.ts
│   ├── test-multi-tenant.ts
│   ├── test-n8n-integration.sh
│   ├── test-node-info.js
│   ├── test-node-type-validation.ts
│   ├── test-nodes-base-prefix.ts
│   ├── test-operation-validation.ts
│   ├── test-optimized-docker.sh
│   ├── test-release-automation.js
│   ├── test-search-improvements.ts
│   ├── test-security.ts
│   ├── test-single-session.sh
│   ├── test-sqljs-triggers.ts
│   ├── test-telemetry-debug.ts
│   ├── test-telemetry-direct.ts
│   ├── test-telemetry-env.ts
│   ├── test-telemetry-integration.ts
│   ├── test-telemetry-no-select.ts
│   ├── test-telemetry-security.ts
│   ├── test-telemetry-simple.ts
│   ├── test-typeversion-validation.ts
│   ├── test-url-configuration.ts
│   ├── test-user-id-persistence.ts
│   ├── test-webhook-validation.ts
│   ├── test-workflow-insert.ts
│   ├── test-workflow-sanitizer.ts
│   ├── test-workflow-tracking-debug.ts
│   ├── update-and-publish-prep.sh
│   ├── update-n8n-deps.js
│   ├── update-readme-version.js
│   ├── vitest-benchmark-json-reporter.js
│   └── vitest-benchmark-reporter.ts
├── SECURITY.md
├── src
│   ├── config
│   │   └── n8n-api.ts
│   ├── data
│   │   └── canonical-ai-tool-examples.json
│   ├── database
│   │   ├── database-adapter.ts
│   │   ├── migrations
│   │   │   └── add-template-node-configs.sql
│   │   ├── node-repository.ts
│   │   ├── nodes.db
│   │   ├── schema-optimized.sql
│   │   └── schema.sql
│   ├── errors
│   │   └── validation-service-error.ts
│   ├── http-server-single-session.ts
│   ├── http-server.ts
│   ├── index.ts
│   ├── loaders
│   │   └── node-loader.ts
│   ├── mappers
│   │   └── docs-mapper.ts
│   ├── mcp
│   │   ├── handlers-n8n-manager.ts
│   │   ├── handlers-workflow-diff.ts
│   │   ├── index.ts
│   │   ├── server.ts
│   │   ├── stdio-wrapper.ts
│   │   ├── tool-docs
│   │   │   ├── configuration
│   │   │   │   ├── get-node-as-tool-info.ts
│   │   │   │   ├── get-node-documentation.ts
│   │   │   │   ├── get-node-essentials.ts
│   │   │   │   ├── get-node-info.ts
│   │   │   │   ├── get-property-dependencies.ts
│   │   │   │   ├── index.ts
│   │   │   │   └── search-node-properties.ts
│   │   │   ├── discovery
│   │   │   │   ├── get-database-statistics.ts
│   │   │   │   ├── index.ts
│   │   │   │   ├── list-ai-tools.ts
│   │   │   │   ├── list-nodes.ts
│   │   │   │   └── search-nodes.ts
│   │   │   ├── guides
│   │   │   │   ├── ai-agents-guide.ts
│   │   │   │   └── index.ts
│   │   │   ├── index.ts
│   │   │   ├── system
│   │   │   │   ├── index.ts
│   │   │   │   ├── n8n-diagnostic.ts
│   │   │   │   ├── n8n-health-check.ts
│   │   │   │   ├── n8n-list-available-tools.ts
│   │   │   │   └── tools-documentation.ts
│   │   │   ├── templates
│   │   │   │   ├── get-template.ts
│   │   │   │   ├── get-templates-for-task.ts
│   │   │   │   ├── index.ts
│   │   │   │   ├── list-node-templates.ts
│   │   │   │   ├── list-tasks.ts
│   │   │   │   ├── search-templates-by-metadata.ts
│   │   │   │   └── search-templates.ts
│   │   │   ├── types.ts
│   │   │   ├── validation
│   │   │   │   ├── index.ts
│   │   │   │   ├── validate-node-minimal.ts
│   │   │   │   ├── validate-node-operation.ts
│   │   │   │   ├── validate-workflow-connections.ts
│   │   │   │   ├── validate-workflow-expressions.ts
│   │   │   │   └── validate-workflow.ts
│   │   │   └── workflow_management
│   │   │       ├── index.ts
│   │   │       ├── n8n-autofix-workflow.ts
│   │   │       ├── n8n-create-workflow.ts
│   │   │       ├── n8n-delete-execution.ts
│   │   │       ├── n8n-delete-workflow.ts
│   │   │       ├── n8n-get-execution.ts
│   │   │       ├── n8n-get-workflow-details.ts
│   │   │       ├── n8n-get-workflow-minimal.ts
│   │   │       ├── n8n-get-workflow-structure.ts
│   │   │       ├── n8n-get-workflow.ts
│   │   │       ├── n8n-list-executions.ts
│   │   │       ├── n8n-list-workflows.ts
│   │   │       ├── n8n-trigger-webhook-workflow.ts
│   │   │       ├── n8n-update-full-workflow.ts
│   │   │       ├── n8n-update-partial-workflow.ts
│   │   │       └── n8n-validate-workflow.ts
│   │   ├── tools-documentation.ts
│   │   ├── tools-n8n-friendly.ts
│   │   ├── tools-n8n-manager.ts
│   │   ├── tools.ts
│   │   └── workflow-examples.ts
│   ├── mcp-engine.ts
│   ├── mcp-tools-engine.ts
│   ├── n8n
│   │   ├── MCPApi.credentials.ts
│   │   └── MCPNode.node.ts
│   ├── parsers
│   │   ├── node-parser.ts
│   │   ├── property-extractor.ts
│   │   └── simple-parser.ts
│   ├── scripts
│   │   ├── debug-http-search.ts
│   │   ├── extract-from-docker.ts
│   │   ├── fetch-templates-robust.ts
│   │   ├── fetch-templates.ts
│   │   ├── rebuild-database.ts
│   │   ├── rebuild-optimized.ts
│   │   ├── rebuild.ts
│   │   ├── sanitize-templates.ts
│   │   ├── seed-canonical-ai-examples.ts
│   │   ├── test-autofix-documentation.ts
│   │   ├── test-autofix-workflow.ts
│   │   ├── test-execution-filtering.ts
│   │   ├── test-node-suggestions.ts
│   │   ├── test-protocol-negotiation.ts
│   │   ├── test-summary.ts
│   │   ├── test-webhook-autofix.ts
│   │   ├── validate.ts
│   │   └── validation-summary.ts
│   ├── services
│   │   ├── ai-node-validator.ts
│   │   ├── ai-tool-validators.ts
│   │   ├── confidence-scorer.ts
│   │   ├── config-validator.ts
│   │   ├── enhanced-config-validator.ts
│   │   ├── example-generator.ts
│   │   ├── execution-processor.ts
│   │   ├── expression-format-validator.ts
│   │   ├── expression-validator.ts
│   │   ├── n8n-api-client.ts
│   │   ├── n8n-validation.ts
│   │   ├── node-documentation-service.ts
│   │   ├── node-similarity-service.ts
│   │   ├── node-specific-validators.ts
│   │   ├── operation-similarity-service.ts
│   │   ├── property-dependencies.ts
│   │   ├── property-filter.ts
│   │   ├── resource-similarity-service.ts
│   │   ├── sqlite-storage-service.ts
│   │   ├── task-templates.ts
│   │   ├── universal-expression-validator.ts
│   │   ├── workflow-auto-fixer.ts
│   │   ├── workflow-diff-engine.ts
│   │   └── workflow-validator.ts
│   ├── telemetry
│   │   ├── batch-processor.ts
│   │   ├── config-manager.ts
│   │   ├── early-error-logger.ts
│   │   ├── error-sanitization-utils.ts
│   │   ├── error-sanitizer.ts
│   │   ├── event-tracker.ts
│   │   ├── event-validator.ts
│   │   ├── index.ts
│   │   ├── performance-monitor.ts
│   │   ├── rate-limiter.ts
│   │   ├── startup-checkpoints.ts
│   │   ├── telemetry-error.ts
│   │   ├── telemetry-manager.ts
│   │   ├── telemetry-types.ts
│   │   └── workflow-sanitizer.ts
│   ├── templates
│   │   ├── batch-processor.ts
│   │   ├── metadata-generator.ts
│   │   ├── README.md
│   │   ├── template-fetcher.ts
│   │   ├── template-repository.ts
│   │   └── template-service.ts
│   ├── types
│   │   ├── index.ts
│   │   ├── instance-context.ts
│   │   ├── n8n-api.ts
│   │   ├── node-types.ts
│   │   └── workflow-diff.ts
│   └── utils
│       ├── auth.ts
│       ├── bridge.ts
│       ├── cache-utils.ts
│       ├── console-manager.ts
│       ├── documentation-fetcher.ts
│       ├── enhanced-documentation-fetcher.ts
│       ├── error-handler.ts
│       ├── example-generator.ts
│       ├── fixed-collection-validator.ts
│       ├── logger.ts
│       ├── mcp-client.ts
│       ├── n8n-errors.ts
│       ├── node-source-extractor.ts
│       ├── node-type-normalizer.ts
│       ├── node-type-utils.ts
│       ├── node-utils.ts
│       ├── npm-version-checker.ts
│       ├── protocol-version.ts
│       ├── simple-cache.ts
│       ├── ssrf-protection.ts
│       ├── template-node-resolver.ts
│       ├── template-sanitizer.ts
│       ├── url-detector.ts
│       ├── validation-schemas.ts
│       └── version.ts
├── test-output.txt
├── test-reinit-fix.sh
├── tests
│   ├── __snapshots__
│   │   └── .gitkeep
│   ├── auth.test.ts
│   ├── benchmarks
│   │   ├── database-queries.bench.ts
│   │   ├── index.ts
│   │   ├── mcp-tools.bench.ts
│   │   ├── mcp-tools.bench.ts.disabled
│   │   ├── mcp-tools.bench.ts.skip
│   │   ├── node-loading.bench.ts.disabled
│   │   ├── README.md
│   │   ├── search-operations.bench.ts.disabled
│   │   └── validation-performance.bench.ts.disabled
│   ├── bridge.test.ts
│   ├── comprehensive-extraction-test.js
│   ├── data
│   │   └── .gitkeep
│   ├── debug-slack-doc.js
│   ├── demo-enhanced-documentation.js
│   ├── docker-tests-README.md
│   ├── error-handler.test.ts
│   ├── examples
│   │   └── using-database-utils.test.ts
│   ├── extracted-nodes-db
│   │   ├── database-import.json
│   │   ├── extraction-report.json
│   │   ├── insert-nodes.sql
│   │   ├── n8n-nodes-base__Airtable.json
│   │   ├── n8n-nodes-base__Discord.json
│   │   ├── n8n-nodes-base__Function.json
│   │   ├── n8n-nodes-base__HttpRequest.json
│   │   ├── n8n-nodes-base__If.json
│   │   ├── n8n-nodes-base__Slack.json
│   │   ├── n8n-nodes-base__SplitInBatches.json
│   │   └── n8n-nodes-base__Webhook.json
│   ├── factories
│   │   ├── node-factory.ts
│   │   └── property-definition-factory.ts
│   ├── fixtures
│   │   ├── .gitkeep
│   │   ├── database
│   │   │   └── test-nodes.json
│   │   ├── factories
│   │   │   ├── node.factory.ts
│   │   │   └── parser-node.factory.ts
│   │   └── template-configs.ts
│   ├── helpers
│   │   └── env-helpers.ts
│   ├── http-server-auth.test.ts
│   ├── integration
│   │   ├── ai-validation
│   │   │   ├── ai-agent-validation.test.ts
│   │   │   ├── ai-tool-validation.test.ts
│   │   │   ├── chat-trigger-validation.test.ts
│   │   │   ├── e2e-validation.test.ts
│   │   │   ├── helpers.ts
│   │   │   ├── llm-chain-validation.test.ts
│   │   │   ├── README.md
│   │   │   └── TEST_REPORT.md
│   │   ├── ci
│   │   │   └── database-population.test.ts
│   │   ├── database
│   │   │   ├── connection-management.test.ts
│   │   │   ├── empty-database.test.ts
│   │   │   ├── fts5-search.test.ts
│   │   │   ├── node-fts5-search.test.ts
│   │   │   ├── node-repository.test.ts
│   │   │   ├── performance.test.ts
│   │   │   ├── template-node-configs.test.ts
│   │   │   ├── template-repository.test.ts
│   │   │   ├── test-utils.ts
│   │   │   └── transactions.test.ts
│   │   ├── database-integration.test.ts
│   │   ├── docker
│   │   │   ├── docker-config.test.ts
│   │   │   ├── docker-entrypoint.test.ts
│   │   │   └── test-helpers.ts
│   │   ├── flexible-instance-config.test.ts
│   │   ├── mcp
│   │   │   └── template-examples-e2e.test.ts
│   │   ├── mcp-protocol
│   │   │   ├── basic-connection.test.ts
│   │   │   ├── error-handling.test.ts
│   │   │   ├── performance.test.ts
│   │   │   ├── protocol-compliance.test.ts
│   │   │   ├── README.md
│   │   │   ├── session-management.test.ts
│   │   │   ├── test-helpers.ts
│   │   │   ├── tool-invocation.test.ts
│   │   │   └── workflow-error-validation.test.ts
│   │   ├── msw-setup.test.ts
│   │   ├── n8n-api
│   │   │   ├── executions
│   │   │   │   ├── delete-execution.test.ts
│   │   │   │   ├── get-execution.test.ts
│   │   │   │   ├── list-executions.test.ts
│   │   │   │   └── trigger-webhook.test.ts
│   │   │   ├── scripts
│   │   │   │   └── cleanup-orphans.ts
│   │   │   ├── system
│   │   │   │   ├── diagnostic.test.ts
│   │   │   │   ├── health-check.test.ts
│   │   │   │   └── list-tools.test.ts
│   │   │   ├── test-connection.ts
│   │   │   ├── types
│   │   │   │   └── mcp-responses.ts
│   │   │   ├── utils
│   │   │   │   ├── cleanup-helpers.ts
│   │   │   │   ├── credentials.ts
│   │   │   │   ├── factories.ts
│   │   │   │   ├── fixtures.ts
│   │   │   │   ├── mcp-context.ts
│   │   │   │   ├── n8n-client.ts
│   │   │   │   ├── node-repository.ts
│   │   │   │   ├── response-types.ts
│   │   │   │   ├── test-context.ts
│   │   │   │   └── webhook-workflows.ts
│   │   │   └── workflows
│   │   │       ├── autofix-workflow.test.ts
│   │   │       ├── create-workflow.test.ts
│   │   │       ├── delete-workflow.test.ts
│   │   │       ├── get-workflow-details.test.ts
│   │   │       ├── get-workflow-minimal.test.ts
│   │   │       ├── get-workflow-structure.test.ts
│   │   │       ├── get-workflow.test.ts
│   │   │       ├── list-workflows.test.ts
│   │   │       ├── smart-parameters.test.ts
│   │   │       ├── update-partial-workflow.test.ts
│   │   │       ├── update-workflow.test.ts
│   │   │       └── validate-workflow.test.ts
│   │   ├── security
│   │   │   ├── command-injection-prevention.test.ts
│   │   │   └── rate-limiting.test.ts
│   │   ├── setup
│   │   │   ├── integration-setup.ts
│   │   │   └── msw-test-server.ts
│   │   ├── telemetry
│   │   │   ├── docker-user-id-stability.test.ts
│   │   │   └── mcp-telemetry.test.ts
│   │   ├── templates
│   │   │   └── metadata-operations.test.ts
│   │   └── workflow-creation-node-type-format.test.ts
│   ├── logger.test.ts
│   ├── MOCKING_STRATEGY.md
│   ├── mocks
│   │   ├── n8n-api
│   │   │   ├── data
│   │   │   │   ├── credentials.ts
│   │   │   │   ├── executions.ts
│   │   │   │   └── workflows.ts
│   │   │   ├── handlers.ts
│   │   │   └── index.ts
│   │   └── README.md
│   ├── node-storage-export.json
│   ├── setup
│   │   ├── global-setup.ts
│   │   ├── msw-setup.ts
│   │   ├── TEST_ENV_DOCUMENTATION.md
│   │   └── test-env.ts
│   ├── test-database-extraction.js
│   ├── test-direct-extraction.js
│   ├── test-enhanced-documentation.js
│   ├── test-enhanced-integration.js
│   ├── test-mcp-extraction.js
│   ├── test-mcp-server-extraction.js
│   ├── test-mcp-tools-integration.js
│   ├── test-node-documentation-service.js
│   ├── test-node-list.js
│   ├── test-package-info.js
│   ├── test-parsing-operations.js
│   ├── test-slack-node-complete.js
│   ├── test-small-rebuild.js
│   ├── test-sqlite-search.js
│   ├── test-storage-system.js
│   ├── unit
│   │   ├── __mocks__
│   │   │   ├── n8n-nodes-base.test.ts
│   │   │   ├── n8n-nodes-base.ts
│   │   │   └── README.md
│   │   ├── database
│   │   │   ├── __mocks__
│   │   │   │   └── better-sqlite3.ts
│   │   │   ├── database-adapter-unit.test.ts
│   │   │   ├── node-repository-core.test.ts
│   │   │   ├── node-repository-operations.test.ts
│   │   │   ├── node-repository-outputs.test.ts
│   │   │   ├── README.md
│   │   │   └── template-repository-core.test.ts
│   │   ├── docker
│   │   │   ├── config-security.test.ts
│   │   │   ├── edge-cases.test.ts
│   │   │   ├── parse-config.test.ts
│   │   │   └── serve-command.test.ts
│   │   ├── errors
│   │   │   └── validation-service-error.test.ts
│   │   ├── examples
│   │   │   └── using-n8n-nodes-base-mock.test.ts
│   │   ├── flexible-instance-security-advanced.test.ts
│   │   ├── flexible-instance-security.test.ts
│   │   ├── http-server
│   │   │   └── multi-tenant-support.test.ts
│   │   ├── http-server-n8n-mode.test.ts
│   │   ├── http-server-n8n-reinit.test.ts
│   │   ├── http-server-session-management.test.ts
│   │   ├── loaders
│   │   │   └── node-loader.test.ts
│   │   ├── mappers
│   │   │   └── docs-mapper.test.ts
│   │   ├── mcp
│   │   │   ├── get-node-essentials-examples.test.ts
│   │   │   ├── handlers-n8n-manager-simple.test.ts
│   │   │   ├── handlers-n8n-manager.test.ts
│   │   │   ├── handlers-workflow-diff.test.ts
│   │   │   ├── lru-cache-behavior.test.ts
│   │   │   ├── multi-tenant-tool-listing.test.ts.disabled
│   │   │   ├── parameter-validation.test.ts
│   │   │   ├── search-nodes-examples.test.ts
│   │   │   ├── tools-documentation.test.ts
│   │   │   └── tools.test.ts
│   │   ├── monitoring
│   │   │   └── cache-metrics.test.ts
│   │   ├── MULTI_TENANT_TEST_COVERAGE.md
│   │   ├── multi-tenant-integration.test.ts
│   │   ├── parsers
│   │   │   ├── node-parser-outputs.test.ts
│   │   │   ├── node-parser.test.ts
│   │   │   ├── property-extractor.test.ts
│   │   │   └── simple-parser.test.ts
│   │   ├── scripts
│   │   │   └── fetch-templates-extraction.test.ts
│   │   ├── services
│   │   │   ├── ai-node-validator.test.ts
│   │   │   ├── ai-tool-validators.test.ts
│   │   │   ├── confidence-scorer.test.ts
│   │   │   ├── config-validator-basic.test.ts
│   │   │   ├── config-validator-edge-cases.test.ts
│   │   │   ├── config-validator-node-specific.test.ts
│   │   │   ├── config-validator-security.test.ts
│   │   │   ├── debug-validator.test.ts
│   │   │   ├── enhanced-config-validator-integration.test.ts
│   │   │   ├── enhanced-config-validator-operations.test.ts
│   │   │   ├── enhanced-config-validator.test.ts
│   │   │   ├── example-generator.test.ts
│   │   │   ├── execution-processor.test.ts
│   │   │   ├── expression-format-validator.test.ts
│   │   │   ├── expression-validator-edge-cases.test.ts
│   │   │   ├── expression-validator.test.ts
│   │   │   ├── fixed-collection-validation.test.ts
│   │   │   ├── loop-output-edge-cases.test.ts
│   │   │   ├── n8n-api-client.test.ts
│   │   │   ├── n8n-validation.test.ts
│   │   │   ├── node-similarity-service.test.ts
│   │   │   ├── node-specific-validators.test.ts
│   │   │   ├── operation-similarity-service-comprehensive.test.ts
│   │   │   ├── operation-similarity-service.test.ts
│   │   │   ├── property-dependencies.test.ts
│   │   │   ├── property-filter-edge-cases.test.ts
│   │   │   ├── property-filter.test.ts
│   │   │   ├── resource-similarity-service-comprehensive.test.ts
│   │   │   ├── resource-similarity-service.test.ts
│   │   │   ├── task-templates.test.ts
│   │   │   ├── template-service.test.ts
│   │   │   ├── universal-expression-validator.test.ts
│   │   │   ├── validation-fixes.test.ts
│   │   │   ├── workflow-auto-fixer.test.ts
│   │   │   ├── workflow-diff-engine.test.ts
│   │   │   ├── workflow-fixed-collection-validation.test.ts
│   │   │   ├── workflow-validator-comprehensive.test.ts
│   │   │   ├── workflow-validator-edge-cases.test.ts
│   │   │   ├── workflow-validator-error-outputs.test.ts
│   │   │   ├── workflow-validator-expression-format.test.ts
│   │   │   ├── workflow-validator-loops-simple.test.ts
│   │   │   ├── workflow-validator-loops.test.ts
│   │   │   ├── workflow-validator-mocks.test.ts
│   │   │   ├── workflow-validator-performance.test.ts
│   │   │   ├── workflow-validator-with-mocks.test.ts
│   │   │   └── workflow-validator.test.ts
│   │   ├── telemetry
│   │   │   ├── batch-processor.test.ts
│   │   │   ├── config-manager.test.ts
│   │   │   ├── event-tracker.test.ts
│   │   │   ├── event-validator.test.ts
│   │   │   ├── rate-limiter.test.ts
│   │   │   ├── telemetry-error.test.ts
│   │   │   ├── telemetry-manager.test.ts
│   │   │   ├── v2.18.3-fixes-verification.test.ts
│   │   │   └── workflow-sanitizer.test.ts
│   │   ├── templates
│   │   │   ├── batch-processor.test.ts
│   │   │   ├── metadata-generator.test.ts
│   │   │   ├── template-repository-metadata.test.ts
│   │   │   └── template-repository-security.test.ts
│   │   ├── test-env-example.test.ts
│   │   ├── test-infrastructure.test.ts
│   │   ├── types
│   │   │   ├── instance-context-coverage.test.ts
│   │   │   └── instance-context-multi-tenant.test.ts
│   │   ├── utils
│   │   │   ├── auth-timing-safe.test.ts
│   │   │   ├── cache-utils.test.ts
│   │   │   ├── console-manager.test.ts
│   │   │   ├── database-utils.test.ts
│   │   │   ├── fixed-collection-validator.test.ts
│   │   │   ├── n8n-errors.test.ts
│   │   │   ├── node-type-normalizer.test.ts
│   │   │   ├── node-type-utils.test.ts
│   │   │   ├── node-utils.test.ts
│   │   │   ├── simple-cache-memory-leak-fix.test.ts
│   │   │   ├── ssrf-protection.test.ts
│   │   │   └── template-node-resolver.test.ts
│   │   └── validation-fixes.test.ts
│   └── utils
│       ├── assertions.ts
│       ├── builders
│       │   └── workflow.builder.ts
│       ├── data-generators.ts
│       ├── database-utils.ts
│       ├── README.md
│       └── test-helpers.ts
├── thumbnail.png
├── tsconfig.build.json
├── tsconfig.json
├── types
│   ├── mcp.d.ts
│   └── test-env.d.ts
├── verify-telemetry-fix.js
├── versioned-nodes.md
├── vitest.config.benchmark.ts
├── vitest.config.integration.ts
└── vitest.config.ts
```

# Files

--------------------------------------------------------------------------------
/tests/integration/n8n-api/utils/cleanup-helpers.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Cleanup Helpers for Integration Tests
 *
 * Provides multi-level cleanup strategies for test resources:
 * - Orphaned workflows (from failed test runs)
 * - Old executions (older than 24 hours)
 * - Bulk cleanup by tag or name prefix
 */

import { getTestN8nClient } from './n8n-client';
import { getN8nCredentials } from './credentials';
import { Logger } from '../../../../src/utils/logger';

const logger = new Logger({ prefix: '[Cleanup]' });

/**
 * Clean up orphaned test workflows
 *
 * Finds and deletes all workflows tagged with the test tag or
 * prefixed with the test name prefix. Run this periodically in CI
 * to clean up failed test runs.
 *
 * Workflows listed in `preservedWorkflowNames` are never deleted,
 * as they are pre-activated fixtures required by other tests.
 *
 * @returns Array of deleted workflow IDs
 * @throws If listing workflows fails or pagination exceeds the safety limit
 */
export async function cleanupOrphanedWorkflows(): Promise<string[]> {
  const creds = getN8nCredentials();
  const client = getTestN8nClient();
  const deleted: string[] = [];

  logger.info('Searching for orphaned test workflows...');

  // Accumulator for every workflow across all pages (never reassigned).
  const allWorkflows: any[] = [];
  let cursor: string | undefined;
  let pageCount = 0;
  const MAX_PAGES = 1000; // Safety limit to prevent infinite loops

  // Fetch all workflows with cursor-based pagination.
  try {
    do {
      pageCount++;

      if (pageCount > MAX_PAGES) {
        logger.error(`Exceeded maximum pages (${MAX_PAGES}). Possible infinite loop or API issue.`);
        throw new Error('Pagination safety limit exceeded while fetching workflows');
      }

      logger.debug(`Fetching workflows page ${pageCount}...`);

      const response = await client.listWorkflows({
        cursor,
        limit: 100,
        excludePinnedData: true
      });

      allWorkflows.push(...response.data);
      // An absent/empty nextCursor terminates the loop.
      cursor = response.nextCursor || undefined;
    } while (cursor);

    logger.info(`Found ${allWorkflows.length} total workflows across ${pageCount} page(s)`);
  } catch (error) {
    logger.error('Failed to fetch workflows:', error);
    throw error;
  }

  // Pre-activated webhook workflow that should NOT be deleted
  // This is needed for webhook trigger integration tests
  // Note: Single webhook accepts all HTTP methods (GET, POST, PUT, DELETE)
  const preservedWorkflowNames = new Set([
    '[MCP-TEST] Webhook All Methods'
  ]);

  // Find test workflows (matched by cleanup tag or name prefix) but
  // exclude pre-activated webhook workflows.
  // NOTE(review): assumes `w.tags` is an array of tag strings — if the API
  // returns tag objects ({id, name}), this `includes` check silently
  // matches nothing and only the name-prefix path applies; verify.
  const testWorkflows = allWorkflows.filter(w => {
    const isTestWorkflow = w.tags?.includes(creds.cleanup.tag) || w.name?.startsWith(creds.cleanup.namePrefix);
    const isPreserved = preservedWorkflowNames.has(w.name);

    return isTestWorkflow && !isPreserved;
  });

  logger.info(`Found ${testWorkflows.length} orphaned test workflow(s) (excluding ${preservedWorkflowNames.size} preserved webhook workflow)`);

  if (testWorkflows.length === 0) {
    return deleted;
  }

  // Delete them sequentially; individual failures are logged but do not
  // abort the remaining deletions.
  for (const workflow of testWorkflows) {
    // Skip entries without an id (same guard cleanupWorkflowsByTag uses)
    // rather than issuing a delete call for `undefined`.
    if (!workflow.id) continue;

    try {
      await client.deleteWorkflow(workflow.id);
      deleted.push(workflow.id);
      logger.debug(`Deleted orphaned workflow: ${workflow.name} (${workflow.id})`);
    } catch (error) {
      logger.warn(`Failed to delete workflow ${workflow.id}:`, error);
    }
  }

  logger.info(`Successfully deleted ${deleted.length} orphaned workflow(s)`);
  return deleted;
}

/**
 * Clean up old executions
 *
 * Deletes executions older than the specified age, based on each
 * execution's `startedAt` timestamp. Executions with a missing or
 * unparseable `startedAt` are skipped (never deleted).
 *
 * @param maxAgeMs - Maximum age in milliseconds (default: 24 hours)
 * @returns Array of deleted execution IDs
 * @throws If listing executions fails or pagination exceeds the safety limit
 */
export async function cleanupOldExecutions(
  maxAgeMs: number = 24 * 60 * 60 * 1000
): Promise<string[]> {
  const client = getTestN8nClient();
  const deleted: string[] = [];

  logger.info(`Searching for executions older than ${maxAgeMs}ms...`);

  // Accumulator for every execution across all pages (never reassigned).
  const allExecutions: any[] = [];
  let cursor: string | undefined;
  let pageCount = 0;
  const MAX_PAGES = 1000; // Safety limit to prevent infinite loops

  // Fetch all executions with cursor-based pagination.
  try {
    do {
      pageCount++;

      if (pageCount > MAX_PAGES) {
        logger.error(`Exceeded maximum pages (${MAX_PAGES}). Possible infinite loop or API issue.`);
        throw new Error('Pagination safety limit exceeded while fetching executions');
      }

      logger.debug(`Fetching executions page ${pageCount}...`);

      const response = await client.listExecutions({
        cursor,
        limit: 100,
        includeData: false // execution payloads are not needed for cleanup
      });

      allExecutions.push(...response.data);
      cursor = response.nextCursor || undefined;
    } while (cursor);

    logger.info(`Found ${allExecutions.length} total executions across ${pageCount} page(s)`);
  } catch (error) {
    logger.error('Failed to fetch executions:', error);
    throw error;
  }

  const cutoffTime = Date.now() - maxAgeMs;
  const oldExecutions = allExecutions.filter(e => {
    const executionTime = new Date(e.startedAt).getTime();
    // Number.isFinite excludes NaN from a missing/invalid startedAt —
    // such executions are left alone rather than treated as "old".
    return Number.isFinite(executionTime) && executionTime < cutoffTime;
  });

  logger.info(`Found ${oldExecutions.length} old execution(s)`);

  if (oldExecutions.length === 0) {
    return deleted;
  }

  // Delete sequentially; individual failures are logged but do not
  // abort the remaining deletions.
  for (const execution of oldExecutions) {
    try {
      await client.deleteExecution(execution.id);
      deleted.push(execution.id);
      logger.debug(`Deleted old execution: ${execution.id}`);
    } catch (error) {
      logger.warn(`Failed to delete execution ${execution.id}:`, error);
    }
  }

  logger.info(`Successfully deleted ${deleted.length} old execution(s)`);
  return deleted;
}

/**
 * Clean up all test resources
 *
 * Runs cleanupOrphanedWorkflows and cleanupOldExecutions concurrently
 * and reports how many of each were removed. Use this as the single
 * comprehensive cleanup entry point in CI.
 *
 * @returns Object with counts of deleted resources
 */
export async function cleanupAllTestResources(): Promise<{
  workflows: number;
  executions: number;
}> {
  logger.info('Starting comprehensive test resource cleanup...');

  // Both cleanups are independent, so run them in parallel and
  // derive the counts from the returned ID lists.
  const results = await Promise.all([
    cleanupOrphanedWorkflows(),
    cleanupOldExecutions()
  ]);

  const workflows = results[0].length;
  const executions = results[1].length;

  logger.info(
    `Cleanup complete: ${workflows} workflows, ${executions} executions`
  );

  return { workflows, executions };
}

/**
 * Delete workflows by tag
 *
 * Deletes all workflows with the specified tag, paginating through
 * every page of results (previously only the first 100 matches were
 * deleted).
 *
 * @param tag - Tag to match (must be non-empty)
 * @returns Array of deleted workflow IDs
 * @throws If `tag` is empty, listing fails, or pagination exceeds the
 *         safety limit
 */
export async function cleanupWorkflowsByTag(tag: string): Promise<string[]> {
  // Guard: an empty tag would previously be coerced to `undefined`
  // (`tags: tag || undefined`), listing — and then deleting — EVERY
  // workflow on the instance. Fail loudly instead.
  if (!tag) {
    throw new Error('cleanupWorkflowsByTag requires a non-empty tag');
  }

  const client = getTestN8nClient();
  const deleted: string[] = [];

  logger.info(`Searching for workflows with tag: ${tag}`);

  // Accumulator for every matching workflow across all pages.
  const workflows: any[] = [];
  let cursor: string | undefined;
  let pageCount = 0;
  const MAX_PAGES = 1000; // Safety limit to prevent infinite loops

  try {
    // Paginate like the other cleanup helpers so matches beyond the
    // first page are not silently skipped.
    do {
      pageCount++;

      if (pageCount > MAX_PAGES) {
        logger.error(`Exceeded maximum pages (${MAX_PAGES}). Possible infinite loop or API issue.`);
        throw new Error(`Pagination safety limit exceeded while fetching workflows with tag: ${tag}`);
      }

      const response = await client.listWorkflows({
        tags: tag,
        cursor,
        limit: 100,
        excludePinnedData: true
      });

      workflows.push(...response.data);
      cursor = response.nextCursor || undefined;
    } while (cursor);

    logger.info(`Found ${workflows.length} workflow(s) with tag: ${tag}`);

    // Delete sequentially; individual failures are logged but do not
    // abort the remaining deletions.
    for (const workflow of workflows) {
      if (!workflow.id) continue;

      try {
        await client.deleteWorkflow(workflow.id);
        deleted.push(workflow.id);
        logger.debug(`Deleted workflow: ${workflow.name} (${workflow.id})`);
      } catch (error) {
        logger.warn(`Failed to delete workflow ${workflow.id}:`, error);
      }
    }

    logger.info(`Successfully deleted ${deleted.length} workflow(s)`);
    return deleted;
  } catch (error) {
    logger.error(`Failed to cleanup workflows by tag: ${tag}`, error);
    throw error;
  }
}

/**
 * Delete executions for a specific workflow
 *
 * @param workflowId - Workflow ID
 * @returns Array of deleted execution IDs
 */
export async function cleanupExecutionsByWorkflow(
  workflowId: string
): Promise<string[]> {
  const client = getTestN8nClient();
  const removedIds: string[] = [];

  logger.info(`Searching for executions of workflow: ${workflowId}`);

  const MAX_PAGES = 1000; // Safety limit to prevent infinite loops
  let nextCursor: string | undefined = undefined;
  let fetchedTotal = 0;
  let pagesFetched = 0;

  try {
    // Walk the cursor-paginated execution list until the API stops
    // returning a next cursor (or the safety limit trips).
    while (true) {
      pagesFetched += 1;

      if (pagesFetched > MAX_PAGES) {
        logger.error(`Exceeded maximum pages (${MAX_PAGES}). Possible infinite loop or API issue.`);
        throw new Error(`Pagination safety limit exceeded while fetching executions for workflow ${workflowId}`);
      }

      const page = await client.listExecutions({
        workflowId,
        cursor: nextCursor,
        limit: 100,
        includeData: false
      });

      fetchedTotal += page.data.length;

      for (const execution of page.data) {
        try {
          await client.deleteExecution(execution.id);
          removedIds.push(execution.id);
          logger.debug(`Deleted execution: ${execution.id}`);
        } catch (error) {
          // Best-effort: a single failed delete does not abort the sweep.
          logger.warn(`Failed to delete execution ${execution.id}:`, error);
        }
      }

      nextCursor = page.nextCursor || undefined;
      if (!nextCursor) break;
    }

    logger.info(
      `Successfully deleted ${removedIds.length}/${fetchedTotal} execution(s) for workflow ${workflowId}`
    );
    return removedIds;
  } catch (error) {
    logger.error(`Failed to cleanup executions for workflow: ${workflowId}`, error);
    throw error;
  }
}

```

--------------------------------------------------------------------------------
/tests/unit/docker/serve-command.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { execSync } from 'child_process';
import fs from 'fs';
import path from 'path';
import os from 'os';

// This suite does NOT run the real docker-entrypoint.sh; each test writes a
// small mock shell script reproducing just the "n8n-mcp serve" -> MCP_MODE=http
// transformation logic, then executes it with a minimal environment.
describe('n8n-mcp serve Command', () => {
  let tempDir: string;
  let mockEntrypointPath: string;

  // Clean environment for tests - only include essential variables
  // (prevents ambient MCP_MODE/AUTH_TOKEN from the host leaking into tests)
  const cleanEnv = { 
    PATH: process.env.PATH, 
    HOME: process.env.HOME,
    NODE_ENV: process.env.NODE_ENV 
  };

  beforeEach(() => {
    // Fresh temp dir per test so mock scripts never collide between tests.
    tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'serve-command-test-'));
    mockEntrypointPath = path.join(tempDir, 'mock-entrypoint.sh');
  });

  afterEach(() => {
    // Remove the temp dir (and any mock script) created in beforeEach.
    if (fs.existsSync(tempDir)) {
      fs.rmSync(tempDir, { recursive: true });
    }
  });

  /**
   * Create a mock entrypoint script that simulates the behavior
   * of the real docker-entrypoint.sh for testing purposes.
   * Written with mode 0755 so it is directly executable by execSync.
   */
  function createMockEntrypoint(content: string): void {
    fs.writeFileSync(mockEntrypointPath, content, { mode: 0o755 });
  }

  describe('Command transformation', () => {
    it('should detect "n8n-mcp serve" and set MCP_MODE=http', () => {
      // Note: \$ in these template literals escapes the shell variables so
      // they are expanded by sh at runtime, not by JS at build time.
      const mockScript = `#!/bin/sh
# Simplified version of the entrypoint logic
if [ "\$1" = "n8n-mcp" ] && [ "\$2" = "serve" ]; then
    export MCP_MODE="http"
    shift 2
    echo "MCP_MODE=\$MCP_MODE"
    echo "Remaining args: \$@"
else
    echo "Normal execution"
fi
`;
      createMockEntrypoint(mockScript);

      const output = execSync(`"${mockEntrypointPath}" n8n-mcp serve`, { encoding: 'utf8', env: cleanEnv });
      
      expect(output).toContain('MCP_MODE=http');
      expect(output).toContain('Remaining args:');
    });

    it('should preserve additional arguments after serve command', () => {
      const mockScript = `#!/bin/sh
if [ "\$1" = "n8n-mcp" ] && [ "\$2" = "serve" ]; then
    export MCP_MODE="http"
    shift 2
    echo "MCP_MODE=\$MCP_MODE"
    echo "Args: \$@"
fi
`;
      createMockEntrypoint(mockScript);

      const output = execSync(
        `"${mockEntrypointPath}" n8n-mcp serve --port 8080 --verbose --debug`,
        { encoding: 'utf8', env: cleanEnv }
      );
      
      expect(output).toContain('MCP_MODE=http');
      expect(output).toContain('Args: --port 8080 --verbose --debug');
    });

    it('should not affect other commands', () => {
      const mockScript = `#!/bin/sh
if [ "\$1" = "n8n-mcp" ] && [ "\$2" = "serve" ]; then
    export MCP_MODE="http"
    echo "Serve mode activated"
else
    echo "Command: \$@"
    echo "MCP_MODE=\${MCP_MODE:-not-set}"
fi
`;
      createMockEntrypoint(mockScript);

      // Test with different command
      const output1 = execSync(`"${mockEntrypointPath}" node index.js`, { encoding: 'utf8', env: cleanEnv });
      expect(output1).toContain('Command: node index.js');
      expect(output1).toContain('MCP_MODE=not-set');

      // Test with n8n-mcp but not serve
      const output2 = execSync(`"${mockEntrypointPath}" n8n-mcp validate`, { encoding: 'utf8', env: cleanEnv });
      expect(output2).toContain('Command: n8n-mcp validate');
      expect(output2).not.toContain('Serve mode activated');
    });
  });

  describe('Integration with config loading', () => {
    it('should load config before processing serve command', () => {
      // The config file itself is only checked for existence by the mock;
      // the exported values below are hard-coded to simulate parse-config.
      const configPath = path.join(tempDir, 'config.json');
      const config = {
        custom_var: 'from-config',
        port: 9000
      };
      fs.writeFileSync(configPath, JSON.stringify(config));

      const mockScript = `#!/bin/sh
# Simulate config loading
if [ -f "${configPath}" ]; then
    export CUSTOM_VAR='from-config'
    export PORT='9000'
fi

# Process serve command
if [ "\$1" = "n8n-mcp" ] && [ "\$2" = "serve" ]; then
    export MCP_MODE="http"
    shift 2
    echo "MCP_MODE=\$MCP_MODE"
    echo "CUSTOM_VAR=\$CUSTOM_VAR"
    echo "PORT=\$PORT"
fi
`;
      createMockEntrypoint(mockScript);

      const output = execSync(`"${mockEntrypointPath}" n8n-mcp serve`, { encoding: 'utf8', env: cleanEnv });
      
      expect(output).toContain('MCP_MODE=http');
      expect(output).toContain('CUSTOM_VAR=from-config');
      expect(output).toContain('PORT=9000');
    });
  });

  describe('Command line variations', () => {
    it('should handle serve command with equals sign notation', () => {
      const mockScript = `#!/bin/sh
# Handle both space and equals notation
if [ "\$1" = "n8n-mcp" ] && [ "\$2" = "serve" ]; then
    export MCP_MODE="http"
    shift 2
    echo "Standard notation worked"
    echo "Args: \$@"
elif echo "\$@" | grep -q "n8n-mcp.*serve"; then
    echo "Alternative notation detected"
fi
`;
      createMockEntrypoint(mockScript);

      const output = execSync(`"${mockEntrypointPath}" n8n-mcp serve --port=8080`, { encoding: 'utf8', env: cleanEnv });
      
      expect(output).toContain('Standard notation worked');
      expect(output).toContain('Args: --port=8080');
    });

    it('should handle quoted arguments correctly', () => {
      // Prints each positional arg on its own line so the assertions can
      // verify word-splitting of quoted values ("Hello World") is preserved.
      const mockScript = `#!/bin/sh
if [ "\$1" = "n8n-mcp" ] && [ "\$2" = "serve" ]; then
    shift 2
    echo "Args received:"
    for arg in "\$@"; do
        echo "  - '\$arg'"
    done
fi
`;
      createMockEntrypoint(mockScript);

      const output = execSync(
        `"${mockEntrypointPath}" n8n-mcp serve --message "Hello World" --path "/path with spaces"`,
        { encoding: 'utf8', env: cleanEnv }
      );
      
      expect(output).toContain("- '--message'");
      expect(output).toContain("- 'Hello World'");
      expect(output).toContain("- '--path'");
      expect(output).toContain("- '/path with spaces'");
    });
  });

  describe('Error handling', () => {
    it('should handle serve command with missing AUTH_TOKEN in HTTP mode', () => {
      const mockScript = `#!/bin/sh
if [ "\$1" = "n8n-mcp" ] && [ "\$2" = "serve" ]; then
    export MCP_MODE="http"
    shift 2
    
    # Check for AUTH_TOKEN (simulate entrypoint validation)
    if [ -z "\$AUTH_TOKEN" ] && [ -z "\$AUTH_TOKEN_FILE" ]; then
        echo "ERROR: AUTH_TOKEN or AUTH_TOKEN_FILE is required for HTTP mode" >&2
        exit 1
    fi
fi
`;
      createMockEntrypoint(mockScript);

      // execSync throws on non-zero exit; inspect status/stderr from the error.
      try {
        execSync(`"${mockEntrypointPath}" n8n-mcp serve`, { encoding: 'utf8', env: cleanEnv });
        expect.fail('Should have thrown an error');
      } catch (error: any) {
        expect(error.status).toBe(1);
        expect(error.stderr.toString()).toContain('AUTH_TOKEN or AUTH_TOKEN_FILE is required');
      }
    });

    it('should succeed with AUTH_TOKEN provided', () => {
      const mockScript = `#!/bin/sh
if [ "\$1" = "n8n-mcp" ] && [ "\$2" = "serve" ]; then
    export MCP_MODE="http"
    shift 2
    
    # Check for AUTH_TOKEN
    if [ -z "\$AUTH_TOKEN" ] && [ -z "\$AUTH_TOKEN_FILE" ]; then
        echo "ERROR: AUTH_TOKEN or AUTH_TOKEN_FILE is required for HTTP mode" >&2
        exit 1
    fi
    
    echo "Server starting with AUTH_TOKEN"
fi
`;
      createMockEntrypoint(mockScript);

      const output = execSync(
        `"${mockEntrypointPath}" n8n-mcp serve`,
        { encoding: 'utf8', env: { ...cleanEnv, AUTH_TOKEN: 'test-token' } }
      );
      
      expect(output).toContain('Server starting with AUTH_TOKEN');
    });
  });

  describe('Backwards compatibility', () => {
    it('should maintain compatibility with direct HTTP mode setting', () => {
      const mockScript = `#!/bin/sh
# Direct MCP_MODE setting should still work
echo "Initial MCP_MODE=\${MCP_MODE:-not-set}"

if [ "\$1" = "n8n-mcp" ] && [ "\$2" = "serve" ]; then
    export MCP_MODE="http"
    echo "Serve command: MCP_MODE=\$MCP_MODE"
else
    echo "Direct mode: MCP_MODE=\${MCP_MODE:-stdio}"
fi
`;
      createMockEntrypoint(mockScript);

      // Test with explicit MCP_MODE
      const output1 = execSync(
        `"${mockEntrypointPath}" node index.js`,
        { encoding: 'utf8', env: { ...cleanEnv, MCP_MODE: 'http' } }
      );
      expect(output1).toContain('Initial MCP_MODE=http');
      expect(output1).toContain('Direct mode: MCP_MODE=http');

      // Test with serve command
      const output2 = execSync(`"${mockEntrypointPath}" n8n-mcp serve`, { encoding: 'utf8', env: cleanEnv });
      expect(output2).toContain('Serve command: MCP_MODE=http');
    });
  });

  describe('Command construction', () => {
    it('should properly construct the node command after transformation', () => {
      const mockScript = `#!/bin/sh
if [ "\$1" = "n8n-mcp" ] && [ "\$2" = "serve" ]; then
    export MCP_MODE="http"
    shift 2
    # Simulate the actual command that would be executed
    echo "Would execute: node /app/dist/mcp/index.js \$@"
fi
`;
      createMockEntrypoint(mockScript);

      const output = execSync(
        `"${mockEntrypointPath}" n8n-mcp serve --port 8080 --host 0.0.0.0`,
        { encoding: 'utf8', env: cleanEnv }
      );
      
      expect(output).toContain('Would execute: node /app/dist/mcp/index.js --port 8080 --host 0.0.0.0');
    });
  });
});
```

--------------------------------------------------------------------------------
/docs/AUTOMATED_RELEASES.md:
--------------------------------------------------------------------------------

```markdown
# Automated Release Process

This document describes the automated release system for n8n-mcp, which handles version detection, changelog parsing, and multi-artifact publishing.

## Overview

The automated release system is triggered when the version in `package.json` is updated and pushed to the main branch. It handles:

- 🏷️ **GitHub Releases**: Creates releases with changelog content
- 📦 **NPM Publishing**: Publishes optimized runtime package
- 🐳 **Docker Images**: Builds and pushes multi-platform images
- 📚 **Documentation**: Updates version badges automatically

## Quick Start

### For Maintainers

Use the prepared release script for a guided experience:

```bash
npm run prepare:release
```

This script will:
1. Prompt for the new version
2. Update `package.json` and `package.runtime.json`
3. Update the changelog
4. Run tests and build
5. Create a git commit
6. Optionally push to trigger the release

### Manual Process

1. **Update the version**:
   ```bash
   # Edit package.json version field
   vim package.json
   
   # Sync to runtime package
   npm run sync:runtime-version
   ```

2. **Update the changelog**:
   ```bash
   # Edit docs/CHANGELOG.md
   vim docs/CHANGELOG.md
   ```

3. **Test and commit**:
   ```bash
   # Ensure everything works
   npm test
   npm run build
   npm run rebuild
   
   # Commit changes
   git add package.json package.runtime.json docs/CHANGELOG.md
   git commit -m "chore: release vX.Y.Z"
   git push
   ```

## Workflow Details

### Version Detection

The workflow monitors pushes to the main branch and detects when `package.json` version changes:

```yaml
paths:
  - 'package.json'
  - 'package.runtime.json'
```

### Changelog Parsing

Automatically extracts release notes from `docs/CHANGELOG.md` using the version header format:

```markdown
## [2.10.0] - 2025-08-02

### Added
- New feature descriptions

### Changed
- Changed feature descriptions

### Fixed
- Bug fix descriptions
```

### Release Artifacts

#### GitHub Release
- Created with extracted changelog content
- Tagged with `vX.Y.Z` format
- Includes installation instructions
- Links to documentation

#### NPM Package
- Published as `n8n-mcp` on npmjs.com
- Uses runtime-only dependencies (8 packages vs 50+ dev deps)
- Optimized for `npx` usage
- ~50MB vs 1GB+ with dev dependencies

#### Docker Images
- **Standard**: `ghcr.io/czlonkowski/n8n-mcp:vX.Y.Z`
- **Railway**: `ghcr.io/czlonkowski/n8n-mcp-railway:vX.Y.Z`
- Multi-platform: linux/amd64, linux/arm64
- Semantic version tags: `vX.Y.Z`, `vX.Y`, `vX`, `latest`

## Configuration

### Required Secrets

Set these in GitHub repository settings → Secrets:

| Secret | Description | Required |
|--------|-------------|----------|
| `NPM_TOKEN` | NPM authentication token for publishing | ✅ Yes |
| `GITHUB_TOKEN` | Automatically provided by GitHub Actions | ✅ Auto |

### NPM Token Setup

1. Login to [npmjs.com](https://www.npmjs.com)
2. Go to Account Settings → Access Tokens
3. Create a new **Automation** token
4. Add as `NPM_TOKEN` secret in GitHub

## Testing

### Test Release Automation

Validate the release system without triggering a release:

```bash
npm run test:release-automation
```

This checks:
- ✅ File existence and structure
- ✅ Version detection logic
- ✅ Changelog parsing
- ✅ Build process
- ✅ NPM package preparation
- ✅ Docker configuration
- ✅ Workflow syntax
- ✅ Environment setup

### Local Testing

Test individual components:

```bash
# Test version detection
node -e "console.log(require('./package.json').version)"

# Test changelog parsing
node scripts/test-release-automation.js

# Test npm package preparation
npm run prepare:publish

# Test Docker build
docker build -t test-image .
```

## Workflow Jobs

### 1. Version Detection
- Compares current vs previous version in git history
- Determines if it's a prerelease (alpha, beta, rc, dev)
- Outputs version information for other jobs

### 2. Changelog Extraction
- Parses `docs/CHANGELOG.md` for the current version
- Extracts content between version headers
- Provides formatted release notes

### 3. GitHub Release Creation
- Creates annotated git tag
- Creates GitHub release with changelog content
- Handles prerelease flag for alpha/beta versions

### 4. Build and Test
- Installs dependencies
- Runs full test suite
- Builds TypeScript
- Rebuilds node database
- Type checking

### 5. NPM Publishing
- Prepares optimized package structure
- Uses `package.runtime.json` for dependencies
- Publishes to npmjs.com registry
- Automatic cleanup

### 6. Docker Building
- Multi-platform builds (amd64, arm64)
- Two image variants (standard, railway)
- Semantic versioning tags
- GitHub Container Registry

### 7. Documentation Updates
- Updates version badges in README
- Commits documentation changes
- Automatic push back to repository

## Monitoring

### GitHub Actions
Monitor releases at: https://github.com/czlonkowski/n8n-mcp/actions

### Release Status
- **GitHub Releases**: https://github.com/czlonkowski/n8n-mcp/releases
- **NPM Package**: https://www.npmjs.com/package/n8n-mcp
- **Docker Images**: https://github.com/czlonkowski/n8n-mcp/pkgs/container/n8n-mcp

### Notifications

The workflow provides comprehensive summaries:
- ✅ Success notifications with links
- ❌ Failure notifications with error details
- 📊 Artifact information and installation commands

## Troubleshooting

### Common Issues

#### NPM Publishing Fails
```
Error: 401 Unauthorized
```
**Solution**: Check NPM_TOKEN secret is valid and has publishing permissions.

#### Docker Build Fails
```
Error: failed to solve: could not read from registry
```
**Solution**: Check GitHub Container Registry permissions and GITHUB_TOKEN.

#### Changelog Parsing Fails
```
No changelog entries found for version X.Y.Z
```
**Solution**: Ensure changelog follows the correct format:
```markdown
## [X.Y.Z] - YYYY-MM-DD
```

#### Version Detection Fails
```
Version not incremented
```
**Solution**: Ensure new version is greater than the previous version.

### Recovery Steps

#### Failed NPM Publish
1. Check if version was already published
2. If not, manually publish:
   ```bash
   npm run prepare:publish
   cd npm-publish-temp
   npm publish
   ```

#### Failed Docker Build
1. Build locally to test:
   ```bash
   docker build -t test-build .
   ```
2. Re-trigger workflow or push a fix

#### Incomplete Release
1. Delete the created tag if needed:
   ```bash
   git tag -d vX.Y.Z
   git push --delete origin vX.Y.Z
   ```
2. Fix issues and push again

## Security

### Secrets Management
- NPM_TOKEN has limited scope (publish only)
- GITHUB_TOKEN has automatic scoping
- No secrets are logged or exposed

### Package Security
- Runtime package excludes development dependencies
- No build tools or test frameworks in published package
- Minimal attack surface (~50MB vs 1GB+)

### Docker Security
- Multi-stage builds
- Non-root user execution
- Minimal base images
- Security scanning enabled

## Changelog Format

The automated system expects changelog entries in [Keep a Changelog](https://keepachangelog.com/) format:

```markdown
# Changelog

All notable changes to this project will be documented in this file.

## [Unreleased]

### Added
- New features for next release

## [2.10.0] - 2025-08-02

### Added
- Automated release system
- Multi-platform Docker builds

### Changed
- Improved version detection
- Enhanced error handling

### Fixed
- Fixed changelog parsing edge cases
- Fixed Docker build optimization

## [2.9.1] - 2025-08-01

...
```

## Version Strategy

### Semantic Versioning
- **MAJOR** (X.0.0): Breaking changes
- **MINOR** (X.Y.0): New features, backward compatible
- **PATCH** (X.Y.Z): Bug fixes, backward compatible

### Prerelease Versions
- **Alpha**: `X.Y.Z-alpha.N` - Early development
- **Beta**: `X.Y.Z-beta.N` - Feature complete, testing
- **RC**: `X.Y.Z-rc.N` - Release candidate

Prerelease versions are automatically detected and marked appropriately.

## Best Practices

### Before Releasing
1. ✅ Run `npm run test:release-automation`
2. ✅ Update changelog with meaningful descriptions
3. ✅ Test locally with `npm test && npm run build`
4. ✅ Review breaking changes
5. ✅ Consider impact on users

### Version Bumping
- Use `npm run prepare:release` for guided process
- Follow semantic versioning strictly
- Document breaking changes clearly
- Consider backward compatibility

### Changelog Writing
- Be specific about changes
- Include migration notes for breaking changes
- Credit contributors
- Use consistent formatting

## Contributing

### For Maintainers
1. Use automated tools: `npm run prepare:release`
2. Follow semantic versioning
3. Update changelog thoroughly
4. Test before releasing

### For Contributors
- Breaking changes require MAJOR version bump
- New features require MINOR version bump
- Bug fixes require PATCH version bump
- Update changelog in PR descriptions

---

🤖 *This automated release system was designed with [Claude Code](https://claude.ai/code)*
```

--------------------------------------------------------------------------------
/tests/integration/workflow-creation-node-type-format.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Integration test for workflow creation with node type format validation
 *
 * This test validates that workflows are correctly validated with FULL form node types
 * (n8n-nodes-base.*) as required by the n8n API, without normalization to SHORT form.
 *
 * Background: Bug in handlers-n8n-manager.ts was normalizing node types to SHORT form
 * (nodes-base.*) before validation, causing validation to reject all workflows.
 */

import { describe, it, expect } from 'vitest';
import { validateWorkflowStructure } from '@/services/n8n-validation';

describe('Workflow Creation Node Type Format (Integration)', () => {
  describe('validateWorkflowStructure with FULL form node types', () => {
    it('should accept workflows with FULL form node types (n8n-nodes-base.*)', () => {
      const workflow = {
        name: 'Test Workflow',
        nodes: [
          {
            id: 'manual-1',
            name: 'Manual Trigger',
            type: 'n8n-nodes-base.manualTrigger', // FULL form
            typeVersion: 1,
            position: [250, 300] as [number, number],
            parameters: {}
          },
          {
            id: 'set-1',
            name: 'Set Data',
            type: 'n8n-nodes-base.set', // FULL form
            typeVersion: 3.4,
            position: [450, 300] as [number, number],
            parameters: {
              mode: 'manual',
              assignments: {
                assignments: [{
                  id: '1',
                  name: 'test',
                  value: 'hello',
                  type: 'string'
                }]
              }
            }
          }
        ],
        connections: {
          'Manual Trigger': {
            main: [[{
              node: 'Set Data',
              type: 'main',
              index: 0
            }]]
          }
        }
      };

      const errors = validateWorkflowStructure(workflow);

      // Empty error array means the workflow passed validation entirely.
      expect(errors).toEqual([]);
    });

    it('should reject workflows with SHORT form node types (nodes-base.*)', () => {
      const workflow = {
        name: 'Test Workflow',
        nodes: [
          {
            id: 'manual-1',
            name: 'Manual Trigger',
            type: 'nodes-base.manualTrigger', // SHORT form - should be rejected
            typeVersion: 1,
            position: [250, 300] as [number, number],
            parameters: {}
          }
        ],
        connections: {}
      };

      const errors = validateWorkflowStructure(workflow);

      // Expect a specific error that both names the bad type and suggests
      // the FULL-form replacement, so users get an actionable message.
      expect(errors.length).toBeGreaterThan(0);
      expect(errors.some(e =>
        e.includes('Invalid node type "nodes-base.manualTrigger"') &&
        e.includes('Use "n8n-nodes-base.manualTrigger" instead')
      )).toBe(true);
    });

    it('should accept workflows with LangChain nodes in FULL form', () => {
      const workflow = {
        name: 'AI Workflow',
        nodes: [
          {
            id: 'manual-1',
            name: 'Manual Trigger',
            type: 'n8n-nodes-base.manualTrigger',
            typeVersion: 1,
            position: [250, 300] as [number, number],
            parameters: {}
          },
          {
            id: 'agent-1',
            name: 'AI Agent',
            type: '@n8n/n8n-nodes-langchain.agent', // FULL form
            typeVersion: 1,
            position: [450, 300] as [number, number],
            parameters: {}
          }
        ],
        connections: {
          'Manual Trigger': {
            main: [[{
              node: 'AI Agent',
              type: 'main',
              index: 0
            }]]
          }
        }
      };

      const errors = validateWorkflowStructure(workflow);

      // Should accept FULL form LangChain nodes
      // Note: May have other validation errors (missing parameters), but NOT node type errors
      const hasNodeTypeError = errors.some(e =>
        e.includes('Invalid node type') && e.includes('@n8n/n8n-nodes-langchain.agent')
      );
      expect(hasNodeTypeError).toBe(false);
    });

    it('should reject node types without package prefix', () => {
      const workflow = {
        name: 'Invalid Workflow',
        nodes: [
          {
            id: 'node-1',
            name: 'Invalid Node',
            type: 'webhook', // No package prefix
            typeVersion: 1,
            position: [250, 300] as [number, number],
            parameters: {}
          }
        ],
        connections: {}
      };

      const errors = validateWorkflowStructure(workflow);

      expect(errors.length).toBeGreaterThan(0);
      expect(errors.some(e =>
        e.includes('Invalid node type "webhook"') &&
        e.includes('must include package prefix')
      )).toBe(true);
    });
  });

  describe('Real-world workflow examples', () => {
    it('should validate webhook workflow correctly', () => {
      const workflow = {
        name: 'Webhook to HTTP',
        nodes: [
          {
            id: 'webhook-1',
            name: 'Webhook',
            type: 'n8n-nodes-base.webhook',
            typeVersion: 2,
            position: [250, 300] as [number, number],
            parameters: {
              path: 'test-webhook',
              httpMethod: 'POST',
              responseMode: 'onReceived'
            }
          },
          {
            id: 'http-1',
            name: 'HTTP Request',
            type: 'n8n-nodes-base.httpRequest',
            typeVersion: 4.2,
            position: [450, 300] as [number, number],
            parameters: {
              method: 'POST',
              url: 'https://example.com/api',
              sendBody: true,
              bodyParameters: {
                parameters: []
              }
            }
          }
        ],
        connections: {
          'Webhook': {
            main: [[{
              node: 'HTTP Request',
              type: 'main',
              index: 0
            }]]
          }
        }
      };

      const errors = validateWorkflowStructure(workflow);

      expect(errors).toEqual([]);
    });

    it('should validate schedule trigger workflow correctly', () => {
      const workflow = {
        name: 'Daily Report',
        nodes: [
          {
            id: 'schedule-1',
            name: 'Schedule Trigger',
            type: 'n8n-nodes-base.scheduleTrigger',
            typeVersion: 1.2,
            position: [250, 300] as [number, number],
            parameters: {
              rule: {
                interval: [{
                  field: 'days',
                  daysInterval: 1
                }]
              }
            }
          },
          {
            id: 'set-1',
            name: 'Set',
            type: 'n8n-nodes-base.set',
            typeVersion: 3.4,
            position: [450, 300] as [number, number],
            parameters: {
              mode: 'manual',
              assignments: {
                assignments: []
              }
            }
          }
        ],
        connections: {
          'Schedule Trigger': {
            main: [[{
              node: 'Set',
              type: 'main',
              index: 0
            }]]
          }
        }
      };

      const errors = validateWorkflowStructure(workflow);

      expect(errors).toEqual([]);
    });
  });

  describe('Regression test for normalization bug', () => {
    it('should NOT normalize node types before validation', () => {
      // This test ensures that handleCreateWorkflow does NOT call
      // NodeTypeNormalizer.normalizeWorkflowNodeTypes() before validation

      const fullFormWorkflow = {
        name: 'Test',
        nodes: [
          {
            id: '1',
            name: 'Manual Trigger',
            type: 'n8n-nodes-base.manualTrigger',
            typeVersion: 1,
            position: [0, 0] as [number, number],
            parameters: {}
          },
          {
            id: '2',
            name: 'Set',
            type: 'n8n-nodes-base.set',
            typeVersion: 3.4,
            position: [200, 0] as [number, number],
            parameters: {
              mode: 'manual',
              assignments: { assignments: [] }
            }
          }
        ],
        connections: {
          'Manual Trigger': {
            main: [[{ node: 'Set', type: 'main', index: 0 }]]
          }
        }
      };

      const errors = validateWorkflowStructure(fullFormWorkflow);

      // FULL form should pass validation
      expect(errors).toEqual([]);

      // SHORT form (what normalizer produces) should FAIL validation
      // Derive the SHORT-form variant from the same workflow so the only
      // difference between the two validations is the type prefix.
      const shortFormWorkflow = {
        ...fullFormWorkflow,
        nodes: fullFormWorkflow.nodes.map(node => ({
          ...node,
          type: node.type.replace('n8n-nodes-base.', 'nodes-base.') // Convert to SHORT form
        }))
      };

      const shortFormErrors = validateWorkflowStructure(shortFormWorkflow);

      expect(shortFormErrors.length).toBeGreaterThan(0);
      expect(shortFormErrors.some(e =>
        e.includes('Invalid node type') &&
        e.includes('nodes-base.')
      )).toBe(true);
    });
  });
});

```

--------------------------------------------------------------------------------
/src/scripts/rebuild.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env node
/**
 * Copyright (c) 2024 AiAdvisors Romuald Czlonkowski
 * Licensed under the Sustainable Use License v1.0
 */
import { createDatabaseAdapter } from '../database/database-adapter';
import { N8nNodeLoader } from '../loaders/node-loader';
import { NodeParser, ParsedNode } from '../parsers/node-parser';
import { DocsMapper } from '../mappers/docs-mapper';
import { NodeRepository } from '../database/node-repository';
import { TemplateSanitizer } from '../utils/template-sanitizer';
import * as fs from 'fs';
import * as path from 'path';

/**
 * Rebuilds the local n8n node database from the installed n8n packages.
 *
 * Pipeline:
 *  1. Open (or create) the SQLite database and (re)apply the schema.
 *  2. Clear the existing `nodes` table.
 *  3. Load every node class, parse it, and fetch its documentation
 *     (doc fetching is async, so it happens outside any DB transaction).
 *  4. Persist the parsed nodes and print per-node / aggregate statistics.
 *  5. Run post-build validation checks (see validateDatabase below).
 *  6. Sanitize any stored workflow templates (removes embedded secrets).
 *
 * Honors the NODE_DB_PATH env var to override the database location.
 */
async function rebuild() {
  console.log('🔄 Rebuilding n8n node database...\n');
  
  const dbPath = process.env.NODE_DB_PATH || './data/nodes.db';
  const db = await createDatabaseAdapter(dbPath);
  const loader = new N8nNodeLoader();
  const parser = new NodeParser();
  const mapper = new DocsMapper();
  const repository = new NodeRepository(db);
  
  // Initialize database.
  // Assumes schema.sql is idempotent (CREATE ... IF NOT EXISTS) since it is
  // re-applied on every rebuild — TODO confirm in schema.sql.
  const schema = fs.readFileSync(path.join(__dirname, '../../src/database/schema.sql'), 'utf8');
  db.exec(schema);
  
  // Clear existing data so the rebuild starts from a clean nodes table.
  db.exec('DELETE FROM nodes');
  console.log('🗑️  Cleared existing data\n');
  
  // Load all nodes from both n8n packages.
  const nodes = await loader.loadAllNodes();
  console.log(`📦 Loaded ${nodes.length} nodes from packages\n`);
  
  // Statistics accumulated across the parse and save phases.
  const stats = {
    successful: 0,
    failed: 0,
    aiTools: 0,
    triggers: 0,
    webhooks: 0,
    withProperties: 0,
    withOperations: 0,
    withDocs: 0
  };
  
  // Phase 1: parse + fetch docs.
  // Process each node (documentation fetching must be outside transaction due to async)
  console.log('🔄 Processing nodes...');
  const processedNodes: Array<{ parsed: ParsedNode; docs: string | undefined; nodeName: string }> = [];
  
  for (const { packageName, nodeName, NodeClass } of nodes) {
    try {
      // Parse node class metadata into our normalized shape.
      const parsed = parser.parse(NodeClass, packageName);
      
      // Validate parsed data — these fields are required downstream.
      if (!parsed.nodeType || !parsed.displayName) {
        throw new Error(`Missing required fields - nodeType: ${parsed.nodeType}, displayName: ${parsed.displayName}, packageName: ${parsed.packageName}`);
      }
      
      // Additional validation for required fields
      if (!parsed.packageName) {
        throw new Error(`Missing packageName for node ${nodeName}`);
      }
      
      // Get documentation (may legitimately be missing for some nodes).
      const docs = await mapper.fetchDocumentation(parsed.nodeType);
      parsed.documentation = docs || undefined;
      
      processedNodes.push({ parsed, docs: docs || undefined, nodeName });
    } catch (error) {
      // A single bad node must not abort the whole rebuild.
      stats.failed++;
      const errorMessage = (error as Error).message;
      console.error(`❌ Failed to process ${nodeName}: ${errorMessage}`);
    }
  }
  
  // Phase 2: persist all successfully processed nodes.
  console.log(`\n💾 Saving ${processedNodes.length} processed nodes to database...`);
  
  let saved = 0;
  for (const { parsed, docs, nodeName } of processedNodes) {
    try {
      repository.saveNode(parsed);
      saved++;
      
      // Update statistics
      stats.successful++;
      if (parsed.isAITool) stats.aiTools++;
      if (parsed.isTrigger) stats.triggers++;
      if (parsed.isWebhook) stats.webhooks++;
      if (parsed.properties.length > 0) stats.withProperties++;
      if (parsed.operations.length > 0) stats.withOperations++;
      if (docs) stats.withDocs++;
      
      console.log(`✅ ${parsed.nodeType} [Props: ${parsed.properties.length}, Ops: ${parsed.operations.length}]`);
    } catch (error) {
      stats.failed++;
      const errorMessage = (error as Error).message;
      console.error(`❌ Failed to save ${nodeName}: ${errorMessage}`);
    }
  }
  
  console.log(`💾 Save completed: ${saved} nodes saved successfully`);
  
  // Phase 3: validation — reported but non-fatal, so a partially broken
  // database still produces a full diagnostic listing.
  console.log('\n🔍 Running validation checks...');
  try {
    const validationResults = validateDatabase(repository);
    
    if (!validationResults.passed) {
      console.log('⚠️  Validation Issues:');
      validationResults.issues.forEach(issue => console.log(`   - ${issue}`));
    } else {
      console.log('✅ All validation checks passed');
    }
  } catch (validationError) {
    console.error('❌ Validation failed:', (validationError as Error).message);
    console.log('⚠️  Skipping validation due to database compatibility issues');
  }
  
  // Summary
  console.log('\n📊 Summary:');
  console.log(`   Total nodes: ${nodes.length}`);
  console.log(`   Successful: ${stats.successful}`);
  console.log(`   Failed: ${stats.failed}`);
  console.log(`   AI Tools: ${stats.aiTools}`);
  console.log(`   Triggers: ${stats.triggers}`);
  console.log(`   Webhooks: ${stats.webhooks}`);
  console.log(`   With Properties: ${stats.withProperties}`);
  console.log(`   With Operations: ${stats.withOperations}`);
  console.log(`   With Documentation: ${stats.withDocs}`);
  
  // Phase 4: sanitize templates if they exist.
  // NOTE(review): this assumes the `templates` table exists (created by
  // schema.sql); if it doesn't, the COUNT query below throws — confirm.
  console.log('\n🧹 Checking for templates to sanitize...');
  const templateCount = db.prepare('SELECT COUNT(*) as count FROM templates').get() as { count: number };
  
  if (templateCount && templateCount.count > 0) {
    console.log(`   Found ${templateCount.count} templates, sanitizing...`);
    const sanitizer = new TemplateSanitizer();
    let sanitizedCount = 0;
    
    const templates = db.prepare('SELECT id, name, workflow_json FROM templates').all() as any[];
    for (const template of templates) {
      const originalWorkflow = JSON.parse(template.workflow_json);
      const { sanitized: sanitizedWorkflow, wasModified } = sanitizer.sanitizeWorkflow(originalWorkflow);
      
      // Only rewrite rows that actually changed to avoid needless writes.
      if (wasModified) {
        const stmt = db.prepare('UPDATE templates SET workflow_json = ? WHERE id = ?');
        stmt.run(JSON.stringify(sanitizedWorkflow), template.id);
        sanitizedCount++;
        console.log(`   ✅ Sanitized template ${template.id}: ${template.name}`);
      }
    }
    
    console.log(`   Sanitization complete: ${sanitizedCount} templates cleaned`);
  } else {
    console.log('   No templates found in database');
  }
  
  console.log('\n✨ Rebuild complete!');
  
  // NOTE(review): close() is not in a finally, so an earlier throw leaves the
  // handle open; acceptable for a one-shot script since the process exits.
  db.close();
}

/**
 * Runs post-rebuild sanity checks against the node database.
 *
 * Checks, in order:
 *  - the nodes table is non-empty (hard failure: returns immediately);
 *  - a minimum expected node count (~500 across both n8n packages);
 *  - a handful of critical nodes exist and expose properties;
 *  - AI-tool detection produced at least one result;
 *  - the FTS5 index exists, is populated, matches the nodes row count,
 *    and returns hits for known-good search terms.
 *
 * Any exception raised while checking is captured as an issue rather than
 * propagated, so the caller always gets a report.
 *
 * @param repository - Node repository wrapping the rebuilt database.
 * @returns `{ passed: true, issues: [] }` when every check succeeds,
 *          otherwise `passed: false` with human-readable issue strings.
 */
function validateDatabase(repository: NodeRepository): { passed: boolean; issues: string[] } {
  // Explicitly typed to match the return contract (was an implicit any[]).
  const issues: string[] = [];

  try {
    // Reach into the repository's underlying adapter for raw SQL checks.
    const db = (repository as any).db;

    // CRITICAL: Check if database has any nodes at all. Nothing else is
    // worth checking against an empty table, so bail out early.
    const nodeCount = db.prepare('SELECT COUNT(*) as count FROM nodes').get() as { count: number };
    if (nodeCount.count === 0) {
      issues.push('CRITICAL: Database is empty - no nodes found! Rebuild failed or was interrupted.');
      return { passed: false, issues };
    }

    // Check minimum expected node count (should have at least 500 nodes from both packages)
    if (nodeCount.count < 500) {
      issues.push(`WARNING: Only ${nodeCount.count} nodes found - expected at least 500 (both n8n packages)`);
    }

    // Spot-check a few nodes that must always be present and parseable.
    const criticalNodes = ['nodes-base.httpRequest', 'nodes-base.code', 'nodes-base.webhook', 'nodes-base.slack'];

    for (const nodeType of criticalNodes) {
      const node = repository.getNode(nodeType);

      if (!node) {
        issues.push(`Critical node ${nodeType} not found`);
        continue;
      }

      if (node.properties.length === 0) {
        issues.push(`Node ${nodeType} has no properties`);
      }
    }

    // Check AI tools
    const aiTools = repository.getAITools();
    if (aiTools.length === 0) {
      issues.push('No AI tools found - check detection logic');
    }

    // Check FTS5 table existence and population
    const ftsTableCheck = db.prepare(`
      SELECT name FROM sqlite_master
      WHERE type='table' AND name='nodes_fts'
    `).get();

    if (!ftsTableCheck) {
      issues.push('CRITICAL: FTS5 table (nodes_fts) does not exist - searches will fail or be very slow');
    } else {
      // Check if FTS5 table is properly populated
      const ftsCount = db.prepare('SELECT COUNT(*) as count FROM nodes_fts').get() as { count: number };

      if (ftsCount.count === 0) {
        issues.push('CRITICAL: FTS5 index is empty - searches will return zero results');
      } else if (nodeCount.count !== ftsCount.count) {
        issues.push(`FTS5 index out of sync: ${nodeCount.count} nodes but ${ftsCount.count} FTS5 entries`);
      }

      // Verify critical nodes are searchable via FTS5
      const searchableNodes = ['webhook', 'merge', 'split'];
      for (const searchTerm of searchableNodes) {
        // Cast added for consistency with the other count queries above.
        const searchResult = db.prepare(`
          SELECT COUNT(*) as count FROM nodes_fts
          WHERE nodes_fts MATCH ?
        `).get(searchTerm) as { count: number };

        if (searchResult.count === 0) {
          issues.push(`CRITICAL: Search for "${searchTerm}" returns zero results in FTS5 index`);
        }
      }
    }
  } catch (error) {
    // Convert unexpected errors into a reported issue instead of throwing.
    const errorMessage = (error as Error).message;
    issues.push(`Validation error: ${errorMessage}`);
  }

  return {
    passed: issues.length === 0,
    issues
  };
}

// Run if called directly (not when imported as a module).
if (require.main === module) {
  rebuild().catch(error => {
    // Report the failure AND exit non-zero so CI/automation can detect it;
    // the previous `.catch(console.error)` made failed rebuilds exit 0.
    console.error(error);
    process.exit(1);
  });
}
```

--------------------------------------------------------------------------------
/scripts/update-n8n-deps.js:
--------------------------------------------------------------------------------

```javascript
#!/usr/bin/env node

/**
 * Update n8n dependencies to latest versions
 * Can be run manually or via GitHub Actions
 */

const { execSync } = require('child_process');
const fs = require('fs');
const path = require('path');

class N8nDependencyUpdater {
  constructor() {
    this.packageJsonPath = path.join(__dirname, '..', 'package.json');
    // Only track the main n8n package - let it manage its own dependencies
    this.mainPackage = 'n8n';
    // Companion packages that must stay in lock-step with what n8n requires.
    this.trackedDeps = ['n8n-core', 'n8n-workflow', '@n8n/n8n-nodes-langchain'];
  }

  /**
   * Get latest published version of a package from npm.
   * @param {string} packageName
   * @returns {string|null} version string, or null when the lookup fails
   */
  getLatestVersion(packageName) {
    try {
      const output = execSync(`npm view ${packageName} version`, { encoding: 'utf8' });
      return output.trim();
    } catch (error) {
      console.error(`Failed to get version for ${packageName}:`, error.message);
      return null;
    }
  }

  /**
   * Get the dependency map of a specific n8n version from npm.
   * @param {string} n8nVersion
   * @returns {Object<string,string>} dependency name -> version spec ({} on failure)
   */
  getN8nDependencies(n8nVersion) {
    try {
      const output = execSync(`npm view n8n@${n8nVersion} dependencies --json`, { encoding: 'utf8' });
      return JSON.parse(output);
    } catch (error) {
      console.error(`Failed to get dependencies for n8n@${n8nVersion}:`, error.message);
      return {};
    }
  }

  /**
   * Strip range operators from an npm version spec and keep only the first
   * version of a compound range, e.g. ">=1.2.3 <2.0.0" -> "1.2.3".
   *
   * Note: the operator class is quantified with `+` — the previous
   * single-character strip turned ">=1.2.3" into "=1.2.3".
   * @param {string} spec - npm version spec (e.g. "^1.2.3", ">=1.2.3")
   * @returns {string} bare version
   */
  cleanVersionSpec(spec) {
    return spec.replace(/^[\^~>=<]+/, '').split(' ')[0];
  }

  /**
   * Get the currently pinned version of a package from our package.json.
   * @param {string} packageName
   * @returns {string|null} bare version, or null if not a dependency
   */
  getCurrentVersion(packageName) {
    const packageJson = JSON.parse(fs.readFileSync(this.packageJsonPath, 'utf8'));
    // Guard against a package.json without a dependencies section.
    const version = (packageJson.dependencies || {})[packageName];
    return version ? version.replace(/^[\^~]/, '') : null;
  }

  /**
   * Check which packages need updates.
   *
   * Strategy: compare our n8n pin against the latest npm release; whether or
   * not n8n itself changed, sync the tracked companion packages to exactly
   * what that n8n version declares in its own dependencies.
   * @returns {Promise<Array<{package:string,current:string,latest:string,reason?:string}>>}
   */
  async checkForUpdates() {
    console.log('🔍 Checking for n8n dependency updates...\n');
    
    const updates = [];
    
    // First check the main n8n package
    const currentN8nVersion = this.getCurrentVersion('n8n');
    const latestN8nVersion = this.getLatestVersion('n8n');
    
    if (!currentN8nVersion || !latestN8nVersion) {
      console.error('Failed to check n8n version');
      return updates;
    }
    
    if (currentN8nVersion !== latestN8nVersion) {
      console.log(`📦 n8n: ${currentN8nVersion} → ${latestN8nVersion} (update available)`);
      
      // Get the dependencies that n8n requires
      const n8nDeps = this.getN8nDependencies(latestN8nVersion);
      
      // Add main n8n update
      updates.push({
        package: 'n8n',
        current: currentN8nVersion,
        latest: latestN8nVersion
      });
      
      // Check our tracked dependencies that n8n uses
      for (const dep of this.trackedDeps) {
        const currentVersion = this.getCurrentVersion(dep);
        const requiredVersion = n8nDeps[dep];
        
        if (requiredVersion && currentVersion) {
          // Extract version from npm dependency format (e.g., "^1.2.3" -> "1.2.3")
          const cleanRequiredVersion = this.cleanVersionSpec(requiredVersion);
          
          if (currentVersion !== cleanRequiredVersion) {
            updates.push({
              package: dep,
              current: currentVersion,
              latest: cleanRequiredVersion,
              reason: `Required by n8n@${latestN8nVersion}`
            });
            console.log(`📦 ${dep}: ${currentVersion} → ${cleanRequiredVersion} (required by n8n)`);
          } else {
            console.log(`✅ ${dep}: ${currentVersion} (compatible with n8n@${latestN8nVersion})`);
          }
        }
      }
    } else {
      console.log(`✅ n8n: ${currentN8nVersion} (up to date)`);
      
      // Even if n8n is up to date, check if our dependencies match what n8n expects
      const n8nDeps = this.getN8nDependencies(currentN8nVersion);
      
      for (const dep of this.trackedDeps) {
        const currentVersion = this.getCurrentVersion(dep);
        const requiredVersion = n8nDeps[dep];
        
        if (requiredVersion && currentVersion) {
          const cleanRequiredVersion = this.cleanVersionSpec(requiredVersion);
          
          if (currentVersion !== cleanRequiredVersion) {
            updates.push({
              package: dep,
              current: currentVersion,
              latest: cleanRequiredVersion,
              reason: `Required by n8n@${currentN8nVersion}`
            });
            console.log(`📦 ${dep}: ${currentVersion} → ${cleanRequiredVersion} (sync with n8n)`);
          } else {
            console.log(`✅ ${dep}: ${currentVersion} (in sync)`);
          }
        }
      }
    }
    
    return updates;
  }

  /**
   * Update package.json with new caret-pinned versions.
   * @param {Array} updates - entries produced by checkForUpdates()
   * @returns {boolean} true if package.json was rewritten
   */
  updatePackageJson(updates) {
    if (updates.length === 0) {
      console.log('\n✨ All n8n dependencies are up to date and in sync!');
      return false;
    }
    
    console.log(`\n📝 Updating ${updates.length} packages in package.json...`);
    
    const packageJson = JSON.parse(fs.readFileSync(this.packageJsonPath, 'utf8'));
    
    for (const update of updates) {
      packageJson.dependencies[update.package] = `^${update.latest}`;
      console.log(`   Updated ${update.package} to ^${update.latest}${update.reason ? ` (${update.reason})` : ''}`);
    }
    
    // Trailing newline keeps the file diff-friendly with npm's own output.
    fs.writeFileSync(
      this.packageJsonPath,
      JSON.stringify(packageJson, null, 2) + '\n',
      'utf8'
    );
    
    return true;
  }

  /**
   * Run npm install to update the lock file.
   * @returns {boolean} true on success
   */
  runNpmInstall() {
    console.log('\n📥 Running npm install to update lock file...');
    try {
      execSync('npm install', { 
        cwd: path.join(__dirname, '..'),
        stdio: 'inherit'
      });
      return true;
    } catch (error) {
      console.error('❌ npm install failed:', error.message);
      return false;
    }
  }

  /**
   * Rebuild the node database against the freshly installed n8n packages.
   * @returns {boolean} true on success
   */
  rebuildDatabase() {
    console.log('\n🔨 Rebuilding node database...');
    try {
      execSync('npm run build && npm run rebuild', { 
        cwd: path.join(__dirname, '..'),
        stdio: 'inherit'
      });
      return true;
    } catch (error) {
      console.error('❌ Database rebuild failed:', error.message);
      return false;
    }
  }

  /**
   * Run validation tests against the rebuilt database.
   * @returns {boolean} true when all tests pass
   */
  runValidation() {
    console.log('\n🧪 Running validation tests...');
    try {
      execSync('npm run validate && npm run test-nodes', { 
        cwd: path.join(__dirname, '..'),
        stdio: 'inherit'
      });
      console.log('✅ All tests passed!');
      return true;
    } catch (error) {
      console.error('❌ Validation failed:', error.message);
      return false;
    }
  }

  /**
   * Generate an update summary for a PR/commit message.
   * @param {Array} updates
   * @returns {string} human-readable summary ('' when nothing changed)
   */
  generateUpdateSummary(updates) {
    if (updates.length === 0) return '';
    
    const summary = ['Updated n8n dependencies:\n'];
    
    for (const update of updates) {
      summary.push(`- ${update.package}: ${update.current} → ${update.latest}`);
    }
    
    return summary.join('\n');
  }

  /**
   * Main update process: check → apply → install → rebuild → validate.
   * Exits the process directly (0 on no-op/success, 1 on failure).
   * @param {{dryRun?: boolean, skipTests?: boolean}} [options]
   */
  async run(options = {}) {
    const { dryRun = false, skipTests = false } = options;
    
    console.log('🚀 n8n Dependency Updater\n');
    console.log('Mode:', dryRun ? 'DRY RUN' : 'LIVE UPDATE');
    console.log('Skip tests:', skipTests ? 'YES' : 'NO');
    console.log('Strategy: Update n8n and sync its required dependencies');
    console.log('');
    
    // Check for updates
    const updates = await this.checkForUpdates();
    
    if (updates.length === 0) {
      process.exit(0);
    }
    
    if (dryRun) {
      console.log('\n🔍 DRY RUN: No changes made');
      console.log('\nUpdate summary:');
      console.log(this.generateUpdateSummary(updates));
      process.exit(0);
    }
    
    // Apply updates
    if (!this.updatePackageJson(updates)) {
      process.exit(0);
    }
    
    // Install dependencies
    if (!this.runNpmInstall()) {
      console.error('\n❌ Update failed at npm install step');
      process.exit(1);
    }
    
    // Rebuild database
    if (!this.rebuildDatabase()) {
      console.error('\n❌ Update failed at database rebuild step');
      process.exit(1);
    }
    
    // Run tests
    if (!skipTests && !this.runValidation()) {
      console.error('\n❌ Update failed at validation step');
      process.exit(1);
    }
    
    // Success!
    console.log('\n✅ Update completed successfully!');
    console.log('\nUpdate summary:');
    console.log(this.generateUpdateSummary(updates));
    
    // Write summary to file for GitHub Actions
    if (process.env.GITHUB_ACTIONS) {
      fs.writeFileSync(
        path.join(__dirname, '..', 'update-summary.txt'),
        this.generateUpdateSummary(updates),
        'utf8'
      );
    }
  }
}

// CLI handling: only run when invoked directly (node scripts/update-n8n-deps.js),
// not when this module is require()'d by another script.
if (require.main === module) {
  // Simple boolean flag parsing; both long and short forms are accepted.
  const args = process.argv.slice(2);
  const options = {
    dryRun: args.includes('--dry-run') || args.includes('-d'),
    skipTests: args.includes('--skip-tests') || args.includes('-s')
  };
  
  const updater = new N8nDependencyUpdater();
  // run() handles its own expected-failure exits; this catch covers
  // anything unexpected and guarantees a non-zero exit code.
  updater.run(options).catch(error => {
    console.error('Unexpected error:', error);
    process.exit(1);
  });
}

// Exported for programmatic use (e.g. from other scripts or tests).
module.exports = N8nDependencyUpdater;
```

--------------------------------------------------------------------------------
/tests/unit/mcp/handlers-n8n-manager-simple.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Simple, focused unit tests for handlers-n8n-manager.ts coverage gaps
 *
 * This test file focuses on specific uncovered lines to achieve >95% coverage
 */

import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest';
import { createHash } from 'crypto';

describe('handlers-n8n-manager Simple Coverage Tests', () => {
  beforeEach(() => {
    // Fresh mocks and module registry per test so module-level caches in the
    // handler module cannot leak state between tests.
    vi.resetAllMocks();
    vi.resetModules();
  });

  afterEach(() => {
    vi.clearAllMocks();
  });

  // Mirrors the handler's instance-cache key derivation:
  // sha256("<url>:<apiKey>:<instanceId>") as a hex digest.
  describe('Cache Key Generation', () => {
    it('should generate deterministic SHA-256 hashes', () => {
      const input1 = 'https://api.n8n.cloud:key123:instance1';
      const input2 = 'https://api.n8n.cloud:key123:instance1';
      const input3 = 'https://api.n8n.cloud:key456:instance2';

      const hash1 = createHash('sha256').update(input1).digest('hex');
      const hash2 = createHash('sha256').update(input2).digest('hex');
      const hash3 = createHash('sha256').update(input3).digest('hex');

      // Same input should produce same hash
      expect(hash1).toBe(hash2);
      // Different input should produce different hash
      expect(hash1).not.toBe(hash3);
      // Hash should be 64 characters (SHA-256)
      expect(hash1).toHaveLength(64);
      expect(hash1).toMatch(/^[a-f0-9]{64}$/);
    });

    it('should handle empty instanceId in cache key generation', () => {
      const url = 'https://api.n8n.cloud';
      const key = 'test-key';
      const instanceId = '';

      const cacheInput = `${url}:${key}:${instanceId}`;
      const hash = createHash('sha256').update(cacheInput).digest('hex');

      expect(hash).toBeDefined();
      expect(hash).toHaveLength(64);
    });

    it('should handle undefined values in cache key generation', () => {
      const url = 'https://api.n8n.cloud';
      const key = 'test-key';
      const instanceId = undefined;

      // This simulates the actual cache key generation in the code
      const cacheInput = `${url}:${key}:${instanceId || ''}`;
      const hash = createHash('sha256').update(cacheInput).digest('hex');

      expect(hash).toBeDefined();
      expect(cacheInput).toBe('https://api.n8n.cloud:test-key:');
    });
  });

  // The handler strips path/query from URLs before logging so credentials or
  // private endpoints never end up in log output.
  describe('URL Sanitization', () => {
    it('should sanitize URLs for logging', () => {
      const fullUrl = 'https://secret.example.com/api/v1/private';

      // This simulates the URL sanitization in the logging code
      const sanitizedUrl = fullUrl.replace(/^(https?:\/\/[^\/]+).*/, '$1');

      expect(sanitizedUrl).toBe('https://secret.example.com');
      expect(sanitizedUrl).not.toContain('/api/v1/private');
    });

    it('should handle various URL formats in sanitization', () => {
      const testUrls = [
        'https://api.n8n.cloud',
        'https://api.n8n.cloud/',
        'https://api.n8n.cloud/webhook/abc123',
        'http://localhost:5678/api/v1',
        'https://subdomain.domain.com/path/to/resource'
      ];

      testUrls.forEach(url => {
        const sanitized = url.replace(/^(https?:\/\/[^\/]+).*/, '$1');

        // Should contain protocol and domain only
        expect(sanitized).toMatch(/^https?:\/\/[^\/]+$/);
        // Should not contain paths (but domain names containing 'api' are OK)
        expect(sanitized).not.toContain('/webhook');
        if (!sanitized.includes('api.n8n.cloud')) {
          expect(sanitized).not.toContain('/api');
        }
        expect(sanitized).not.toContain('/path');
      });
    });
  });

  // Only the first 8 hex chars of a cache key are logged (dispose callback),
  // enough to correlate log lines without revealing the full key material.
  describe('Cache Key Partial Logging', () => {
    it('should create partial cache key for logging', () => {
      const fullHash = 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890';

      // This simulates the partial key logging in the dispose callback
      const partialKey = fullHash.substring(0, 8) + '...';

      expect(partialKey).toBe('abcdef12...');
      expect(partialKey).toHaveLength(11);
      expect(partialKey).toMatch(/^[a-f0-9]{8}\.\.\.$/);
    });

    it('should handle various hash lengths for partial logging', () => {
      const hashes = [
        'a'.repeat(64),
        'b'.repeat(32),
        'c'.repeat(16),
        'd'.repeat(8)
      ];

      hashes.forEach(hash => {
        const partial = hash.substring(0, 8) + '...';
        expect(partial).toHaveLength(11);
        expect(partial.endsWith('...')).toBe(true);
      });
    });
  });

  // Handlers normalize anything thrown into a message string; only real
  // Error instances keep their message, everything else becomes generic.
  describe('Error Message Handling', () => {
    it('should handle different error types correctly', () => {
      // Test the error handling patterns used in the handlers
      const errorTypes = [
        new Error('Standard error'),
        'String error',
        { message: 'Object error' },
        null,
        undefined
      ];

      errorTypes.forEach(error => {
        // This simulates the error handling in handlers
        const errorMessage = error instanceof Error ? error.message : 'Unknown error occurred';

        if (error instanceof Error) {
          expect(errorMessage).toBe(error.message);
        } else {
          expect(errorMessage).toBe('Unknown error occurred');
        }
      });
    });

    it('should handle error objects without message property', () => {
      const errorLikeObject = { code: 500, details: 'Some details' };

      // This simulates error handling for non-Error objects
      const errorMessage = errorLikeObject instanceof Error ?
        errorLikeObject.message : 'Unknown error occurred';

      expect(errorMessage).toBe('Unknown error occurred');
    });
  });

  describe('Configuration Fallbacks', () => {
    it('should handle null config scenarios', () => {
      // Test configuration fallback logic
      const config = null;
      const apiConfigured = config !== null;

      expect(apiConfigured).toBe(false);
    });

    it('should handle undefined config values', () => {
      const contextWithUndefined = {
        n8nApiUrl: 'https://api.n8n.cloud',
        n8nApiKey: 'test-key',
        n8nApiTimeout: undefined,
        n8nApiMaxRetries: undefined
      };

      // Test default value assignment using nullish coalescing
      const timeout = contextWithUndefined.n8nApiTimeout ?? 30000;
      const maxRetries = contextWithUndefined.n8nApiMaxRetries ?? 3;

      expect(timeout).toBe(30000);
      expect(maxRetries).toBe(3);
    });
  });

  describe('Array and Object Handling', () => {
    it('should handle undefined array lengths', () => {
      const workflowData: { nodes?: any[] } = {
        nodes: undefined
      };

      // This simulates the nodeCount calculation in list workflows
      const nodeCount = workflowData.nodes?.length || 0;

      expect(nodeCount).toBe(0);
    });

    it('should handle empty arrays', () => {
      const workflowData = {
        nodes: []
      };

      const nodeCount = workflowData.nodes?.length || 0;

      expect(nodeCount).toBe(0);
    });

    it('should handle arrays with elements', () => {
      const workflowData = {
        nodes: [{ id: 'node1' }, { id: 'node2' }]
      };

      const nodeCount = workflowData.nodes?.length || 0;

      expect(nodeCount).toBe(2);
    });
  });

  describe('Conditional Logic Coverage', () => {
    it('should handle truthy cursor values', () => {
      const response = {
        nextCursor: 'abc123'
      };

      // This simulates the cursor handling logic
      // NOTE(review): `noteCondition` infers as `{ _note: string } | {}`;
      // accessing `._note` on that union may be rejected under strict TS
      // settings — confirm against the project's tsconfig.
      const hasMore = !!response.nextCursor;
      const noteCondition = response.nextCursor ? {
        _note: "More workflows available. Use cursor to get next page."
      } : {};

      expect(hasMore).toBe(true);
      expect(noteCondition._note).toBeDefined();
    });

    it('should handle falsy cursor values', () => {
      const response = {
        nextCursor: null
      };

      const hasMore = !!response.nextCursor;
      const noteCondition = response.nextCursor ? {
        _note: "More workflows available. Use cursor to get next page."
      } : {};

      expect(hasMore).toBe(false);
      expect(noteCondition._note).toBeUndefined();
    });
  });

  describe('String Manipulation', () => {
    it('should handle environment variable filtering', () => {
      const envKeys = [
        'N8N_API_URL',
        'N8N_API_KEY',
        'MCP_MODE',
        'NODE_ENV',
        'PATH',
        'HOME',
        'N8N_CUSTOM_VAR'
      ];

      // This simulates the environment variable filtering in diagnostic
      const filtered = envKeys.filter(key =>
        key.startsWith('N8N_') || key.startsWith('MCP_')
      );

      expect(filtered).toEqual(['N8N_API_URL', 'N8N_API_KEY', 'MCP_MODE', 'N8N_CUSTOM_VAR']);
    });

    it('should handle version string extraction', () => {
      const packageJson = {
        dependencies: {
          n8n: '^1.111.0'
        }
      };

      // This simulates the version extraction logic
      const supportedVersion = packageJson.dependencies?.n8n?.replace(/[^0-9.]/g, '') || '';

      expect(supportedVersion).toBe('1.111.0');
    });

    it('should handle missing dependencies', () => {
      const packageJson: { dependencies?: { n8n?: string } } = {};

      const supportedVersion = packageJson.dependencies?.n8n?.replace(/[^0-9.]/g, '') || '';

      expect(supportedVersion).toBe('');
    });
  });
});
```

--------------------------------------------------------------------------------
/src/utils/template-node-resolver.ts:
--------------------------------------------------------------------------------

```typescript
import { logger } from './logger';

/**
 * Resolves various node type input formats to all possible template node type formats.
 * Templates store node types in full n8n format (e.g., "n8n-nodes-base.slack").
 * This function handles various input formats and expands them to all possible matches.
 * 
 * @param nodeTypes Array of node types in various formats
 * @returns Array of all possible template node type formats
 * 
 * @example
 * resolveTemplateNodeTypes(['slack']) 
 * // Returns: ['n8n-nodes-base.slack', 'n8n-nodes-base.slackTrigger']
 * 
 * resolveTemplateNodeTypes(['nodes-base.webhook'])
 * // Returns: ['n8n-nodes-base.webhook']
 * 
 * resolveTemplateNodeTypes(['httpRequest'])
 * // Returns: ['n8n-nodes-base.httpRequest']
 */
/**
 * Expands node type identifiers (in any accepted input format) into the set
 * of fully-qualified node type strings that workflow templates may contain.
 *
 * Templates always store the full n8n form (e.g. "n8n-nodes-base.slack"),
 * so shorthand inputs such as "slack", "httpRequest" or "nodes-base.webhook"
 * are fanned out to every plausible fully-qualified match.
 *
 * @param nodeTypes Node types in any supported format
 * @returns De-duplicated list of fully-qualified template node types
 */
export function resolveTemplateNodeTypes(nodeTypes: string[]): string[] {
  const expanded = new Set<string>();

  nodeTypes.forEach(inputType => {
    for (const variation of generateTemplateNodeVariations(inputType)) {
      expanded.add(variation);
    }
  });

  const result = [...expanded];
  logger.debug(`Resolved ${nodeTypes.length} input types to ${result.length} template variations`, {
    input: nodeTypes,
    output: result
  });

  return result;
}

/**
 * Generates all possible template node type variations for a single input.
 * 
 * @param nodeType Single node type in any format
 * @returns Array of possible template formats
 */
/**
 * Produces every fully-qualified template node type that a single input
 * identifier could refer to.
 *
 * @param nodeType A node type in any supported input format
 * @returns Possible fully-qualified template node type strings
 */
function generateTemplateNodeVariations(nodeType: string): string[] {
  // Already fully qualified: nothing to expand.
  const fullPrefixes = ['n8n-nodes-base.', '@n8n/n8n-nodes-langchain.'];
  if (fullPrefixes.some(prefix => nodeType.startsWith(prefix))) {
    return [nodeType];
  }

  const variations = new Set<string>();

  if (nodeType.startsWith('nodes-base.')) {
    // Short core-package form, e.g. "nodes-base.slack" -> "n8n-nodes-base.slack".
    const bareName = nodeType.slice('nodes-base.'.length);
    variations.add(`n8n-nodes-base.${bareName}`);
    addCamelCaseVariations(variations, bareName, 'n8n-nodes-base');
  } else if (nodeType.startsWith('nodes-langchain.')) {
    // Short langchain-package form -> "@n8n/n8n-nodes-langchain.<name>".
    const bareName = nodeType.slice('nodes-langchain.'.length);
    variations.add(`@n8n/n8n-nodes-langchain.${bareName}`);
    addCamelCaseVariations(variations, bareName, '@n8n/n8n-nodes-langchain');
  } else if (!nodeType.includes('.')) {
    // Bare name such as "slack", "webhook" or "httpRequest": try both
    // packages with case variations.
    variations.add(`n8n-nodes-base.${nodeType}`);
    addCamelCaseVariations(variations, nodeType, 'n8n-nodes-base');

    // Langchain is less common for bare names but included for completeness.
    variations.add(`@n8n/n8n-nodes-langchain.${nodeType}`);
    addCamelCaseVariations(variations, nodeType, '@n8n/n8n-nodes-langchain');

    // Pull in frequently-paired siblings (e.g. "slack" -> "slackTrigger").
    addRelatedNodeTypes(variations, nodeType);
  }
  // Any other dotted prefix is unrecognized and yields no variations.

  return [...variations];
}

/**
 * Adds camelCase naming variations of a node name to the given set.
 *
 * n8n node identifiers commonly end in suffixes like "Trigger" or "Request";
 * this tries both suffix-capitalization fixes and suffix additions, plus a
 * table of well-known aliases (e.g. "http" -> "httpRequest").
 *
 * @param variations Set collecting fully-qualified node type candidates
 * @param nodeName Raw node name supplied by the caller
 * @param packagePrefix Package used to qualify each candidate
 */
function addCamelCaseVariations(variations: Set<string>, nodeName: string, packagePrefix: string): void {
  const lowerName = nodeName.toLowerCase();

  // Suffixes that frequently appear in n8n node names. `capitalize: true`
  // entries emit a properly-capitalized suffix; `capitalize: false` entries
  // are used verbatim.
  const patterns = [
    { suffix: 'trigger', capitalize: true },
    { suffix: 'Trigger', capitalize: false },
    { suffix: 'request', capitalize: true },
    { suffix: 'Request', capitalize: false },
    { suffix: 'database', capitalize: true },
    { suffix: 'Database', capitalize: false },
    { suffix: 'sheet', capitalize: true },
    { suffix: 'Sheet', capitalize: false },
    { suffix: 'sheets', capitalize: true },
    { suffix: 'Sheets', capitalize: false },
  ];

  for (const { suffix, capitalize } of patterns) {
    const lowerSuffix = suffix.toLowerCase();
    // e.g. "trigger" -> "Trigger"
    const capitalized = suffix.charAt(0).toUpperCase() + suffix.slice(1).toLowerCase();

    if (lowerName.endsWith(lowerSuffix)) {
      // Name already carries the suffix: re-emit it with this pattern's casing.
      const stem = lowerName.slice(0, lowerName.length - lowerSuffix.length);
      if (stem) {
        variations.add(`${packagePrefix}.${stem}${capitalize ? capitalized : suffix}`);
      }
    } else if (capitalize && !lowerName.includes(lowerSuffix)) {
      // Name lacks the suffix entirely: try appending the capitalized form.
      variations.add(`${packagePrefix}.${lowerName}${capitalized}`);
    }
  }

  // Well-known aliases mapping informal names to actual node identifiers.
  const specificCases: Record<string, string[]> = {
    'http': ['httpRequest'],
    'httprequest': ['httpRequest'],
    'mysql': ['mysql', 'mysqlDatabase'],
    'postgres': ['postgres', 'postgresDatabase'],
    'postgresql': ['postgres', 'postgresDatabase'],
    'mongo': ['mongoDb', 'mongodb'],
    'mongodb': ['mongoDb', 'mongodb'],
    'google': ['googleSheets', 'googleDrive', 'googleCalendar'],
    'googlesheet': ['googleSheets'],
    'googlesheets': ['googleSheets'],
    'microsoft': ['microsoftTeams', 'microsoftExcel', 'microsoftOutlook'],
    'slack': ['slack'],
    'discord': ['discord'],
    'telegram': ['telegram'],
    'webhook': ['webhook'],
    'schedule': ['scheduleTrigger'],
    'cron': ['cron', 'scheduleTrigger'],
    'email': ['emailSend', 'emailReadImap', 'gmail'],
    'gmail': ['gmail', 'gmailTrigger'],
    'code': ['code'],
    'javascript': ['code'],
    'python': ['code'],
    'js': ['code'],
    'set': ['set'],
    'if': ['if'],
    'switch': ['switch'],
    'merge': ['merge'],
    'loop': ['splitInBatches'],
    'split': ['splitInBatches', 'splitOut'],
    'ai': ['openAi'],
    'openai': ['openAi'],
    'chatgpt': ['openAi'],
    'gpt': ['openAi'],
    'api': ['httpRequest', 'graphql', 'webhook'],
    'csv': ['spreadsheetFile', 'readBinaryFile'],
    'excel': ['microsoftExcel', 'spreadsheetFile'],
    'spreadsheet': ['spreadsheetFile', 'googleSheets', 'microsoftExcel'],
  };

  specificCases[lowerName]?.forEach(c => variations.add(`${packagePrefix}.${c}`));
}

/**
 * Adds node types commonly associated with a base name.
 * For example, "slack" should also surface "slackTrigger".
 *
 * @param variations Set collecting fully-qualified node type candidates
 * @param nodeName Base node name (matched case-insensitively)
 */
function addRelatedNodeTypes(variations: Set<string>, nodeName: string): void {
  // Map of base names to their related node types.
  const relatedTypes: Record<string, string[]> = {
    'slack': ['slack', 'slackTrigger'],
    'gmail': ['gmail', 'gmailTrigger'],
    'telegram': ['telegram', 'telegramTrigger'],
    'discord': ['discord', 'discordTrigger'],
    'webhook': ['webhook', 'webhookTrigger'],
    'http': ['httpRequest', 'webhook'],
    'email': ['emailSend', 'emailReadImap', 'gmail', 'gmailTrigger'],
    'google': ['googleSheets', 'googleDrive', 'googleCalendar', 'googleDocs'],
    'microsoft': ['microsoftTeams', 'microsoftExcel', 'microsoftOutlook', 'microsoftOneDrive'],
    'database': ['postgres', 'mysql', 'mongoDb', 'redis', 'postgresDatabase', 'mysqlDatabase'],
    'db': ['postgres', 'mysql', 'mongoDb', 'redis'],
    'sql': ['postgres', 'mysql', 'mssql'],
    'nosql': ['mongoDb', 'redis', 'couchDb'],
    'schedule': ['scheduleTrigger', 'cron'],
    'time': ['scheduleTrigger', 'cron', 'wait'],
    'file': ['readBinaryFile', 'writeBinaryFile', 'moveBinaryFile'],
    'binary': ['readBinaryFile', 'writeBinaryFile', 'moveBinaryFile'],
    'csv': ['spreadsheetFile', 'readBinaryFile'],
    'excel': ['microsoftExcel', 'spreadsheetFile'],
    'json': ['code', 'set'],
    'transform': ['code', 'set', 'merge', 'splitInBatches'],
    'ai': ['openAi', 'agent', 'lmChatOpenAi', 'lmChatAnthropic'],
    'llm': ['openAi', 'agent', 'lmChatOpenAi', 'lmChatAnthropic', 'lmChatGoogleGemini'],
    'agent': ['agent', 'toolAgent'],
    'chat': ['chatTrigger', 'agent'],
  };

  // Node types that may also live in the langchain package.
  const langchainCandidates = new Set([
    'agent', 'toolAgent', 'chatTrigger', 'lmChatOpenAi', 'lmChatAnthropic', 'lmChatGoogleGemini',
  ]);

  const related = relatedTypes[nodeName.toLowerCase()];
  if (!related) {
    return;
  }

  for (const r of related) {
    variations.add(`n8n-nodes-base.${r}`);
    if (langchainCandidates.has(r)) {
      variations.add(`@n8n/n8n-nodes-langchain.${r}`);
    }
  }
}
```

--------------------------------------------------------------------------------
/src/telemetry/event-validator.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Event Validator for Telemetry
 * Validates and sanitizes telemetry events using Zod schemas
 */

import { z } from 'zod';
import { TelemetryEvent, WorkflowTelemetry } from './telemetry-types';
import { logger } from '../utils/logger';

// Base property schema that sanitizes free-form strings before they leave
// the process. Order matters: URLs are replaced first so their long path or
// query segments are not partially consumed by the API-key pattern below.
const sanitizedString = z.string().transform(val => {
  // Remove URLs (http/https up to the next whitespace)
  let sanitized = val.replace(/https?:\/\/[^\s]+/gi, '[URL]');
  // Remove potential API keys: any 32+ char run of [a-zA-Z0-9_-] is assumed secret
  sanitized = sanitized.replace(/[a-zA-Z0-9_-]{32,}/g, '[KEY]');
  // Remove email addresses
  sanitized = sanitized.replace(/[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/g, '[EMAIL]');
  return sanitized;
});

// Schema for generic event properties: drops sensitive keys, scrubs string
// values, passes primitives through, and recursively sanitizes nested
// objects down to a depth of 3.
const eventPropertiesSchema = z.record(z.unknown()).transform(obj => {
  const sanitized: Record<string, any> = {};

  for (const [key, value] of Object.entries(obj)) {
    // Skip sensitive keys entirely (redaction by omission)
    if (isSensitiveKey(key)) {
      continue;
    }

    // Sanitize string values (URLs, keys, emails)
    if (typeof value === 'string') {
      sanitized[key] = sanitizedString.parse(value);
    } else if (typeof value === 'number' || typeof value === 'boolean') {
      sanitized[key] = value;
    } else if (value === null || value === undefined) {
      // Normalize undefined to null so the payload stays JSON-safe
      sanitized[key] = null;
    } else if (typeof value === 'object') {
      // Recursively sanitize nested objects (limited depth)
      sanitized[key] = sanitizeNestedObject(value, 3);
    }
    // Anything else (function, symbol, bigint) is silently dropped.
  }

  return sanitized;
});

// Schema for telemetry events.
// Event names are restricted to [a-zA-Z0-9_-] so they stay queryable.
export const telemetryEventSchema = z.object({
  user_id: z.string().min(1).max(64),
  event: z.string().min(1).max(100).regex(/^[a-zA-Z0-9_-]+$/),
  properties: eventPropertiesSchema,
  created_at: z.string().datetime().optional()
});

// Schema for workflow telemetry.
// The caps (1000 nodes, 100 node types) bound the payload size of a single row.
export const workflowTelemetrySchema = z.object({
  user_id: z.string().min(1).max(64),
  workflow_hash: z.string().min(1).max(64),
  node_count: z.number().int().min(0).max(1000),
  node_types: z.array(z.string()).max(100),
  has_trigger: z.boolean(),
  has_webhook: z.boolean(),
  complexity: z.enum(['simple', 'medium', 'complex']),
  sanitized_workflow: z.object({
    nodes: z.array(z.any()).max(1000),
    connections: z.record(z.any())
  }),
  created_at: z.string().datetime().optional()
});

// Specific event property schemas for common events.
// Looked up by event name via EVENT_SCHEMAS below; events without a specific
// schema only go through the generic eventPropertiesSchema.
const toolUsagePropertiesSchema = z.object({
  tool: z.string().max(100),
  success: z.boolean(),
  duration: z.number().min(0).max(3600000), // Max 1 hour
});

// Search queries re-apply the string sanitization inline because the field
// is validated with a specific schema rather than the generic transform.
const searchQueryPropertiesSchema = z.object({
  query: z.string().max(100).transform(val => {
    // Apply same sanitization as sanitizedString
    let sanitized = val.replace(/https?:\/\/[^\s]+/gi, '[URL]');
    sanitized = sanitized.replace(/[a-zA-Z0-9_-]{32,}/g, '[KEY]');
    sanitized = sanitized.replace(/[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}/g, '[EMAIL]');
    return sanitized;
  }),
  resultsFound: z.number().int().min(0),
  searchType: z.string().max(50),
  hasResults: z.boolean(),
  isZeroResults: z.boolean()
});

const validationDetailsPropertiesSchema = z.object({
  nodeType: z.string().max(100),
  errorType: z.string().max(100),
  errorCategory: z.string().max(50),
  details: z.record(z.any()).optional()
});

const performanceMetricPropertiesSchema = z.object({
  operation: z.string().max(100),
  duration: z.number().min(0).max(3600000),
  isSlow: z.boolean(),
  isVerySlow: z.boolean(),
  metadata: z.record(z.any()).optional()
});

// Schema for startup_error event properties (v2.18.2)
const startupErrorPropertiesSchema = z.object({
  checkpoint: z.string().max(100),
  errorMessage: z.string().max(500),
  errorType: z.string().max(100),
  checkpointsPassed: z.array(z.string()).max(20),
  checkpointsPassedCount: z.number().int().min(0).max(20),
  startupDuration: z.number().min(0).max(300000), // Max 5 minutes
  platform: z.string().max(50),
  arch: z.string().max(50),
  nodeVersion: z.string().max(50),
  isDocker: z.boolean()
});

// Schema for startup_completed event properties (v2.18.2)
const startupCompletedPropertiesSchema = z.object({
  version: z.string().max(50)
});

// Map of event names to their specific schemas.
// Consulted by TelemetryEventValidator.validateEvent before the generic pass.
const EVENT_SCHEMAS: Record<string, z.ZodSchema<any>> = {
  'tool_used': toolUsagePropertiesSchema,
  'search_query': searchQueryPropertiesSchema,
  'validation_details': validationDetailsPropertiesSchema,
  'performance_metric': performanceMetricPropertiesSchema,
  'startup_error': startupErrorPropertiesSchema,
  'startup_completed': startupCompletedPropertiesSchema,
};

/**
 * Check if a property key is likely to carry sensitive data.
 * Handles various naming conventions: camelCase, snake_case, kebab-case,
 * and case variations.
 *
 * Matching is a deliberately aggressive case-insensitive substring test:
 * any key containing one of the patterns is treated as sensitive, so
 * over-redaction (e.g. "author" matching "auth") is accepted in exchange
 * for never leaking a credential-bearing key.
 *
 * @param key Property name to inspect
 * @returns true when the key should be redacted from telemetry
 */
function isSensitiveKey(key: string): boolean {
  const sensitivePatterns = [
    // Core sensitive terms
    'password', 'passwd', 'pwd',
    'token', 'jwt', 'bearer',
    'apikey', 'api_key', 'api-key',
    'secret', 'private',
    'credential', 'cred', 'auth',

    // Network/Connection sensitive
    'url', 'uri', 'endpoint', 'host', 'hostname',
    'database', 'db', 'connection', 'conn',

    // Service-specific
    'slack', 'discord', 'telegram',
    'oauth', 'client_secret', 'client-secret', 'clientsecret',
    'access_token', 'access-token', 'accesstoken',
    'refresh_token', 'refresh-token', 'refreshtoken'
  ];

  const lowerKey = key.toLowerCase();

  // A single substring scan subsumes the previous exact-match, compound-"key",
  // and word-boundary-regex checks: every key those accepted contains one of
  // the patterns as a substring, so behavior is unchanged while avoiding
  // per-call regex construction.
  return sensitivePatterns.some(pattern => lowerKey.includes(pattern));
}

/**
 * Sanitize nested objects with depth limit.
 *
 * Beyond `maxDepth` levels (or for non-object input) the value is collapsed
 * to the literal string '[NESTED]'. Arrays are truncated to their first 10
 * elements; objects are capped at 20 keys with a trailing '...' marker.
 * Note that skipped sensitive keys still consume a slot toward the 20-key
 * cap because the counter is incremented before the sensitivity check.
 *
 * @param obj Value to sanitize (any type is tolerated)
 * @param maxDepth Remaining recursion depth; decremented per level
 */
function sanitizeNestedObject(obj: any, maxDepth: number): any {
  if (maxDepth <= 0 || !obj || typeof obj !== 'object') {
    return '[NESTED]';
  }

  if (Array.isArray(obj)) {
    // Keep at most 10 elements; nested objects recurse with reduced depth.
    return obj.slice(0, 10).map(item =>
      typeof item === 'object' ? sanitizeNestedObject(item, maxDepth - 1) : item
    );
  }

  const sanitized: Record<string, any> = {};
  let keyCount = 0;

  for (const [key, value] of Object.entries(obj)) {
    if (keyCount++ >= 20) { // Limit keys per object
      sanitized['...'] = 'truncated';
      break;
    }

    // Drop sensitive keys entirely (redaction by omission).
    if (isSensitiveKey(key)) {
      continue;
    }

    if (typeof value === 'string') {
      // Scrub URLs / long key-like runs / emails from string values.
      sanitized[key] = sanitizedString.parse(value);
    } else if (typeof value === 'object' && value !== null) {
      sanitized[key] = sanitizeNestedObject(value, maxDepth - 1);
    } else {
      sanitized[key] = value;
    }
  }

  return sanitized;
}

/**
 * Validates and sanitizes telemetry payloads before they are queued for
 * delivery. Invalid payloads are dropped (methods return null) rather than
 * throwing, and simple pass/fail counters are kept for observability.
 */
export class TelemetryEventValidator {
  // Running counters exposed via getStats(); reset with resetStats().
  private validationErrors: number = 0;
  private validationSuccesses: number = 0;

  /**
   * Validate and sanitize a telemetry event.
   *
   * If a per-event schema exists in EVENT_SCHEMAS it is applied to
   * `event.properties` first, then the full event is run through the
   * generic telemetryEventSchema.
   * NOTE(review): the incoming `event` is mutated in place when a specific
   * schema succeeds (`event.properties` is replaced with the parsed copy) —
   * callers should not rely on the original properties afterwards.
   *
   * @param event Raw telemetry event
   * @returns the validated/sanitized event, or null when validation fails
   */
  validateEvent(event: TelemetryEvent): TelemetryEvent | null {
    try {
      // Use specific schema if available for this event type
      const specificSchema = EVENT_SCHEMAS[event.event];

      if (specificSchema) {
        // Validate properties with specific schema first
        const validatedProperties = specificSchema.safeParse(event.properties);
        if (!validatedProperties.success) {
          logger.debug(`Event validation failed for ${event.event}:`, validatedProperties.error.errors);
          this.validationErrors++;
          return null;
        }
        event.properties = validatedProperties.data;
      }

      // Validate the complete event
      const validated = telemetryEventSchema.parse(event);
      this.validationSuccesses++;
      return validated;
    } catch (error) {
      if (error instanceof z.ZodError) {
        logger.debug('Event validation error:', error.errors);
      } else {
        logger.debug('Unexpected validation error:', error);
      }
      this.validationErrors++;
      return null;
    }
  }

  /**
   * Validate workflow telemetry.
   *
   * @param workflow Workflow payload to validate against workflowTelemetrySchema
   * @returns the validated workflow, or null when validation fails
   */
  validateWorkflow(workflow: WorkflowTelemetry): WorkflowTelemetry | null {
    try {
      const validated = workflowTelemetrySchema.parse(workflow);
      this.validationSuccesses++;
      return validated;
    } catch (error) {
      if (error instanceof z.ZodError) {
        logger.debug('Workflow validation error:', error.errors);
      } else {
        logger.debug('Unexpected workflow validation error:', error);
      }
      this.validationErrors++;
      return null;
    }
  }

  /**
   * Get validation statistics.
   * `errorRate` falls back to 0 when no validations have run (the `|| 0`
   * guards the NaN from 0/0).
   */
  getStats() {
    return {
      errors: this.validationErrors,
      successes: this.validationSuccesses,
      total: this.validationErrors + this.validationSuccesses,
      errorRate: this.validationErrors / (this.validationErrors + this.validationSuccesses) || 0
    };
  }

  /**
   * Reset statistics counters to zero.
   */
  resetStats(): void {
    this.validationErrors = 0;
    this.validationSuccesses = 0;
  }
}
```

--------------------------------------------------------------------------------
/src/telemetry/early-error-logger.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Early Error Logger (v2.18.3)
 * Captures errors that occur BEFORE the main telemetry system is ready
 * Uses direct Supabase insert to bypass batching and ensure immediate persistence
 *
 * CRITICAL FIXES:
 * - Singleton pattern to prevent multiple instances
 * - Defensive initialization (safe defaults before any throwing operation)
 * - Timeout wrapper for Supabase operations (5s max)
 * - Shared sanitization utilities (DRY principle)
 */

import { createClient, SupabaseClient } from '@supabase/supabase-js';
import { TelemetryConfigManager } from './config-manager';
import { TELEMETRY_BACKEND } from './telemetry-types';
import { StartupCheckpoint, isValidCheckpoint, getCheckpointDescription } from './startup-checkpoints';
import { sanitizeErrorMessageCore } from './error-sanitization-utils';
import { logger } from '../utils/logger';

/**
 * Timeout wrapper for async operations.
 * Prevents hanging if Supabase is unreachable.
 *
 * Resolves with the promise's value, or null if the promise rejects or the
 * timeout elapses first. The pending timer is always cleared in `finally`,
 * so a fast resolution no longer leaves a live timeout keeping the Node
 * event loop alive for up to `timeoutMs` afterwards.
 *
 * @param promise Operation to guard
 * @param timeoutMs Maximum time to wait before giving up
 * @param operation Human-readable label used in debug log messages
 */
async function withTimeout<T>(promise: Promise<T>, timeoutMs: number, operation: string): Promise<T | null> {
  let timer: ReturnType<typeof setTimeout> | undefined;
  try {
    const timeoutPromise = new Promise<T>((_, reject) => {
      timer = setTimeout(() => reject(new Error(`${operation} timeout after ${timeoutMs}ms`)), timeoutMs);
    });

    return await Promise.race([promise, timeoutPromise]);
  } catch (error) {
    logger.debug(`${operation} failed or timed out:`, error);
    return null;
  } finally {
    // Release the timer on every exit path (win, loss, or rejection).
    if (timer !== undefined) {
      clearTimeout(timer);
    }
  }
}

export class EarlyErrorLogger {
  // Singleton instance
  private static instance: EarlyErrorLogger | null = null;

  // DEFENSIVE INITIALIZATION: Initialize all fields to safe defaults FIRST
  // This ensures the object is in a valid state even if initialization fails
  private enabled: boolean = false;  // Safe default: disabled
  private supabase: SupabaseClient | null = null;  // Safe default: null
  private userId: string | null = null;  // Safe default: null
  private checkpoints: StartupCheckpoint[] = [];  // Checkpoints passed so far, in order
  private startTime: number = Date.now();  // Basis for startup duration measurements
  private initPromise: Promise<void>;  // Resolves when async initialize() finishes

  /**
   * Private constructor - use getInstance() instead
   * Ensures only one instance exists per process
   */
  private constructor() {
    // Kick off async initialization without blocking
    this.initPromise = this.initialize();
  }

  /**
   * Get singleton instance
   * Safe to call from anywhere - initialization errors won't crash caller
   */
  static getInstance(): EarlyErrorLogger {
    if (!EarlyErrorLogger.instance) {
      EarlyErrorLogger.instance = new EarlyErrorLogger();
    }
    return EarlyErrorLogger.instance;
  }

  /**
   * Async initialization logic
   * Separated from constructor to prevent throwing before safe defaults are set
   *
   * Leaves the logger disabled when the backend is unconfigured, the user
   * opted out, or any step throws; `enabled` flips to true only after every
   * step succeeded.
   */
  private async initialize(): Promise<void> {
    try {
      // Validate backend configuration before using
      if (!TELEMETRY_BACKEND.URL || !TELEMETRY_BACKEND.ANON_KEY) {
        logger.debug('Telemetry backend not configured, early error logger disabled');
        this.enabled = false;
        return;
      }

      // Check if telemetry is disabled by user
      const configManager = TelemetryConfigManager.getInstance();
      const isEnabled = configManager.isEnabled();

      if (!isEnabled) {
        logger.debug('Telemetry disabled by user, early error logger will not send events');
        this.enabled = false;
        return;
      }

      // Initialize Supabase client for direct inserts
      this.supabase = createClient(
        TELEMETRY_BACKEND.URL,
        TELEMETRY_BACKEND.ANON_KEY,
        {
          auth: {
            persistSession: false,
            autoRefreshToken: false,
          },
        }
      );

      // Get user ID from config manager
      this.userId = configManager.getUserId();

      // Mark as enabled only after successful initialization
      this.enabled = true;

      logger.debug('Early error logger initialized successfully');
    } catch (error) {
      // Initialization failed - ensure safe state
      logger.debug('Early error logger initialization failed:', error);
      this.enabled = false;
      this.supabase = null;
      this.userId = null;
    }
  }

  /**
   * Wait for initialization to complete (for testing)
   * Not needed in production - all methods handle uninitialized state gracefully
   */
  async waitForInit(): Promise<void> {
    await this.initPromise;
  }

  /**
   * Log a checkpoint as the server progresses through startup
   * FIRE-AND-FORGET: Does not block caller (no await needed)
   *
   * NOTE(review): checkpoints recorded while `enabled` is still false
   * (e.g. before async initialize() resolves) are dropped — confirm this
   * race is acceptable for very early checkpoints.
   */
  logCheckpoint(checkpoint: StartupCheckpoint): void {
    if (!this.enabled) {
      return;
    }

    try {
      // Validate checkpoint
      if (!isValidCheckpoint(checkpoint)) {
        logger.warn(`Invalid checkpoint: ${checkpoint}`);
        return;
      }

      // Add to internal checkpoint list
      this.checkpoints.push(checkpoint);

      logger.debug(`Checkpoint passed: ${checkpoint} (${getCheckpointDescription(checkpoint)})`);
    } catch (error) {
      // Don't throw - we don't want checkpoint logging to crash the server
      logger.debug('Failed to log checkpoint:', error);
    }
  }

  /**
   * Log a startup error with checkpoint context
   * This is the main error capture mechanism
   * FIRE-AND-FORGET: Does not block caller
   *
   * Silently drops the event if initialization has not completed yet
   * (enabled/supabase/userId are still at their safe defaults).
   */
  logStartupError(checkpoint: StartupCheckpoint, error: unknown): void {
    if (!this.enabled || !this.supabase || !this.userId) {
      return;
    }

    // Run async operation without blocking caller
    this.logStartupErrorAsync(checkpoint, error).catch((logError) => {
      // Swallow errors - telemetry must never crash the server
      logger.debug('Failed to log startup error:', logError);
    });
  }

  /**
   * Internal async implementation with timeout wrapper
   */
  private async logStartupErrorAsync(checkpoint: StartupCheckpoint, error: unknown): Promise<void> {
    try {
      // Sanitize error message using shared utilities (v2.18.3)
      // Prefer the full stack trace when available (it includes the message);
      // fall back to the message, the raw string, or a stringified value.
      let errorMessage = 'Unknown error';
      if (error instanceof Error) {
        errorMessage = error.message;
        if (error.stack) {
          errorMessage = error.stack;
        }
      } else if (typeof error === 'string') {
        errorMessage = error;
      } else {
        errorMessage = String(error);
      }

      const sanitizedError = sanitizeErrorMessageCore(errorMessage);

      // Extract error type if it's an Error object
      let errorType = 'unknown';
      if (error instanceof Error) {
        errorType = error.name || 'Error';
      } else if (typeof error === 'string') {
        errorType = 'string_error';
      }

      // Create startup_error event
      const event = {
        user_id: this.userId!,
        event: 'startup_error',
        properties: {
          checkpoint,
          errorMessage: sanitizedError,
          errorType,
          checkpointsPassed: this.checkpoints,
          checkpointsPassedCount: this.checkpoints.length,
          startupDuration: Date.now() - this.startTime,
          platform: process.platform,
          arch: process.arch,
          nodeVersion: process.version,
          isDocker: process.env.IS_DOCKER === 'true',
        },
        created_at: new Date().toISOString(),
      };

      // Direct insert to Supabase with timeout (5s max)
      const insertOperation = async () => {
        return await this.supabase!
          .from('events')
          .insert(event)
          .select()
          .single();
      };

      const result = await withTimeout(insertOperation(), 5000, 'Startup error insert');

      if (result && 'error' in result && result.error) {
        logger.debug('Failed to insert startup error event:', result.error);
      } else if (result) {
        logger.debug(`Startup error logged for checkpoint: ${checkpoint}`);
      }
    } catch (logError) {
      // Don't throw - telemetry failures should never crash the server
      logger.debug('Failed to log startup error:', logError);
    }
  }

  /**
   * Log successful startup completion
   * Called when all checkpoints have been passed
   * FIRE-AND-FORGET: Does not block caller
   *
   * Replaces the internally-collected checkpoint list with the caller's.
   */
  logStartupSuccess(checkpoints: StartupCheckpoint[], durationMs: number): void {
    if (!this.enabled) {
      return;
    }

    try {
      // Store checkpoints for potential session_start enhancement
      this.checkpoints = checkpoints;

      logger.debug(`Startup successful: ${checkpoints.length} checkpoints passed in ${durationMs}ms`);

      // We don't send a separate event here - this data will be included
      // in the session_start event sent by the main telemetry system
    } catch (error) {
      logger.debug('Failed to log startup success:', error);
    }
  }

  /**
   * Get the list of checkpoints passed so far
   * Returns a defensive copy so callers cannot mutate internal state.
   */
  getCheckpoints(): StartupCheckpoint[] {
    return [...this.checkpoints];
  }

  /**
   * Get startup duration in milliseconds
   */
  getStartupDuration(): number {
    return Date.now() - this.startTime;
  }

  /**
   * Get startup data for inclusion in session_start event
   * Returns null when the logger is disabled.
   */
  getStartupData(): { durationMs: number; checkpoints: StartupCheckpoint[] } | null {
    if (!this.enabled) {
      return null;
    }

    return {
      durationMs: this.getStartupDuration(),
      checkpoints: this.getCheckpoints(),
    };
  }

  /**
   * Check if early logger is enabled
   */
  isEnabled(): boolean {
    return this.enabled && this.supabase !== null && this.userId !== null;
  }
}

```

--------------------------------------------------------------------------------
/src/telemetry/telemetry-manager.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Telemetry Manager
 * Main telemetry coordinator using modular components
 */

import { createClient, SupabaseClient } from '@supabase/supabase-js';
import { TelemetryConfigManager } from './config-manager';
import { TelemetryEventTracker } from './event-tracker';
import { TelemetryBatchProcessor } from './batch-processor';
import { TelemetryPerformanceMonitor } from './performance-monitor';
import { TELEMETRY_BACKEND } from './telemetry-types';
import { TelemetryError, TelemetryErrorType, TelemetryErrorAggregator } from './telemetry-error';
import { logger } from '../utils/logger';

/**
 * Coordinates all telemetry: lazily initializes the Supabase client on first
 * tracked event, delegates tracking to TelemetryEventTracker, batching to
 * TelemetryBatchProcessor, and records its own errors/performance.
 */
export class TelemetryManager {
  private static instance: TelemetryManager;
  private supabase: SupabaseClient | null = null;
  private configManager: TelemetryConfigManager;
  private eventTracker: TelemetryEventTracker;
  private batchProcessor: TelemetryBatchProcessor;
  private performanceMonitor: TelemetryPerformanceMonitor;
  private errorAggregator: TelemetryErrorAggregator;
  private isInitialized: boolean = false;

  private constructor() {
    // Prevent direct instantiation even when TypeScript is bypassed
    if (TelemetryManager.instance) {
      throw new Error('Use TelemetryManager.getInstance() instead of new TelemetryManager()');
    }

    this.configManager = TelemetryConfigManager.getInstance();
    this.errorAggregator = new TelemetryErrorAggregator();
    this.performanceMonitor = new TelemetryPerformanceMonitor();

    // Initialize event tracker with callbacks
    this.eventTracker = new TelemetryEventTracker(
      () => this.configManager.getUserId(),
      () => this.isEnabled()
    );

    // Initialize batch processor (will be configured after Supabase init)
    this.batchProcessor = new TelemetryBatchProcessor(
      null,
      () => this.isEnabled()
    );

    // Delay initialization to first use, not constructor
    // this.initialize();
  }

  static getInstance(): TelemetryManager {
    if (!TelemetryManager.instance) {
      TelemetryManager.instance = new TelemetryManager();
    }
    return TelemetryManager.instance;
  }

  /**
   * Ensure telemetry is initialized before use.
   * Every public track* method must call this first: isEnabled() returns
   * false until initialization has happened, so events tracked beforehand
   * would be silently dropped by the event tracker.
   */
  private ensureInitialized(): void {
    if (!this.isInitialized && this.configManager.isEnabled()) {
      this.initialize();
    }
  }

  /**
   * Initialize telemetry if enabled
   */
  private initialize(): void {
    if (!this.configManager.isEnabled()) {
      logger.debug('Telemetry disabled by user preference');
      return;
    }

    // Use hardcoded credentials for zero-configuration telemetry
    // Environment variables can override for development/testing
    const supabaseUrl = process.env.SUPABASE_URL || TELEMETRY_BACKEND.URL;
    const supabaseAnonKey = process.env.SUPABASE_ANON_KEY || TELEMETRY_BACKEND.ANON_KEY;

    try {
      this.supabase = createClient(supabaseUrl, supabaseAnonKey, {
        auth: {
          persistSession: false,
          autoRefreshToken: false,
        },
        realtime: {
          params: {
            eventsPerSecond: 1,
          },
        },
      });

      // Update batch processor with Supabase client
      this.batchProcessor = new TelemetryBatchProcessor(
        this.supabase,
        () => this.isEnabled()
      );

      this.batchProcessor.start();
      this.isInitialized = true;

      logger.debug('Telemetry initialized successfully');
    } catch (error) {
      const telemetryError = new TelemetryError(
        TelemetryErrorType.INITIALIZATION_ERROR,
        'Failed to initialize telemetry',
        { error: error instanceof Error ? error.message : String(error) }
      );
      this.errorAggregator.record(telemetryError);
      telemetryError.log();
      this.isInitialized = false;
    }
  }

  /**
   * Track a tool usage event
   */
  trackToolUsage(toolName: string, success: boolean, duration?: number): void {
    this.ensureInitialized();
    this.performanceMonitor.startOperation('trackToolUsage');
    this.eventTracker.trackToolUsage(toolName, success, duration);
    this.eventTracker.updateToolSequence(toolName);
    this.performanceMonitor.endOperation('trackToolUsage');
  }

  /**
   * Track workflow creation.
   * Flushes immediately after queueing to reduce the risk of losing
   * workflow payloads if the process exits.
   */
  async trackWorkflowCreation(workflow: any, validationPassed: boolean): Promise<void> {
    this.ensureInitialized();
    this.performanceMonitor.startOperation('trackWorkflowCreation');
    try {
      await this.eventTracker.trackWorkflowCreation(workflow, validationPassed);
      // Auto-flush workflows to prevent data loss
      await this.flush();
    } catch (error) {
      const telemetryError = error instanceof TelemetryError
        ? error
        : new TelemetryError(
            TelemetryErrorType.UNKNOWN_ERROR,
            'Failed to track workflow',
            { error: String(error) }
          );
      this.errorAggregator.record(telemetryError);
    } finally {
      this.performanceMonitor.endOperation('trackWorkflowCreation');
    }
  }


  /**
   * Track an error event
   */
  trackError(errorType: string, context: string, toolName?: string, errorMessage?: string): void {
    this.ensureInitialized();
    this.eventTracker.trackError(errorType, context, toolName, errorMessage);
  }

  /**
   * Track a generic event
   */
  trackEvent(eventName: string, properties: Record<string, any>): void {
    this.ensureInitialized();
    this.eventTracker.trackEvent(eventName, properties);
  }

  /**
   * Track session start
   */
  trackSessionStart(): void {
    this.ensureInitialized();
    this.eventTracker.trackSessionStart();
  }

  /**
   * Track search queries
   */
  trackSearchQuery(query: string, resultsFound: number, searchType: string): void {
    // FIX: was missing ensureInitialized(), so queries tracked before any
    // other telemetry call were silently dropped (isEnabled() is false
    // until initialization).
    this.ensureInitialized();
    this.eventTracker.trackSearchQuery(query, resultsFound, searchType);
  }

  /**
   * Track validation details
   */
  trackValidationDetails(nodeType: string, errorType: string, details: Record<string, any>): void {
    this.ensureInitialized();
    this.eventTracker.trackValidationDetails(nodeType, errorType, details);
  }

  /**
   * Track tool sequences
   */
  trackToolSequence(previousTool: string, currentTool: string, timeDelta: number): void {
    this.ensureInitialized();
    this.eventTracker.trackToolSequence(previousTool, currentTool, timeDelta);
  }

  /**
   * Track node configuration
   */
  trackNodeConfiguration(nodeType: string, propertiesSet: number, usedDefaults: boolean): void {
    this.ensureInitialized();
    this.eventTracker.trackNodeConfiguration(nodeType, propertiesSet, usedDefaults);
  }

  /**
   * Track performance metrics
   */
  trackPerformanceMetric(operation: string, duration: number, metadata?: Record<string, any>): void {
    this.ensureInitialized();
    this.eventTracker.trackPerformanceMetric(operation, duration, metadata);
  }


  /**
   * Flush queued events to Supabase.
   * Queues are cleared before the network call, so a failed flush loses
   * that batch rather than re-sending it (at-most-once delivery).
   */
  async flush(): Promise<void> {
    this.ensureInitialized();
    if (!this.isEnabled() || !this.supabase) return;

    this.performanceMonitor.startOperation('flush');

    // Get queued data from event tracker
    const events = this.eventTracker.getEventQueue();
    const workflows = this.eventTracker.getWorkflowQueue();

    // Clear queues immediately to prevent duplicate processing
    this.eventTracker.clearEventQueue();
    this.eventTracker.clearWorkflowQueue();

    try {
      // Use batch processor to flush
      await this.batchProcessor.flush(events, workflows);
    } catch (error) {
      const telemetryError = error instanceof TelemetryError
        ? error
        : new TelemetryError(
            TelemetryErrorType.NETWORK_ERROR,
            'Failed to flush telemetry',
            { error: String(error) },
            true // Retryable
          );
      this.errorAggregator.record(telemetryError);
      telemetryError.log();
    } finally {
      const duration = this.performanceMonitor.endOperation('flush');
      if (duration > 100) {
        logger.debug(`Telemetry flush took ${duration.toFixed(2)}ms`);
      }
    }
  }


  /**
   * Check if telemetry is enabled (requires both successful initialization
   * and the user's opt-in preference)
   */
  private isEnabled(): boolean {
    return this.isInitialized && this.configManager.isEnabled();
  }

  /**
   * Disable telemetry
   */
  disable(): void {
    this.configManager.disable();
    this.batchProcessor.stop();
    this.isInitialized = false;
    this.supabase = null;
  }

  /**
   * Enable telemetry
   */
  enable(): void {
    this.configManager.enable();
    this.initialize();
  }

  /**
   * Get telemetry status
   */
  getStatus(): string {
    return this.configManager.getStatus();
  }

  /**
   * Get comprehensive telemetry metrics
   */
  getMetrics() {
    return {
      status: this.isEnabled() ? 'enabled' : 'disabled',
      initialized: this.isInitialized,
      tracking: this.eventTracker.getStats(),
      processing: this.batchProcessor.getMetrics(),
      errors: this.errorAggregator.getStats(),
      performance: this.performanceMonitor.getDetailedReport(),
      overhead: this.performanceMonitor.getTelemetryOverhead()
    };
  }

  /**
   * Reset singleton instance (for testing purposes)
   */
  static resetInstance(): void {
    TelemetryManager.instance = undefined as any;
    (global as any).__telemetryManager = undefined;
  }
}

// Create a global singleton to ensure only one instance across all imports.
// The instance is stashed on `global` rather than relying on module-level
// state alone — presumably to guard against the module being loaded more
// than once (e.g. mixed CJS/ESM resolution or duplicated install paths).
const globalAny = global as any;

if (!globalAny.__telemetryManager) {
  globalAny.__telemetryManager = TelemetryManager.getInstance();
}

// Export singleton instance shared by every importer of this module.
export const telemetry = globalAny.__telemetryManager as TelemetryManager;
```

--------------------------------------------------------------------------------
/tests/utils/data-generators.ts:
--------------------------------------------------------------------------------

```typescript
import { faker } from '@faker-js/faker';
import { WorkflowNode, Workflow } from '@/types/n8n-api';

// Use any type for INodeDefinition since it's from n8n-workflow package
type INodeDefinition = any;

/**
 * Data generators for creating realistic test data
 */

/**
 * Generate a random, fully-qualified node type string.
 *
 * Combines a random package prefix (core nodes or LangChain nodes) with a
 * random short node name, e.g. "n8n-nodes-base.webhook".
 */
export function generateNodeType(): string {
  const packageNames = ['n8n-nodes-base', '@n8n/n8n-nodes-langchain'];
  const shortNames = [
    'webhook', 'httpRequest', 'slack', 'googleSheets', 'postgres',
    'function', 'code', 'if', 'switch', 'merge', 'splitInBatches',
    'emailSend', 'redis', 'mongodb', 'mysql', 'ftp', 'ssh'
  ];

  const packageName = faker.helpers.arrayElement(packageNames);
  const shortName = faker.helpers.arrayElement(shortNames);

  return `${packageName}.${shortName}`;
}

/**
 * Generate `count` random property definitions for a node.
 *
 * Each property gets a random primitive type, a display name drawn from
 * common n8n parameter labels, a slugified machine name, a type-appropriate
 * default (via generateDefaultValue), and a lorem description. Independent
 * coin flips mark roughly half the properties required and give roughly
 * half conditional displayOptions.
 *
 * @param count - number of property definitions to generate (default 5)
 * @returns array of loosely-typed property definition objects
 */
export function generateProperties(count = 5): any[] {
  const properties = [];
  
  for (let i = 0; i < count; i++) {
    const type = faker.helpers.arrayElement([
      'string', 'number', 'boolean', 'options', 'collection'
    ]);
    
    const property: any = {
      displayName: faker.helpers.arrayElement([
        'Resource', 'Operation', 'Field', 'Value', 'Method',
        'URL', 'Headers', 'Body', 'Authentication', 'Options'
      ]),
      name: faker.helpers.slugify(faker.word.noun()).toLowerCase(),
      type,
      default: generateDefaultValue(type),
      description: faker.lorem.sentence()
    };
    
    // Only select-style properties carry an options list.
    if (type === 'options') {
      property.options = generateOptions();
    }
    
    if (faker.datatype.boolean()) {
      property.required = true;
    }
    
    if (faker.datatype.boolean()) {
      property.displayOptions = generateDisplayOptions();
    }
    
    properties.push(property);
  }
  
  return properties;
}

/**
 * Generate default value based on type
 */
function generateDefaultValue(type: string): any {
  switch (type) {
    case 'string':
      return faker.lorem.word();
    case 'number':
      return faker.number.int({ min: 0, max: 100 });
    case 'boolean':
      return faker.datatype.boolean();
    case 'options':
      return 'option1';
    case 'collection':
      return {};
    default:
      return '';
  }
}

/**
 * Generate `count` option entries for a select/options field.
 * Values are stable ("option1".."optionN"); names and descriptions random.
 */
function generateOptions(count = 3): any[] {
  return Array.from({ length: count }, (_, index) => ({
    name: faker.helpers.arrayElement([
      'Create', 'Read', 'Update', 'Delete', 'List',
      'Get', 'Post', 'Put', 'Patch', 'Send'
    ]),
    value: `option${index + 1}`,
    description: faker.lorem.sentence()
  }));
}

/**
 * Generate displayOptions that show a field only for one random
 * resource/operation combination.
 */
function generateDisplayOptions(): any {
  const resource = faker.helpers.arrayElement(['user', 'post', 'message']);
  const operation = faker.helpers.arrayElement(['create', 'update', 'get']);

  return {
    show: {
      resource: [resource],
      operation: [operation]
    }
  };
}

/**
 * Generate a complete, realistic-looking node definition.
 *
 * Produces a faker-backed object mimicking an n8n INodeDefinition: random
 * display/machine names, group, version, random properties (via
 * generateProperties), credentials on a ~50% coin flip, and codex category
 * metadata keyed by a random category.
 *
 * @param overrides - fields to merge over the generated definition
 *                    (spread last, so caller-provided values win)
 */
export function generateNodeDefinition(overrides?: Partial<INodeDefinition>): any {
  const nodeCategory = faker.helpers.arrayElement([
    'Core Nodes', 'Communication', 'Data Transformation',
    'Development', 'Files', 'Productivity', 'Analytics'
  ]);
  
  return {
    displayName: faker.company.name() + ' Node',
    name: faker.helpers.slugify(faker.company.name()).toLowerCase(),
    group: [faker.helpers.arrayElement(['trigger', 'transform', 'output'])],
    version: faker.number.float({ min: 1, max: 3, fractionDigits: 1 }),
    // n8n expression that renders e.g. "create: user" under the node title.
    subtitle: `={{$parameter["operation"] + ": " + $parameter["resource"]}}`,
    description: faker.lorem.paragraph(),
    defaults: {
      name: faker.company.name(),
      color: faker.color.rgb()
    },
    inputs: ['main'],
    outputs: ['main'],
    credentials: faker.datatype.boolean() ? [{
      name: faker.helpers.slugify(faker.company.name()).toLowerCase() + 'Api',
      required: true
    }] : undefined,
    properties: generateProperties(),
    codex: {
      categories: [nodeCategory],
      subcategories: {
        [nodeCategory]: [faker.word.noun()]
      },
      alias: [faker.word.noun(), faker.word.verb()]
    },
    ...overrides
  };
}

/**
 * Generate `count` workflow nodes laid out left-to-right on a zig-zag grid.
 * The first node keeps its bare name; later nodes get a numeric suffix so
 * names stay unique within the workflow.
 */
export function generateWorkflowNodes(count = 3): WorkflowNode[] {
  return Array.from({ length: count }, (_, index) => ({
    id: faker.string.uuid(),
    name: faker.helpers.arrayElement([
      'Webhook', 'HTTP Request', 'Set', 'Function', 'IF',
      'Slack', 'Email', 'Database', 'Code'
    ]) + (index > 0 ? index : ''),
    type: generateNodeType(),
    typeVersion: faker.number.float({ min: 1, max: 3, fractionDigits: 1 }),
    position: [250 + index * 200, 300 + (index % 2) * 100],
    parameters: generateNodeParameters()
  }));
}

/**
 * Generate a random parameter map for a workflow node.
 *
 * Each group (resource/operation pair, url, method, authentication) is
 * included independently with ~50% probability, then 1-5 extra parameters
 * are added with random noun keys and mixed-type values (including an n8n
 * expression string).
 */
function generateNodeParameters(): Record<string, any> {
  const params: Record<string, any> = {};
  
  // Common parameters
  if (faker.datatype.boolean()) {
    params.resource = faker.helpers.arrayElement(['user', 'post', 'message']);
    params.operation = faker.helpers.arrayElement(['create', 'get', 'update', 'delete']);
  }
  
  // Type-specific parameters
  if (faker.datatype.boolean()) {
    params.url = faker.internet.url();
  }
  
  if (faker.datatype.boolean()) {
    params.method = faker.helpers.arrayElement(['GET', 'POST', 'PUT', 'DELETE']);
  }
  
  if (faker.datatype.boolean()) {
    params.authentication = faker.helpers.arrayElement(['none', 'basicAuth', 'oAuth2']);
  }
  
  // Add some random parameters
  const randomParamCount = faker.number.int({ min: 1, max: 5 });
  for (let i = 0; i < randomParamCount; i++) {
    const key = faker.word.noun().toLowerCase();
    params[key] = faker.helpers.arrayElement([
      faker.lorem.word(),
      faker.number.int(),
      faker.datatype.boolean(),
      '={{ $json.data }}'
    ]);
  }
  
  return params;
}

/**
 * Generate a connection map for the given nodes.
 *
 * Chains every node to its successor by id, then occasionally (coin flip,
 * only for 3+ nodes) adds one extra forward edge between two random nodes.
 * NOTE(review): edges reference node ids here — verify against how the
 * consuming code resolves connection targets.
 */
export function generateConnections(nodes: WorkflowNode[]): Record<string, any> {
  const connections: Record<string, any> = {};

  const link = (sourceId: string, targetId: string): void => {
    connections[sourceId] ??= { main: [[]] };
    connections[sourceId].main[0].push({
      node: targetId,
      type: 'main',
      index: 0
    });
  };

  // Connect nodes sequentially.
  for (let i = 0; i + 1 < nodes.length; i++) {
    link(nodes[i].id, nodes[i + 1].id);
  }

  // Add one random forward connection (source strictly before target).
  if (nodes.length > 2 && faker.datatype.boolean()) {
    const sourceIdx = faker.number.int({ min: 0, max: nodes.length - 2 });
    const targetIdx = faker.number.int({ min: sourceIdx + 1, max: nodes.length - 1 });

    const sourceId = nodes[sourceIdx].id;
    const targetId = nodes[targetIdx].id;

    if (connections[sourceId]?.main[0]) {
      link(sourceId, targetId);
    }
  }

  return connections;
}

/**
 * Generate a complete workflow with sequentially-connected nodes.
 *
 * @param nodeCount - number of nodes to generate (default 3)
 * @returns a Workflow with random id/name/active flag, generated nodes and
 *          connections, v1 execution settings, and 0-3 random tag names
 */
export function generateWorkflow(nodeCount = 3): Workflow {
  const nodes = generateWorkflowNodes(nodeCount);
  
  return {
    id: faker.string.uuid(),
    name: faker.helpers.arrayElement([
      'Data Processing Workflow',
      'API Integration Flow',
      'Notification Pipeline',
      'ETL Process',
      'Webhook Handler'
    ]),
    active: faker.datatype.boolean(),
    nodes,
    connections: generateConnections(nodes),
    settings: {
      executionOrder: 'v1',
      saveManualExecutions: true,
      timezone: faker.location.timeZone()
    },
    staticData: {},
    // Tags are generated as {id, name} pairs but only the names are kept.
    tags: generateTags().map(t => t.name),
    createdAt: faker.date.past().toISOString(),
    updatedAt: faker.date.recent().toISOString()
  };
}

/**
 * Generate 0-3 workflow tags with random ids and names.
 * Names are drawn with replacement, so duplicates are possible.
 */
function generateTags(): Array<{ id: string; name: string }> {
  const tagCount = faker.number.int({ min: 0, max: 3 });

  return Array.from({ length: tagCount }, () => ({
    id: faker.string.uuid(),
    name: faker.helpers.arrayElement([
      'production', 'development', 'testing',
      'automation', 'integration', 'notification'
    ])
  }));
}

/**
 * Generate a test template wrapping a freshly-generated workflow, with
 * random categories, use cases, and view counters.
 */
export function generateTemplate() {
  const workflow = generateWorkflow();
  const categoryPool = [
    'Sales', 'Marketing', 'Engineering',
    'HR', 'Finance', 'Operations'
  ];
  const useCasePool = [
    'Lead Generation', 'Data Sync', 'Notifications',
    'Reporting', 'Automation', 'Integration'
  ];

  return {
    id: faker.number.int({ min: 1000, max: 9999 }),
    name: workflow.name,
    description: faker.lorem.paragraph(),
    workflow,
    categories: faker.helpers.arrayElements(categoryPool, { min: 1, max: 3 }),
    useCases: faker.helpers.arrayElements(useCasePool, { min: 1, max: 3 }),
    views: faker.number.int({ min: 0, max: 10000 }),
    recentViews: faker.number.int({ min: 0, max: 100 })
  };
}

/**
 * Generate bulk test data in one call.
 *
 * @param counts - how many of each kind to generate; missing fields fall
 *                 back to nodes=10, workflows=5, templates=3
 */
export function generateBulkData(counts: {
  nodes?: number;
  workflows?: number;
  templates?: number;
}) {
  const { nodes: nodeCount = 10, workflows: workflowCount = 5, templates: templateCount = 3 } = counts;

  const manyOf = <T>(n: number, make: () => T): T[] => {
    const out: T[] = [];
    for (let i = 0; i < n; i++) {
      out.push(make());
    }
    return out;
  };

  return {
    nodes: manyOf(nodeCount, () => generateNodeDefinition()),
    workflows: manyOf(workflowCount, () => generateWorkflow()),
    templates: manyOf(templateCount, () => generateTemplate())
  };
}
```

--------------------------------------------------------------------------------
/MEMORY_TEMPLATE_UPDATE.md:
--------------------------------------------------------------------------------

```markdown
# Template Update Process - Quick Reference

## Overview

The n8n-mcp project maintains a database of workflow templates from n8n.io. This guide explains how to update the template database incrementally without rebuilding from scratch.

## Current Database State

As of the last update:
- **2,598 templates** in database
- Templates from the last 12 months
- Latest template: September 12, 2025

## Quick Commands

### Incremental Update (Recommended)
```bash
# Build if needed
npm run build

# Fetch only NEW templates (5-10 minutes)
npm run fetch:templates:update
```

### Full Rebuild (Rare)
```bash
# Rebuild entire database from scratch (30-40 minutes)
npm run fetch:templates
```

## How It Works

### Incremental Update Mode (`--update`)

The incremental update is **smart and efficient**:

1. **Loads existing template IDs** from database (~2,598 templates)
2. **Fetches template list** from n8n.io API (all templates from last 12 months)
3. **Filters** to find only NEW templates not in database
4. **Fetches details** for new templates only (saves time and API calls)
5. **Saves** new templates to database (existing ones untouched)
6. **Rebuilds FTS5** search index for new templates

### Key Benefits

✅ **Non-destructive**: All existing templates preserved
✅ **Fast**: Only fetches new templates (5-10 min vs 30-40 min)
✅ **API friendly**: Reduces load on n8n.io API
✅ **Safe**: Preserves AI-generated metadata
✅ **Smart**: Automatically skips duplicates

## Performance Comparison

| Mode | Templates Fetched | Time | Use Case |
|------|------------------|------|----------|
| **Update** | Only new (~50-200) | 5-10 min | Regular updates |
| **Rebuild** | All (~8000+) | 30-40 min | Initial setup or corruption |

## Command Options

### Basic Update
```bash
npm run fetch:templates:update
```

### Full Rebuild
```bash
npm run fetch:templates
```

### With Metadata Generation
```bash
# Update templates and generate AI metadata
npm run fetch:templates -- --update --generate-metadata

# Or just generate metadata for existing templates
npm run fetch:templates -- --metadata-only
```

### Help
```bash
npm run fetch:templates -- --help
```

## Update Frequency

Recommended update schedule:
- **Weekly**: Run incremental update to get latest templates
- **Monthly**: Review database statistics
- **As needed**: Rebuild only if database corruption suspected

## Template Filtering

The fetcher automatically filters templates:
- ✅ **Includes**: Templates from last 12 months
- ✅ **Includes**: Templates with >10 views
- ❌ **Excludes**: Templates with ≤10 views (too niche)
- ❌ **Excludes**: Templates older than 12 months

## Workflow

### Regular Update Workflow

```bash
# 1. Check current state
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"

# 2. Build project (if code changed)
npm run build

# 3. Run incremental update
npm run fetch:templates:update

# 4. Verify new templates added
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"
```

### After n8n Dependency Update

When you update n8n dependencies, templates remain compatible:
```bash
# 1. Update n8n (from MEMORY_N8N_UPDATE.md)
npm run update:all

# 2. Fetch new templates incrementally
npm run fetch:templates:update

# 3. Check how many templates were added
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"

# 4. Generate AI metadata for new templates (optional, requires OPENAI_API_KEY)
npm run fetch:templates -- --metadata-only

# 5. IMPORTANT: Sanitize templates before pushing database
npm run build
npm run sanitize:templates
```

Templates are independent of n8n version - they're just workflow JSON data.

**CRITICAL**: Always run `npm run sanitize:templates` before pushing the database to remove API tokens from template workflows.

**Note**: New templates fetched via `--update` mode will NOT have AI-generated metadata by default. You need to run `--metadata-only` separately to generate metadata for templates that don't have it yet.

## Troubleshooting

### No New Templates Found

This is normal! It means:
- All recent templates are already in your database
- n8n.io hasn't published many new templates recently
- Your database is up to date

```bash
📊 Update mode: 0 new templates to fetch (skipping 2598 existing)
✅ All templates already have metadata
```

### API Rate Limiting

If you hit rate limits:
- The fetcher includes built-in delays (150ms between requests)
- Wait a few minutes and try again
- Use `--update` mode instead of full rebuild

### Database Corruption

If you suspect corruption:
```bash
# Full rebuild from scratch
npm run fetch:templates

# This will:
# - Drop and recreate templates table
# - Fetch all templates fresh
# - Rebuild search indexes
```

## Database Schema

Templates are stored with:
- Basic info (id, name, description, author, views, created_at)
- Node types used (JSON array)
- Complete workflow (gzip compressed, base64 encoded)
- AI-generated metadata (optional, requires OpenAI API key)
- FTS5 search index for fast text search

## Metadata Generation

Generate AI metadata for templates:
```bash
# Requires OPENAI_API_KEY in .env
export OPENAI_API_KEY="sk-..."

# Generate for templates without metadata (recommended after incremental update)
npm run fetch:templates -- --metadata-only

# Generate during template fetch (slower, but automatic)
npm run fetch:templates:update -- --generate-metadata
```

**Important**: Incremental updates (`--update`) do NOT generate metadata by default. After running `npm run fetch:templates:update`, you'll have new templates without metadata. Run `--metadata-only` separately to generate metadata for them.

### Check Metadata Coverage

```bash
# See how many templates have metadata
sqlite3 data/nodes.db "SELECT
  COUNT(*) as total,
  SUM(CASE WHEN metadata_json IS NOT NULL THEN 1 ELSE 0 END) as with_metadata,
  SUM(CASE WHEN metadata_json IS NULL THEN 1 ELSE 0 END) as without_metadata
FROM templates"

# See recent templates without metadata
sqlite3 data/nodes.db "SELECT id, name, created_at
FROM templates
WHERE metadata_json IS NULL
ORDER BY created_at DESC
LIMIT 10"
```

Metadata includes:
- Categories
- Complexity level (simple/medium/complex)
- Use cases
- Estimated setup time
- Required services
- Key features
- Target audience

### Metadata Generation Troubleshooting

If metadata generation fails:

1. **Check error file**: Errors are saved to `temp/batch/batch_*_error.jsonl`
2. **Common issues**:
   - `"Unsupported value: 'temperature'"` - Model doesn't support custom temperature
   - `"Invalid request"` - Check OPENAI_API_KEY is valid
   - Model availability issues
3. **Model**: Uses `gpt-5-mini-2025-08-07` by default (overridable via `OPENAI_MODEL`; note the Environment Variables section below lists `gpt-4o-mini` as the default — confirm which is current)
4. **Token limit**: 3000 tokens per request for detailed metadata

The system will automatically:
- Process error files and assign default metadata to failed templates
- Save error details for debugging
- Continue processing even if some templates fail

**Example error handling**:
```bash
# If you see: "No output file available for batch job"
# Check: temp/batch/batch_*_error.jsonl for error details
# The system now automatically processes errors and generates default metadata
```

## Environment Variables

Optional configuration:
```bash
# OpenAI for metadata generation
OPENAI_API_KEY=sk-...
OPENAI_MODEL=gpt-4o-mini  # Default model
OPENAI_BATCH_SIZE=50      # Batch size for metadata generation

# Metadata generation limits
METADATA_LIMIT=100        # Max templates to process (0 = all)
```

## Statistics

After update, check stats:
```bash
# Template count
sqlite3 data/nodes.db "SELECT COUNT(*) FROM templates"

# Most recent template
sqlite3 data/nodes.db "SELECT MAX(created_at) FROM templates"

# Templates by view count
sqlite3 data/nodes.db "SELECT COUNT(*),
  CASE
    WHEN views < 50 THEN '<50'
    WHEN views < 100 THEN '50-100'
    WHEN views < 500 THEN '100-500'
    ELSE '500+'
  END as view_range
  FROM templates GROUP BY view_range"
```

## Integration with n8n-mcp

Templates are available through MCP tools:
- `list_templates`: List all templates
- `get_template`: Get specific template with workflow
- `search_templates`: Search by keyword
- `list_node_templates`: Templates using specific nodes
- `get_templates_for_task`: Templates for common tasks
- `search_templates_by_metadata`: Advanced filtering

See `npm run test:templates` for usage examples.

## Time Estimates

Typical incremental update:
- Loading existing IDs: 1-2 seconds
- Fetching template list: 2-3 minutes
- Filtering new templates: instant
- Fetching details for 100 new templates: ~15 seconds (0.15s each)
- Saving and indexing: 5-10 seconds
- **Total: 3-5 minutes**

Full rebuild:
- Fetching 8000+ templates: 25-30 minutes
- Saving and indexing: 5-10 minutes
- **Total: 30-40 minutes**

## Best Practices

1. **Use incremental updates** for regular maintenance
2. **Rebuild only when necessary** (corruption, major changes)
3. **Generate metadata incrementally** to avoid OpenAI costs
4. **Monitor template count** to verify updates working
5. **Keep database backed up** before major operations

## Next Steps

After updating templates:
1. Test template search: `npm run test:templates`
2. Verify MCP tools work: Test in Claude Desktop
3. Check statistics in database
4. Commit changes if desired (database changes)

## Related Documentation

- `MEMORY_N8N_UPDATE.md` - Updating n8n dependencies
- `CLAUDE.md` - Project overview and architecture
- `README.md` - User documentation
```

--------------------------------------------------------------------------------
/src/utils/validation-schemas.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Zod validation schemas for MCP tool parameters
 * Provides robust input validation with detailed error messages
 */

// Simple validation without zod for now, since it's not installed
// We can use TypeScript's built-in validation with better error messages

/**
 * Error raised when MCP tool parameters fail validation.
 * Optionally carries the offending field name and the raw value received.
 */
export class ValidationError extends Error {
  public field?: string;
  public value?: any;

  constructor(message: string, field?: string, value?: any) {
    super(message);
    this.name = 'ValidationError';
    this.field = field;
    this.value = value;
  }
}

/**
 * Outcome of a validation check.
 */
export interface ValidationResult {
  valid: boolean; // true exactly when `errors` is empty
  errors: Array<{
    field: string;   // dotted path of the offending field, e.g. "workflow.nodes"
    message: string; // human-readable description of the problem
    value?: any;     // the offending value, echoed back for debugging
  }>;
}

/**
 * Basic validation utilities.
 *
 * Every validate* method returns a ValidationResult instead of throwing so
 * callers can run several field checks and merge them via combineResults().
 * A value counts as "missing" only when it is undefined or null; falsy
 * values such as 0, '' and false are validated on their own merits.
 */
export class Validator {
  /** True when the value is absent (undefined or null). */
  private static isMissing(value: any): boolean {
    return value === undefined || value === null;
  }

  /** Build the standard "<field> is required" error entry. */
  private static requiredError(fieldName: string, value: any): { field: string; message: string; value?: any } {
    return {
      field: fieldName,
      message: `${fieldName} is required`,
      value
    };
  }

  /** Wrap a collected error list in a ValidationResult. */
  private static toResult(errors: Array<{field: string, message: string, value?: any}>): ValidationResult {
    return {
      valid: errors.length === 0,
      errors
    };
  }

  /**
   * Validate that a value is a non-empty string.
   *
   * When `required` is false, an absent value passes, and an empty string
   * is also accepted (the emptiness check applies to required fields only).
   */
  static validateString(value: any, fieldName: string, required: boolean = true): ValidationResult {
    const errors: Array<{field: string, message: string, value?: any}> = [];

    if (required && Validator.isMissing(value)) {
      errors.push(Validator.requiredError(fieldName, value));
    } else if (!Validator.isMissing(value) && typeof value !== 'string') {
      errors.push({
        field: fieldName,
        message: `${fieldName} must be a string, got ${typeof value}`,
        value
      });
    } else if (required && typeof value === 'string' && value.trim().length === 0) {
      errors.push({
        field: fieldName,
        message: `${fieldName} cannot be empty`,
        value
      });
    }

    return Validator.toResult(errors);
  }

  /**
   * Validate that a value is a plain object (not null, not an array).
   */
  static validateObject(value: any, fieldName: string, required: boolean = true): ValidationResult {
    const errors: Array<{field: string, message: string, value?: any}> = [];

    if (required && Validator.isMissing(value)) {
      errors.push(Validator.requiredError(fieldName, value));
    } else if (!Validator.isMissing(value)) {
      if (typeof value !== 'object') {
        errors.push({
          field: fieldName,
          message: `${fieldName} must be an object, got ${typeof value}`,
          value
        });
      } else if (Array.isArray(value)) {
        errors.push({
          field: fieldName,
          message: `${fieldName} must be an object, not an array`,
          value
        });
      }
    }

    return Validator.toResult(errors);
  }

  /**
   * Validate that a value is an array.
   */
  static validateArray(value: any, fieldName: string, required: boolean = true): ValidationResult {
    const errors: Array<{field: string, message: string, value?: any}> = [];

    if (required && Validator.isMissing(value)) {
      errors.push(Validator.requiredError(fieldName, value));
    } else if (!Validator.isMissing(value) && !Array.isArray(value)) {
      errors.push({
        field: fieldName,
        message: `${fieldName} must be an array, got ${typeof value}`,
        value
      });
    }

    return Validator.toResult(errors);
  }

  /**
   * Validate that a value is a number, optionally within [min, max].
   * NaN is rejected even though its typeof is 'number'.
   */
  static validateNumber(value: any, fieldName: string, required: boolean = true, min?: number, max?: number): ValidationResult {
    const errors: Array<{field: string, message: string, value?: any}> = [];

    if (required && Validator.isMissing(value)) {
      errors.push(Validator.requiredError(fieldName, value));
    } else if (!Validator.isMissing(value)) {
      if (typeof value !== 'number' || Number.isNaN(value)) {
        errors.push({
          field: fieldName,
          message: `${fieldName} must be a number, got ${typeof value}`,
          value
        });
      } else {
        // Range checks only run for genuine (non-NaN) numbers.
        if (min !== undefined && value < min) {
          errors.push({
            field: fieldName,
            message: `${fieldName} must be at least ${min}, got ${value}`,
            value
          });
        }
        if (max !== undefined && value > max) {
          errors.push({
            field: fieldName,
            message: `${fieldName} must be at most ${max}, got ${value}`,
            value
          });
        }
      }
    }

    return Validator.toResult(errors);
  }

  /**
   * Validate that a value is one of the allowed values.
   */
  static validateEnum<T>(value: any, fieldName: string, allowedValues: T[], required: boolean = true): ValidationResult {
    const errors: Array<{field: string, message: string, value?: any}> = [];

    if (required && Validator.isMissing(value)) {
      errors.push(Validator.requiredError(fieldName, value));
    } else if (!Validator.isMissing(value) && !allowedValues.includes(value)) {
      errors.push({
        field: fieldName,
        message: `${fieldName} must be one of: ${allowedValues.join(', ')}, got "${value}"`,
        value
      });
    }

    return Validator.toResult(errors);
  }

  /**
   * Merge several validation results; valid only if every input is valid.
   */
  static combineResults(...results: ValidationResult[]): ValidationResult {
    return Validator.toResult(results.flatMap(r => r.errors));
  }

  /**
   * Render a validation result as a multi-line error message.
   * Returns the empty string for a valid result.
   */
  static formatErrors(result: ValidationResult, toolName?: string): string {
    if (result.valid) return '';

    const prefix = toolName ? `${toolName}: ` : '';
    const errors = result.errors.map(e => `  • ${e.field}: ${e.message}`).join('\n');

    return `${prefix}Validation failed:\n${errors}`;
  }
}

/**
 * Tool-specific validation schemas.
 *
 * Each method validates the raw argument object for one MCP tool and merges
 * the per-field results into a single ValidationResult.
 */
export class ToolValidation {
  /**
   * Validate parameters for the validate_node_operation tool.
   */
  static validateNodeOperation(args: any): ValidationResult {
    return Validator.combineResults(
      Validator.validateString(args.nodeType, 'nodeType'),
      Validator.validateObject(args.config, 'config'),
      Validator.validateEnum(
        args.profile,
        'profile',
        ['minimal', 'runtime', 'ai-friendly', 'strict'],
        false // optional
      )
    );
  }

  /**
   * Validate parameters for the validate_node_minimal tool.
   */
  static validateNodeMinimal(args: any): ValidationResult {
    return Validator.combineResults(
      Validator.validateString(args.nodeType, 'nodeType'),
      Validator.validateObject(args.config, 'config')
    );
  }

  /**
   * Validate parameters for the validate_workflow tool.
   *
   * Nested nodes/connections checks run only when the top-level workflow
   * object itself is valid.
   */
  static validateWorkflow(args: any): ValidationResult {
    const ok: ValidationResult = { valid: true, errors: [] };
    const workflowResult = Validator.validateObject(args.workflow, 'workflow');

    let nodesResult = ok;
    let connectionsResult = ok;
    if (workflowResult.valid && args.workflow) {
      nodesResult = Validator.validateArray(args.workflow.nodes, 'workflow.nodes');
      connectionsResult = Validator.validateObject(args.workflow.connections, 'workflow.connections');
    }

    // options is optional and only checked when truthy.
    const optionsResult = args.options
      ? Validator.validateObject(args.options, 'options', false)
      : ok;

    return Validator.combineResults(workflowResult, nodesResult, connectionsResult, optionsResult);
  }

  /**
   * Validate parameters for the search_nodes tool.
   */
  static validateSearchNodes(args: any): ValidationResult {
    return Validator.combineResults(
      Validator.validateString(args.query, 'query'),
      Validator.validateNumber(args.limit, 'limit', false, 1, 200),
      Validator.validateEnum(args.mode, 'mode', ['OR', 'AND', 'FUZZY'], false)
    );
  }

  /**
   * Validate parameters for the list_node_templates tool.
   */
  static validateListNodeTemplates(args: any): ValidationResult {
    return Validator.combineResults(
      Validator.validateArray(args.nodeTypes, 'nodeTypes'),
      Validator.validateNumber(args.limit, 'limit', false, 1, 50)
    );
  }

  /**
   * Validate parameters for n8n workflow operations that take an id.
   */
  static validateWorkflowId(args: any): ValidationResult {
    return Validator.validateString(args.id, 'id');
  }

  /**
   * Validate parameters for the n8n_create_workflow tool.
   */
  static validateCreateWorkflow(args: any): ValidationResult {
    // settings is optional and only checked when truthy.
    const settingsResult = args.settings
      ? Validator.validateObject(args.settings, 'settings', false)
      : { valid: true, errors: [] };

    return Validator.combineResults(
      Validator.validateString(args.name, 'name'),
      Validator.validateArray(args.nodes, 'nodes'),
      Validator.validateObject(args.connections, 'connections'),
      settingsResult
    );
  }
}
```

--------------------------------------------------------------------------------
/tests/examples/using-database-utils.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import {
  createTestDatabase,
  seedTestNodes,
  seedTestTemplates,
  createTestNode,
  createTestTemplate,
  createDatabaseSnapshot,
  restoreDatabaseSnapshot,
  loadFixtures,
  dbHelpers,
  TestDatabase
} from '../utils/database-utils';
import * as path from 'path';

/**
 * Example test file showing how to use database utilities
 * in real test scenarios
 */

describe('Example: Using Database Utils in Tests', () => {
  let testDb: TestDatabase;
  
  // NOTE(review): several assertions below assume seedTestNodes() with no
  // overrides seeds 3 default nodes, 2 of which are AI tools — confirm
  // against tests/utils/database-utils before changing counts.
  
  // Always cleanup after each test
  afterEach(async () => {
    if (testDb) {
      await testDb.cleanup();
    }
  });
  
  describe('Basic Database Setup', () => {
    it('should setup a test database for unit testing', async () => {
      // Create an in-memory database for fast tests
      testDb = await createTestDatabase();
      
      // Seed some test data
      await seedTestNodes(testDb.nodeRepository, [
        { nodeType: 'nodes-base.myCustomNode', displayName: 'My Custom Node' }
      ]);
      
      // Use the repository to test your logic
      const node = testDb.nodeRepository.getNode('nodes-base.myCustomNode');
      expect(node).toBeDefined();
      expect(node.displayName).toBe('My Custom Node');
    });
    
    it('should setup a file-based database for integration testing', async () => {
      // Create a file-based database when you need persistence
      testDb = await createTestDatabase({
        inMemory: false,
        dbPath: path.join(__dirname, '../temp/integration-test.db')
      });
      
      // The database will persist until cleanup() is called
      await seedTestNodes(testDb.nodeRepository);
      
      // You can verify the file exists
      expect(testDb.path).toContain('integration-test.db');
    });
  });
  
  describe('Testing with Fixtures', () => {
    it('should load complex test scenarios from fixtures', async () => {
      testDb = await createTestDatabase();
      
      // Load fixtures from JSON file
      const fixturePath = path.join(__dirname, '../fixtures/database/test-nodes.json');
      await loadFixtures(testDb.adapter, fixturePath);
      
      // Verify the fixture data was loaded
      expect(dbHelpers.countRows(testDb.adapter, 'nodes')).toBe(3);
      expect(dbHelpers.countRows(testDb.adapter, 'templates')).toBe(1);
      
      // Test your business logic with the fixture data
      const slackNode = testDb.nodeRepository.getNode('nodes-base.slack');
      expect(slackNode.isAITool).toBe(true);
      expect(slackNode.category).toBe('Communication');
    });
  });
  
  describe('Testing Repository Methods', () => {
    beforeEach(async () => {
      testDb = await createTestDatabase();
    });
    
    it('should test custom repository queries', async () => {
      // Seed nodes with specific properties
      await seedTestNodes(testDb.nodeRepository, [
        { nodeType: 'nodes-base.ai1', isAITool: true },
        { nodeType: 'nodes-base.ai2', isAITool: true },
        { nodeType: 'nodes-base.regular', isAITool: false }
      ]);
      
      // Test custom queries
      const aiNodes = testDb.nodeRepository.getAITools();
      expect(aiNodes).toHaveLength(4); // 2 custom + 2 default (httpRequest, slack)
      
      // Use dbHelpers for quick checks
      const allNodeTypes = dbHelpers.getAllNodeTypes(testDb.adapter);
      expect(allNodeTypes).toContain('nodes-base.ai1');
      expect(allNodeTypes).toContain('nodes-base.ai2');
    });
  });
  
  describe('Testing with Snapshots', () => {
    it('should test rollback scenarios using snapshots', async () => {
      testDb = await createTestDatabase();
      
      // Setup initial state
      await seedTestNodes(testDb.nodeRepository);
      await seedTestTemplates(testDb.templateRepository);
      
      // Create a snapshot of the good state
      const snapshot = await createDatabaseSnapshot(testDb.adapter);
      
      // Perform operations that might fail
      try {
        // Simulate a complex operation
        await testDb.nodeRepository.saveNode(createTestNode({
          nodeType: 'nodes-base.problematic',
          displayName: 'This might cause issues'
        }));
        
        // Simulate an error
        throw new Error('Something went wrong!');
      } catch (error) {
        // Restore to the known good state
        await restoreDatabaseSnapshot(testDb.adapter, snapshot);
      }
      
      // Verify we're back to the original state
      expect(dbHelpers.countRows(testDb.adapter, 'nodes')).toBe(snapshot.metadata.nodeCount);
      expect(dbHelpers.nodeExists(testDb.adapter, 'nodes-base.problematic')).toBe(false);
    });
  });
  
  describe('Testing Database Performance', () => {
    it('should measure performance of database operations', async () => {
      testDb = await createTestDatabase();
      
      // Measure bulk insert performance.
      // measureDatabaseOperation is a hoisted function declaration defined
      // at the bottom of this file.
      const insertDuration = await measureDatabaseOperation('Bulk Insert', async () => {
        const nodes = Array.from({ length: 100 }, (_, i) => 
          createTestNode({
            nodeType: `nodes-base.perf${i}`,
            displayName: `Performance Test Node ${i}`
          })
        );
        
        for (const node of nodes) {
          testDb.nodeRepository.saveNode(node);
        }
      });
      
      // Measure query performance
      const queryDuration = await measureDatabaseOperation('Query All Nodes', async () => {
        const allNodes = testDb.nodeRepository.getAllNodes();
        expect(allNodes.length).toBe(100); // 100 bulk nodes (no defaults as we're not using seedTestNodes)
      });
      
      // Assert reasonable performance
      expect(insertDuration).toBeLessThan(1000); // Should complete in under 1 second
      expect(queryDuration).toBeLessThan(100); // Queries should be fast
    });
  });
  
  describe('Testing with Different Database States', () => {
    it('should test behavior with empty database', async () => {
      testDb = await createTestDatabase();
      
      // Test with empty database
      expect(dbHelpers.countRows(testDb.adapter, 'nodes')).toBe(0);
      
      const nonExistentNode = testDb.nodeRepository.getNode('nodes-base.doesnotexist');
      expect(nonExistentNode).toBeNull();
    });
    
    it('should test behavior with populated database', async () => {
      testDb = await createTestDatabase();
      
      // Populate with many nodes
      const nodes = Array.from({ length: 50 }, (_, i) => ({
        nodeType: `nodes-base.node${i}`,
        displayName: `Node ${i}`,
        category: i % 2 === 0 ? 'Category A' : 'Category B'
      }));
      
      await seedTestNodes(testDb.nodeRepository, nodes);
      
      // Test queries on populated database
      const allNodes = dbHelpers.getAllNodeTypes(testDb.adapter);
      expect(allNodes.length).toBe(53); // 50 custom + 3 default
      
      // Test filtering by category
      const categoryANodes = testDb.adapter
        .prepare('SELECT COUNT(*) as count FROM nodes WHERE category = ?')
        .get('Category A') as { count: number };
      
      // Even indices 0..48 get 'Category A' → 25 of the 50 seeded nodes.
      expect(categoryANodes.count).toBe(25);
    });
  });
  
  describe('Testing Error Scenarios', () => {
    it('should handle database errors gracefully', async () => {
      testDb = await createTestDatabase();
      
      // Test saving invalid data
      const invalidNode = createTestNode({
        nodeType: '', // Invalid: empty nodeType
        displayName: 'Invalid Node'
      });
      
      // SQLite allows NULL in PRIMARY KEY, so test with empty string instead
      // which should violate any business logic constraints
      // For now, we'll just verify the save doesn't crash
      expect(() => {
        testDb.nodeRepository.saveNode(invalidNode);
      }).not.toThrow();
      
      // Database should still be functional
      await seedTestNodes(testDb.nodeRepository);
      expect(dbHelpers.countRows(testDb.adapter, 'nodes')).toBe(4); // 3 default nodes + 1 invalid node
    });
  });
  
  describe('Testing with Transactions', () => {
    it('should test transactional behavior', async () => {
      testDb = await createTestDatabase();
      
      // Seed initial data
      await seedTestNodes(testDb.nodeRepository);
      const initialCount = dbHelpers.countRows(testDb.adapter, 'nodes');
      
      // Use transaction for atomic operations
      try {
        testDb.adapter.transaction(() => {
          // Add multiple nodes atomically
          testDb.nodeRepository.saveNode(createTestNode({ nodeType: 'nodes-base.tx1' }));
          testDb.nodeRepository.saveNode(createTestNode({ nodeType: 'nodes-base.tx2' }));
          
          // Simulate error in transaction
          throw new Error('Transaction failed');
        });
      } catch (error) {
        // Transaction should have rolled back
      }
      
      // Verify no nodes were added
      const finalCount = dbHelpers.countRows(testDb.adapter, 'nodes');
      expect(finalCount).toBe(initialCount);
      expect(dbHelpers.nodeExists(testDb.adapter, 'nodes-base.tx1')).toBe(false);
      expect(dbHelpers.nodeExists(testDb.adapter, 'nodes-base.tx2')).toBe(false);
    });
  });
});

// Helper function for performance measurement
/**
 * Runs an async database operation and logs how long it took.
 *
 * @param name - Label printed in the log line.
 * @param operation - Async work to time.
 * @returns Elapsed wall-clock time in milliseconds.
 */
async function measureDatabaseOperation(
  name: string,
  operation: () => Promise<void>
): Promise<number> {
  const startedAt = performance.now();
  await operation();
  const elapsed = performance.now() - startedAt;
  console.log(`[Performance] ${name}: ${elapsed.toFixed(2)}ms`);
  return elapsed;
}
```

--------------------------------------------------------------------------------
/src/scripts/test-execution-filtering.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env node
/**
 * Manual testing script for execution filtering feature
 *
 * This script demonstrates all modes of the n8n_get_execution tool
 * with various filtering options.
 *
 * Usage: npx tsx src/scripts/test-execution-filtering.ts
 */

import {
  generatePreview,
  filterExecutionData,
  processExecution,
} from '../services/execution-processor';
import { ExecutionFilterOptions, Execution, ExecutionStatus } from '../types/n8n-api';

// Print a banner marking the start of the manual test run.
console.log('='.repeat(80));
console.log('Execution Filtering Feature - Manual Test Suite');
console.log('='.repeat(80));
console.log('');

/**
 * Mock execution factory (simplified version for testing).
 * Builds a successful execution with three nodes ('HTTP Request',
 * 'Filter', 'Set') whose outputs shrink along the pipeline:
 * all items → half the items → the first 5 items.
 */
function createTestExecution(itemCount: number): Execution {
  const items = [];
  for (let i = 0; i < itemCount; i++) {
    items.push({
      json: {
        id: i + 1,
        name: `Item ${i + 1}`,
        email: `user${i}@example.com`,
        value: Math.random() * 1000,
        metadata: {
          createdAt: new Date().toISOString(),
          tags: ['tag1', 'tag2'],
        },
      },
    });
  }

  // Each node gets a single run entry holding its output items.
  const runData = {
    'HTTP Request': [
      {
        startTime: Date.now(),
        executionTime: 234,
        data: { main: [items] },
      },
    ],
    'Filter': [
      {
        startTime: Date.now(),
        executionTime: 45,
        data: { main: [items.slice(0, Math.floor(itemCount / 2))] },
      },
    ],
    'Set': [
      {
        startTime: Date.now(),
        executionTime: 12,
        data: { main: [items.slice(0, 5)] },
      },
    ],
  };

  return {
    id: `test-exec-${Date.now()}`,
    workflowId: 'workflow-test',
    status: ExecutionStatus.SUCCESS,
    mode: 'manual',
    finished: true,
    startedAt: '2024-01-01T10:00:00.000Z',
    stoppedAt: '2024-01-01T10:00:05.000Z',
    data: {
      resultData: { runData },
    },
  };
}

/**
 * Test 1: Preview Mode
 */
console.log('📊 TEST 1: Preview Mode (No Data, Just Structure)');
console.log('-'.repeat(80));

const execution1 = createTestExecution(50);
const { preview, recommendation } = generatePreview(execution1);

console.log('Preview:', JSON.stringify(preview, null, 2));
console.log('\nRecommendation:', JSON.stringify(recommendation, null, 2));
console.log('\n✅ Preview mode shows structure without consuming tokens for data\n');

/**
 * Test 2: Summary Mode (Default)
 */
console.log('📝 TEST 2: Summary Mode (2 items per node)');
console.log('-'.repeat(80));

const execution2 = createTestExecution(50);
const summaryResult = filterExecutionData(execution2, { mode: 'summary' });

console.log('Summary Mode Result:');
console.log('- Mode:', summaryResult.mode);
console.log('- Summary:', JSON.stringify(summaryResult.summary, null, 2));
console.log('- HTTP Request items shown:', summaryResult.nodes?.['HTTP Request']?.data?.metadata.itemsShown);
console.log('- HTTP Request truncated:', summaryResult.nodes?.['HTTP Request']?.data?.metadata.truncated);
console.log('\n✅ Summary mode returns 2 items per node (safe default)\n');

/**
 * Test 3: Filtered Mode with Custom Limit
 */
console.log('🎯 TEST 3: Filtered Mode (Custom itemsLimit: 5)');
console.log('-'.repeat(80));

const execution3 = createTestExecution(100);
const filteredResult = filterExecutionData(execution3, {
  mode: 'filtered',
  itemsLimit: 5,
});

console.log('Filtered Mode Result:');
console.log('- Items shown per node:', filteredResult.nodes?.['HTTP Request']?.data?.metadata.itemsShown);
console.log('- Total items available:', filteredResult.nodes?.['HTTP Request']?.data?.metadata.totalItems);
console.log('- More data available:', filteredResult.summary?.hasMoreData);
console.log('\n✅ Filtered mode allows custom item limits\n');

/**
 * Test 4: Node Name Filtering
 */
console.log('🔍 TEST 4: Filter to Specific Nodes');
console.log('-'.repeat(80));

const execution4 = createTestExecution(30);
const nodeFilterResult = filterExecutionData(execution4, {
  mode: 'filtered',
  nodeNames: ['HTTP Request'],
  itemsLimit: 3,
});

console.log('Node Filter Result:');
console.log('- Nodes in result:', Object.keys(nodeFilterResult.nodes || {}));
console.log('- Expected: ["HTTP Request"]');
console.log('- Executed nodes:', nodeFilterResult.summary?.executedNodes);
console.log('- Total nodes:', nodeFilterResult.summary?.totalNodes);
console.log('\n✅ Can filter to specific nodes only\n');

/**
 * Test 5: Structure-Only Mode (itemsLimit: 0)
 */
console.log('🏗️  TEST 5: Structure-Only Mode (itemsLimit: 0)');
console.log('-'.repeat(80));

const execution5 = createTestExecution(100);
const structureResult = filterExecutionData(execution5, {
  mode: 'filtered',
  itemsLimit: 0,
});

console.log('Structure-Only Result:');
console.log('- Items shown:', structureResult.nodes?.['HTTP Request']?.data?.metadata.itemsShown);
console.log('- First item (structure):', JSON.stringify(
  structureResult.nodes?.['HTTP Request']?.data?.output?.[0]?.[0],
  null,
  2
));
console.log('\n✅ Structure-only mode shows data shape without values\n');

/**
 * Test 6: Full Mode
 */
console.log('💾 TEST 6: Full Mode (All Data)');
console.log('-'.repeat(80));

const execution6 = createTestExecution(5); // Small dataset
const fullResult = filterExecutionData(execution6, { mode: 'full' });

console.log('Full Mode Result:');
console.log('- Items shown:', fullResult.nodes?.['HTTP Request']?.data?.metadata.itemsShown);
console.log('- Total items:', fullResult.nodes?.['HTTP Request']?.data?.metadata.totalItems);
console.log('- Truncated:', fullResult.nodes?.['HTTP Request']?.data?.metadata.truncated);
console.log('\n✅ Full mode returns all data (use with caution)\n');

/**
 * Test 7: Backward Compatibility
 */
console.log('🔄 TEST 7: Backward Compatibility (No Filtering)');
console.log('-'.repeat(80));

const execution7 = createTestExecution(10);
// With an empty options object, processExecution should return the input
// execution unchanged (same reference), preserving the legacy contract.
const legacyResult = processExecution(execution7, {});

console.log('Legacy Result:');
console.log('- Returns original execution:', legacyResult === execution7);
console.log('- Type:', typeof legacyResult);
console.log('\n✅ Backward compatible - no options returns original execution\n');

/**
 * Test 8: Input Data Inclusion
 */
console.log('🔗 TEST 8: Include Input Data');
console.log('-'.repeat(80));

const execution8 = createTestExecution(5);
const inputDataResult = filterExecutionData(execution8, {
  mode: 'filtered',
  itemsLimit: 2,
  includeInputData: true,
});

console.log('Input Data Result:');
console.log('- Has input data:', !!inputDataResult.nodes?.['HTTP Request']?.data?.input);
console.log('- Has output data:', !!inputDataResult.nodes?.['HTTP Request']?.data?.output);
console.log('\n✅ Can include input data for debugging\n');

/**
 * Test 9: itemsLimit Validation
 */
console.log('⚠️  TEST 9: itemsLimit Validation');
console.log('-'.repeat(80));

const execution9 = createTestExecution(50);

// Test negative value (invalid negatives other than -1 fall back to the
// default of 2 items per node)
const negativeResult = filterExecutionData(execution9, {
  mode: 'filtered',
  itemsLimit: -5,
});
console.log('- Negative itemsLimit (-5) handled:', negativeResult.nodes?.['HTTP Request']?.data?.metadata.itemsShown === 2);

// Test very large value
const largeResult = filterExecutionData(execution9, {
  mode: 'filtered',
  itemsLimit: 999999,
});
console.log('- Large itemsLimit (999999) capped:', (largeResult.nodes?.['HTTP Request']?.data?.metadata.itemsShown || 0) <= 1000);

// Test unlimited (-1)
const unlimitedResult = filterExecutionData(execution9, {
  mode: 'filtered',
  itemsLimit: -1,
});
console.log('- Unlimited itemsLimit (-1) works:', unlimitedResult.nodes?.['HTTP Request']?.data?.metadata.itemsShown === 50);

console.log('\n✅ itemsLimit validation works correctly\n');

/**
 * Test 10: Recommendation Following
 */
console.log('🎯 TEST 10: Follow Recommendation Workflow');
console.log('-'.repeat(80));

const execution10 = createTestExecution(100);
const { preview: preview10, recommendation: rec10 } = generatePreview(execution10);

// NOTE(review): `itemCounts` is accessed without optional chaining here, so
// this line throws if 'HTTP Request' is missing from the preview — fine for
// this fixture, but worth confirming if the mock workflow changes.
console.log('1. Preview shows:', {
  totalItems: preview10.nodes['HTTP Request']?.itemCounts.output,
  sizeKB: preview10.estimatedSizeKB,
});

console.log('\n2. Recommendation:', {
  canFetchFull: rec10.canFetchFull,
  suggestedMode: rec10.suggestedMode,
  suggestedItemsLimit: rec10.suggestedItemsLimit,
  reason: rec10.reason,
});

// Follow recommendation
const options: ExecutionFilterOptions = {
  mode: rec10.suggestedMode,
  itemsLimit: rec10.suggestedItemsLimit,
};

const recommendedResult = filterExecutionData(execution10, options);

console.log('\n3. Following recommendation gives:', {
  mode: recommendedResult.mode,
  itemsShown: recommendedResult.nodes?.['HTTP Request']?.data?.metadata.itemsShown,
  hasMoreData: recommendedResult.summary?.hasMoreData,
});

console.log('\n✅ Recommendation workflow helps make optimal choices\n');

/**
 * Summary
 */
console.log('='.repeat(80));
console.log('✨ All Tests Completed Successfully!');
console.log('='.repeat(80));
console.log('\n🎉 Execution Filtering Feature is Working!\n');
console.log('Key Takeaways:');
console.log('1. Always use preview mode first for unknown datasets');
console.log('2. Follow the recommendation for optimal token usage');
console.log('3. Use nodeNames to filter to relevant nodes');
console.log('4. itemsLimit: 0 shows structure without data');
console.log('5. itemsLimit: -1 returns unlimited items (use with caution)');
console.log('6. Summary mode (2 items) is a safe default');
console.log('7. Full mode should only be used for small datasets');
console.log('');

```

--------------------------------------------------------------------------------
/tests/utils/builders/workflow.builder.ts:
--------------------------------------------------------------------------------

```typescript
import { v4 as uuidv4 } from 'uuid';

// Type definitions

// Arbitrary node parameter bag, keyed by parameter name.
export interface INodeParameters {
  [key: string]: any;
}

// Credentials attached to a node, keyed by credential type
// (e.g. "slackApi"); `id` is optional for test fixtures.
export interface INodeCredentials {
  [credentialType: string]: {
    id?: string;
    name: string;
  };
}

// A single workflow node as stored in the workflow JSON.
export interface INode {
  id: string;
  name: string;
  type: string;
  typeVersion: number;
  position: [number, number];       // [x, y] canvas coordinates
  parameters: INodeParameters;
  credentials?: INodeCredentials;
  disabled?: boolean;
  notes?: string;
  continueOnFail?: boolean;
  retryOnFail?: boolean;
  maxTries?: number;
  waitBetweenTries?: number;
  onError?: 'continueRegularOutput' | 'continueErrorOutput' | 'stopWorkflow';
}

// One edge endpoint: the target node, connection type, and input index.
export interface IConnection {
  node: string;
  type: 'main';
  index: number;
}

// Connection map keyed by source node, then output type, then output index;
// each output holds the list of downstream connections.
export interface IConnections {
  [nodeId: string]: {
    [outputType: string]: Array<Array<IConnection | null>>;
  };
}

// Workflow-level execution/persistence settings.
export interface IWorkflowSettings {
  executionOrder?: 'v0' | 'v1';
  saveDataErrorExecution?: 'all' | 'none';
  saveDataSuccessExecution?: 'all' | 'none';
  saveManualExecutions?: boolean;
  saveExecutionProgress?: boolean;
  executionTimeout?: number;
  errorWorkflow?: string;
  timezone?: string;
}

// Top-level workflow document produced by WorkflowBuilder.build().
export interface IWorkflow {
  id?: string;
  name: string;
  nodes: INode[];
  connections: IConnections;
  active?: boolean;
  settings?: IWorkflowSettings;
  staticData?: any;
  tags?: string[];
  pinData?: any;
  versionId?: string;
  meta?: {
    instanceId?: string;
  };
}

// Type guard for INode validation
/**
 * Checks that a value has the minimal shape of an INode: string id/name/type,
 * numeric typeVersion, a 2-element numeric position tuple, and a parameters
 * object. Returns false (instead of throwing) for null/undefined/non-objects.
 */
function isValidNode(node: any): node is INode {
  // `typeof null === 'object'`, so an explicit null check is required before
  // any property access — without it, isValidNode(null) threw a TypeError.
  if (node === null || typeof node !== 'object') {
    return false;
  }
  return (
    typeof node.id === 'string' &&
    typeof node.name === 'string' &&
    typeof node.type === 'string' &&
    typeof node.typeVersion === 'number' &&
    Array.isArray(node.position) &&
    node.position.length === 2 &&
    typeof node.position[0] === 'number' &&
    typeof node.position[1] === 'number' &&
    typeof node.parameters === 'object'
  );
}

/**
 * Fluent builder for constructing n8n workflow JSON in tests.
 * All add/connect/set methods return `this` for chaining; call build()
 * to obtain an immutable snapshot of the workflow.
 */
export class WorkflowBuilder {
  // Workflow under construction; mutated in place by the fluent methods.
  private workflow: IWorkflow;
  // Counter used only when a node name must be auto-generated ("<type> N").
  private nodeCounter = 0;
  // Canvas position of the first auto-placed node.
  private defaultPosition: [number, number] = [250, 300];
  // Horizontal spacing between auto-placed nodes.
  private positionIncrement = 280;

  constructor(name = 'Test Workflow') {
    this.workflow = {
      name,
      nodes: [],
      connections: {},
      active: false,
      settings: {
        executionOrder: 'v1',
        saveDataErrorExecution: 'all',
        saveDataSuccessExecution: 'all',
        saveManualExecutions: true,
        saveExecutionProgress: true,
      },
    };
  }

  /**
   * Add a node to the workflow.
   * Generates a uuid id and a "<type> N" name when not supplied, and
   * auto-places the node on the canvas when no position is given.
   * Optional INode fields (credentials, notes, disabled, ...) pass through.
   */
  addNode(node: Partial<INode> & { type: string; typeVersion: number }): this {
    const nodeId = node.id || uuidv4();
    // The counter only advances when a name has to be generated.
    const nodeName = node.name || `${node.type} ${++this.nodeCounter}`;

    const fullNode: INode = {
      ...node,  // Spread first so optional fields pass through
      id: nodeId,
      name: nodeName,
      type: node.type,
      typeVersion: node.typeVersion,
      position: node.position || this.getNextPosition(),
      parameters: node.parameters || {},
    };

    this.workflow.nodes.push(fullNode);
    return this;
  }

  /**
   * Add a webhook node (common trigger).
   * Caller-supplied `parameters` are merged over the defaults; all other
   * INode fields in `options` (id, name, position, ...) pass through.
   */
  addWebhookNode(options: Partial<INode> = {}): this {
    // Destructure `parameters` out so spreading the remaining options
    // cannot clobber the merged parameter defaults (previously `...options`
    // replaced the whole parameters object, discarding the defaults).
    const { parameters, ...rest } = options;
    return this.addNode({
      type: 'n8n-nodes-base.webhook',
      typeVersion: 2,
      ...rest,
      parameters: {
        path: 'test-webhook',
        method: 'POST',
        responseMode: 'onReceived',
        responseData: 'allEntries',
        responsePropertyName: 'data',
        ...parameters,
      },
    });
  }

  /**
   * Add a Slack node with default message-post parameters and a default
   * `slackApi` credential. Caller `parameters` merge over the defaults;
   * caller `credentials` (via options) replace the default credential.
   */
  addSlackNode(options: Partial<INode> = {}): this {
    const { parameters, ...rest } = options;
    return this.addNode({
      type: 'n8n-nodes-base.slack',
      typeVersion: 2.2,
      credentials: {
        slackApi: {
          name: 'Slack Account',
        },
      },
      ...rest,
      parameters: {
        resource: 'message',
        operation: 'post',
        channel: '#general',
        text: 'Test message',
        ...parameters,
      },
    });
  }

  /**
   * Add an HTTP Request node (defaults to an unauthenticated GET).
   * Caller `parameters` merge over the defaults.
   */
  addHttpRequestNode(options: Partial<INode> = {}): this {
    const { parameters, ...rest } = options;
    return this.addNode({
      type: 'n8n-nodes-base.httpRequest',
      typeVersion: 4.2,
      ...rest,
      parameters: {
        method: 'GET',
        url: 'https://api.example.com/data',
        authentication: 'none',
        ...parameters,
      },
    });
  }

  /**
   * Add a Code node (defaults to run-once-for-all-items JavaScript).
   * Caller `parameters` merge over the defaults.
   */
  addCodeNode(options: Partial<INode> = {}): this {
    const { parameters, ...rest } = options;
    return this.addNode({
      type: 'n8n-nodes-base.code',
      typeVersion: 2,
      ...rest,
      parameters: {
        mode: 'runOnceForAllItems',
        language: 'javaScript',
        jsCode: 'return items;',
        ...parameters,
      },
    });
  }

  /**
   * Add an IF node with a single string-equality condition on
   * `{{ $json.value }}`. Caller `parameters` merge over the defaults.
   */
  addIfNode(options: Partial<INode> = {}): this {
    const { parameters, ...rest } = options;
    return this.addNode({
      type: 'n8n-nodes-base.if',
      typeVersion: 2,
      ...rest,
      parameters: {
        conditions: {
          options: {
            caseSensitive: true,
            leftValue: '',
            typeValidation: 'strict',
          },
          conditions: [
            {
              id: uuidv4(),
              leftValue: '={{ $json.value }}',
              rightValue: 'test',
              operator: {
                type: 'string',
                operation: 'equals',
              },
            },
          ],
          combinator: 'and',
        },
        ...parameters,
      },
    });
  }

  /**
   * Add an AI Agent node (conversational agent reading `$json.prompt`).
   * Caller `parameters` merge over the defaults.
   */
  addAiAgentNode(options: Partial<INode> = {}): this {
    const { parameters, ...rest } = options;
    return this.addNode({
      type: '@n8n/n8n-nodes-langchain.agent',
      typeVersion: 1.7,
      ...rest,
      parameters: {
        agent: 'conversationalAgent',
        promptType: 'define',
        text: '={{ $json.prompt }}',
        ...parameters,
      },
    });
  }

  /**
   * Connect two nodes
   * @param sourceNodeId - ID (or name) of the source node
   * @param targetNodeId - ID (or name) of the target node
   * @param sourceOutput - Output index on the source node (default: 0)
   * @param targetInput - Input index on the target node (default: 0)
   * @returns The WorkflowBuilder instance for chaining
   * @throws Error if either node does not exist in the workflow
   * @example
   * builder.connect('webhook-1', 'slack-1', 0, 0);
   */
  connect(
    sourceNodeId: string,
    targetNodeId: string,
    sourceOutput = 0,
    targetInput = 0
  ): this {
    // Validate that both nodes exist (findNode matches by name OR id).
    const sourceNode = this.findNode(sourceNodeId);
    const targetNode = this.findNode(targetNodeId);

    if (!sourceNode) {
      throw new Error(`Source node not found: ${sourceNodeId}`);
    }
    if (!targetNode) {
      throw new Error(`Target node not found: ${targetNodeId}`);
    }

    if (!this.workflow.connections[sourceNodeId]) {
      this.workflow.connections[sourceNodeId] = {
        main: [],
      };
    }

    // Grow the outputs array until the requested output index exists.
    while (this.workflow.connections[sourceNodeId].main.length <= sourceOutput) {
      this.workflow.connections[sourceNodeId].main.push([]);
    }

    // Add the connection
    this.workflow.connections[sourceNodeId].main[sourceOutput].push({
      node: targetNodeId,
      type: 'main',
      index: targetInput,
    });

    return this;
  }

  /**
   * Connect nodes in sequence: each id is wired to the next one
   * using the default (0 → 0) output/input indices.
   */
  connectSequentially(nodeIds: string[]): this {
    for (let i = 0; i < nodeIds.length - 1; i++) {
      this.connect(nodeIds[i], nodeIds[i + 1]);
    }
    return this;
  }

  /**
   * Merge the given settings over the current workflow settings.
   */
  setSettings(settings: IWorkflowSettings): this {
    this.workflow.settings = {
      ...this.workflow.settings,
      ...settings,
    };
    return this;
  }

  /**
   * Set workflow as active (default true).
   */
  setActive(active = true): this {
    this.workflow.active = active;
    return this;
  }

  /**
   * Append tags to the workflow (existing tags are kept).
   */
  addTags(...tags: string[]): this {
    this.workflow.tags = [...(this.workflow.tags || []), ...tags];
    return this;
  }

  /**
   * Set workflow ID.
   */
  setId(id: string): this {
    this.workflow.id = id;
    return this;
  }

  /**
   * Build and return the workflow.
   * Returns a deep clone so later builder mutations don't leak into the
   * result. JSON round-trip is safe here because the workflow holds only
   * plain JSON-serializable data.
   */
  build(): IWorkflow {
    return JSON.parse(JSON.stringify(this.workflow));
  }

  /**
   * Compute the canvas position for the next auto-placed node:
   * nodes are laid out left-to-right on a single row.
   */
  private getNextPosition(): [number, number] {
    const nodeCount = this.workflow.nodes.length;
    return [
      this.defaultPosition[0] + (nodeCount * this.positionIncrement),
      this.defaultPosition[1],
    ];
  }

  /**
   * Find a node by name or ID; returns undefined when absent.
   */
  findNode(nameOrId: string): INode | undefined {
    return this.workflow.nodes.find(
      node => node.name === nameOrId || node.id === nameOrId
    );
  }

  /**
   * Get all node IDs in insertion order.
   */
  getNodeIds(): string[] {
    return this.workflow.nodes.map(node => node.id);
  }

  /**
   * Add a node of an arbitrary type with explicit parameters.
   */
  addCustomNode(type: string, typeVersion: number, parameters: INodeParameters, options: Partial<INode> = {}): this {
    return this.addNode({
      type,
      typeVersion,
      parameters,
      ...options,
    });
  }

  /**
   * Remove all nodes and connections and reset the name counter.
   * Workflow name, settings, and tags are kept.
   */
  clear(): this {
    this.workflow.nodes = [];
    this.workflow.connections = {};
    this.nodeCounter = 0;
    return this;
  }

  /**
   * Create an independent copy of this builder (deep-clones the workflow
   * and preserves the name counter).
   */
  clone(): WorkflowBuilder {
    const cloned = new WorkflowBuilder(this.workflow.name);
    cloned.workflow = JSON.parse(JSON.stringify(this.workflow));
    cloned.nodeCounter = this.nodeCounter;
    return cloned;
  }
}

// Export a factory function for convenience
/**
 * Create a new WorkflowBuilder; when `name` is omitted the builder's
 * default workflow name is used.
 */
export function createWorkflow(name?: string): WorkflowBuilder {
  const builder = new WorkflowBuilder(name);
  return builder;
}
```

--------------------------------------------------------------------------------
/tests/test-database-extraction.js:
--------------------------------------------------------------------------------

```javascript
#!/usr/bin/env node

/**
 * Test node extraction for database storage
 * Focus on extracting known nodes with proper structure for DB storage
 */

const fs = require('fs').promises;
const path = require('path');
const crypto = require('crypto');

// Import our extractor
const { NodeSourceExtractor } = require('../dist/utils/node-source-extractor');

// Known n8n nodes to test.
// Each entry carries the fully-qualified node type plus the package and
// bare class name the extractor needs to locate the source file.
const KNOWN_NODES = [
  // Core nodes
  { type: 'n8n-nodes-base.Function', package: 'n8n-nodes-base', name: 'Function' },
  { type: 'n8n-nodes-base.Webhook', package: 'n8n-nodes-base', name: 'Webhook' },
  { type: 'n8n-nodes-base.HttpRequest', package: 'n8n-nodes-base', name: 'HttpRequest' },
  { type: 'n8n-nodes-base.If', package: 'n8n-nodes-base', name: 'If' },
  { type: 'n8n-nodes-base.SplitInBatches', package: 'n8n-nodes-base', name: 'SplitInBatches' },
  
  // AI nodes (scoped @n8n/n8n-nodes-langchain package)
  { type: '@n8n/n8n-nodes-langchain.Agent', package: '@n8n/n8n-nodes-langchain', name: 'Agent' },
  { type: '@n8n/n8n-nodes-langchain.OpenAiAssistant', package: '@n8n/n8n-nodes-langchain', name: 'OpenAiAssistant' },
  { type: '@n8n/n8n-nodes-langchain.ChainLlm', package: '@n8n/n8n-nodes-langchain', name: 'ChainLlm' },
  
  // Integration nodes
  { type: 'n8n-nodes-base.Airtable', package: 'n8n-nodes-base', name: 'Airtable' },
  { type: 'n8n-nodes-base.GoogleSheets', package: 'n8n-nodes-base', name: 'GoogleSheets' },
  { type: 'n8n-nodes-base.Slack', package: 'n8n-nodes-base', name: 'Slack' },
  { type: 'n8n-nodes-base.Discord', package: 'n8n-nodes-base', name: 'Discord' },
];

// Database schema for storing nodes.
// NOTE(review): this DDL mixes dialects — `gen_random_uuid()` and JSONB are
// PostgreSQL, but the inline `INDEX idx_... (col)` clauses inside CREATE
// TABLE are MySQL syntax and would not parse on PostgreSQL. The string is
// only referenced as documentation in this test script (never executed
// here); confirm the target database before running it for real.
const DB_SCHEMA = `
-- Main nodes table
CREATE TABLE IF NOT EXISTS nodes (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  node_type VARCHAR(255) UNIQUE NOT NULL,
  name VARCHAR(255) NOT NULL,
  package_name VARCHAR(255) NOT NULL,
  display_name VARCHAR(255),
  description TEXT,
  version VARCHAR(50),
  code_hash VARCHAR(64) NOT NULL,
  code_length INTEGER NOT NULL,
  source_location TEXT NOT NULL,
  has_credentials BOOLEAN DEFAULT FALSE,
  extracted_at TIMESTAMP NOT NULL DEFAULT NOW(),
  updated_at TIMESTAMP NOT NULL DEFAULT NOW(),
  CONSTRAINT idx_node_type UNIQUE (node_type),
  INDEX idx_package_name (package_name),
  INDEX idx_code_hash (code_hash)
);

-- Source code storage
CREATE TABLE IF NOT EXISTS node_source_code (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  node_id UUID NOT NULL REFERENCES nodes(id) ON DELETE CASCADE,
  source_code TEXT NOT NULL,
  minified_code TEXT,
  source_map TEXT,
  created_at TIMESTAMP NOT NULL DEFAULT NOW(),
  CONSTRAINT idx_node_source UNIQUE (node_id)
);

-- Credentials definitions
CREATE TABLE IF NOT EXISTS node_credentials (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  node_id UUID NOT NULL REFERENCES nodes(id) ON DELETE CASCADE,
  credential_type VARCHAR(255) NOT NULL,
  credential_code TEXT NOT NULL,
  required_fields JSONB,
  created_at TIMESTAMP NOT NULL DEFAULT NOW(),
  INDEX idx_node_credentials (node_id)
);

-- Package metadata
CREATE TABLE IF NOT EXISTS node_packages (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  package_name VARCHAR(255) UNIQUE NOT NULL,
  version VARCHAR(50),
  description TEXT,
  author VARCHAR(255),
  license VARCHAR(50),
  repository_url TEXT,
  metadata JSONB,
  created_at TIMESTAMP NOT NULL DEFAULT NOW(),
  updated_at TIMESTAMP NOT NULL DEFAULT NOW()
);

-- Node dependencies
CREATE TABLE IF NOT EXISTS node_dependencies (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  node_id UUID NOT NULL REFERENCES nodes(id) ON DELETE CASCADE,
  depends_on_node_id UUID NOT NULL REFERENCES nodes(id),
  dependency_type VARCHAR(50), -- 'extends', 'imports', 'requires'
  created_at TIMESTAMP NOT NULL DEFAULT NOW(),
  CONSTRAINT unique_dependency UNIQUE (node_id, depends_on_node_id)
);
`;

/**
 * Escape a value for inclusion as a single-quoted SQL string literal.
 * Doubles embedded single quotes per the SQL standard, so generated
 * INSERT statements cannot be broken (or injected into) by node names,
 * paths, etc. that contain quotes.
 * @param {string} value - Raw value to embed in SQL.
 * @returns {string} The value wrapped in single quotes, safely escaped.
 */
function sqlQuote(value) {
  return `'${String(value).replace(/'/g, "''")}'`;
}

/**
 * Extract every node listed in KNOWN_NODES, write one JSON file per node,
 * then emit a database-import JSON file, a SQL insert script, and a full
 * extraction report under ./extracted-nodes-db.
 * Exits the process with code 0 when all extractions succeed, 1 otherwise.
 */
async function main() {
  console.log('=== n8n Node Extraction for Database Storage Test ===\n');

  const extractor = new NodeSourceExtractor();
  const results = {
    tested: 0,
    extracted: 0,
    failed: 0,
    nodes: [],
    errors: [],
    totalSize: 0,
  };

  // Create output directory (idempotent thanks to `recursive`)
  const outputDir = path.join(__dirname, 'extracted-nodes-db');
  await fs.mkdir(outputDir, { recursive: true });

  console.log(`Testing extraction of ${KNOWN_NODES.length} known nodes...\n`);

  // Extract each node sequentially so per-node console output stays grouped.
  for (const nodeConfig of KNOWN_NODES) {
    console.log(`📦 Extracting: ${nodeConfig.type}`);
    results.tested++;

    try {
      const startTime = Date.now();
      const nodeInfo = await extractor.extractNodeSource(nodeConfig.type);
      const extractTime = Date.now() - startTime;

      // Content hash lets the database deduplicate identical source code.
      const codeHash = crypto.createHash('sha256').update(nodeInfo.sourceCode).digest('hex');

      // Record shaped to mirror the `nodes` table plus its related rows.
      const dbRecord = {
        // Primary data
        node_type: nodeConfig.type,
        name: nodeConfig.name,
        package_name: nodeConfig.package,
        code_hash: codeHash,
        code_length: nodeInfo.sourceCode.length,
        source_location: nodeInfo.location,
        has_credentials: !!nodeInfo.credentialCode,

        // Source code (separate table in real DB)
        source_code: nodeInfo.sourceCode,
        credential_code: nodeInfo.credentialCode,

        // Package info
        package_info: nodeInfo.packageInfo,

        // Metadata
        extraction_time_ms: extractTime,
        extracted_at: new Date().toISOString(),
      };

      results.nodes.push(dbRecord);
      results.extracted++;
      results.totalSize += nodeInfo.sourceCode.length;

      console.log(`  ✅ Success: ${nodeInfo.sourceCode.length} bytes (${extractTime}ms)`);
      console.log(`  📍 Location: ${nodeInfo.location}`);
      console.log(`  🔑 Hash: ${codeHash.substring(0, 12)}...`);

      if (nodeInfo.credentialCode) {
        console.log(`  🔐 Has credentials: ${nodeInfo.credentialCode.length} bytes`);
      }

      // Save individual node data
      const nodeFile = path.join(outputDir, `${nodeConfig.package}__${nodeConfig.name}.json`);
      await fs.writeFile(nodeFile, JSON.stringify(dbRecord, null, 2));
    } catch (error) {
      results.failed++;
      results.errors.push({
        node: nodeConfig.type,
        error: error.message,
      });
      console.log(`  ❌ Failed: ${error.message}`);
    }

    console.log('');
  }

  // Generate summary report. Guard both ratios against division by zero —
  // with zero tested/extracted nodes the originals printed "NaN".
  const successRate = results.tested > 0
    ? ((results.extracted / results.tested) * 100).toFixed(1)
    : '0.0';
  const avgSizeKb = results.extracted > 0
    ? (results.totalSize / results.extracted / 1024).toFixed(2)
    : '0.00';

  console.log('='.repeat(60));
  console.log('EXTRACTION SUMMARY');
  console.log('='.repeat(60));
  console.log(`Total nodes tested: ${results.tested}`);
  console.log(`Successfully extracted: ${results.extracted} (${successRate}%)`);
  console.log(`Failed: ${results.failed}`);
  console.log(`Total code size: ${(results.totalSize / 1024).toFixed(2)} KB`);
  console.log(`Average node size: ${avgSizeKb} KB`);

  // Test database insertion simulation
  console.log('\n📊 Database Storage Simulation:');
  console.log('--------------------------------');

  if (results.extracted > 0) {
    // Group extracted nodes by their owning package.
    const packages = {};
    for (const node of results.nodes) {
      if (!packages[node.package_name]) {
        packages[node.package_name] = {
          name: node.package_name,
          nodes: [],
          totalSize: 0,
        };
      }
      packages[node.package_name].nodes.push(node.name);
      packages[node.package_name].totalSize += node.code_length;
    }

    console.log('\nPackages:');
    for (const pkg of Object.values(packages)) {
      console.log(`  📦 ${pkg.name}`);
      console.log(`     Nodes: ${pkg.nodes.length}`);
      console.log(`     Total size: ${(pkg.totalSize / 1024).toFixed(2)} KB`);
      console.log(`     Nodes: ${pkg.nodes.join(', ')}`);
    }

    // Save database-ready JSON
    const dbData = {
      schema: DB_SCHEMA,
      extracted_at: new Date().toISOString(),
      statistics: {
        total_nodes: results.extracted,
        total_size_bytes: results.totalSize,
        packages: Object.keys(packages).length,
        success_rate: successRate,
      },
      nodes: results.nodes,
    };

    const dbFile = path.join(outputDir, 'database-import.json');
    await fs.writeFile(dbFile, JSON.stringify(dbData, null, 2));
    console.log(`\n💾 Database import file saved: ${dbFile}`);

    // Create SQL insert statements. String values go through sqlQuote so
    // embedded single quotes cannot corrupt the generated script.
    const sqlFile = path.join(outputDir, 'insert-nodes.sql');
    let sql = '-- Auto-generated SQL for n8n nodes\n\n';

    for (const node of results.nodes) {
      sql += `-- Node: ${node.node_type}\n`;
      sql += `INSERT INTO nodes (node_type, name, package_name, code_hash, code_length, source_location, has_credentials)\n`;
      sql += `VALUES (${sqlQuote(node.node_type)}, ${sqlQuote(node.name)}, ${sqlQuote(node.package_name)}, ${sqlQuote(node.code_hash)}, ${node.code_length}, ${sqlQuote(node.source_location)}, ${node.has_credentials});\n\n`;
    }

    await fs.writeFile(sqlFile, sql);
    console.log(`📝 SQL insert file saved: ${sqlFile}`);
  }

  // Save full report
  const reportFile = path.join(outputDir, 'extraction-report.json');
  await fs.writeFile(reportFile, JSON.stringify(results, null, 2));
  console.log(`\n📄 Full report saved: ${reportFile}`);

  // Show any errors
  if (results.errors.length > 0) {
    console.log('\n⚠️  Extraction Errors:');
    for (const err of results.errors) {
      console.log(`  - ${err.node}: ${err.error}`);
    }
  }

  console.log('\n✨ Database extraction test completed!');
  console.log(`📁 Results saved in: ${outputDir}`);

  // Exit code reflects whether any extraction failed.
  process.exit(results.failed > 0 ? 1 : 0);
}

// Entry point: kick off the test run; surface any unexpected rejection
// and exit non-zero so CI treats it as a failure.
main().catch((err) => {
  console.error('Fatal error:', err);
  process.exit(1);
});
```
Page 10/45FirstPrevNextLast