#
tokens: 44958/50000 6/1179 files (page 14/21)
lines: off (toggle) GitHub
raw markdown copy
This is page 14 of 21. Use http://codebase.md/sparesparrow/mcp-project-orchestrator?page={x} to view the full context.

# Directory Structure

```
├── .cursorrules
├── .env.example
├── .github
│   └── workflows
│       ├── build.yml
│       ├── ci-cd.yml
│       ├── ci.yml
│       ├── deploy.yml
│       ├── ecosystem-monitor.yml
│       ├── fan-out-orchestrator.yml
│       └── release.yml
├── .gitignore
├── .pre-commit-config.yaml
├── AUTOMOTIVE_CAMERA_SYSTEM_SUMMARY.md
├── automotive-camera-system
│   ├── docs
│   │   └── IMPLEMENTACE_CS.md
│   └── README.md
├── AWS_MCP_IMPLEMENTATION_SUMMARY.md
├── AWS_MCP_QUICKSTART.md
├── AWS_SIP_TRUNK_DEPLOYMENT_COMPLETE.md
├── aws-sip-trunk
│   ├── .gitignore
│   ├── config
│   │   ├── extensions.conf.j2
│   │   └── pjsip.conf.j2
│   ├── DEPLOYMENT_SUMMARY.md
│   ├── docs
│   │   ├── DEPLOYMENT.md
│   │   └── TROUBLESHOOTING.md
│   ├── PROJECT_INDEX.md
│   ├── pyproject.toml
│   ├── QUICKSTART.md
│   ├── README.md
│   ├── scripts
│   │   ├── deploy-asterisk-aws.sh
│   │   └── user-data.sh
│   ├── terraform
│   │   ├── ec2.tf
│   │   ├── main.tf
│   │   ├── monitoring.tf
│   │   ├── networking.tf
│   │   ├── outputs.tf
│   │   ├── storage.tf
│   │   ├── terraform.tfvars.example
│   │   └── variables.tf
│   ├── tests
│   │   └── test_sip_connectivity.py
│   └── VERIFICATION_CHECKLIST.md
├── CLAUDE.md
├── component_templates.json
├── conanfile.py
├── config
│   ├── default.json
│   └── project_orchestration.json
├── Containerfile
├── cursor-templates
│   └── openssl
│       ├── linux-dev.mdc.jinja2
│       └── shared.mdc.jinja2
├── data
│   └── prompts
│       └── templates
│           ├── advanced-multi-server-template.json
│           ├── analysis-assistant.json
│           ├── analyze-mermaid-diagram.json
│           ├── architecture-design-assistant.json
│           ├── code-diagram-documentation-creator.json
│           ├── code-refactoring-assistant.json
│           ├── code-review-assistant.json
│           ├── collaborative-development.json
│           ├── consolidated-interfaces-template.json
│           ├── could-you-interpret-the-assumed-applicat.json
│           ├── data-analysis-template.json
│           ├── database-query-assistant.json
│           ├── debugging-assistant.json
│           ├── development-system-prompt-zcna0.json
│           ├── development-system-prompt.json
│           ├── development-workflow.json
│           ├── docker-compose-prompt-combiner.json
│           ├── docker-containerization-guide.json
│           ├── docker-mcp-servers-orchestration.json
│           ├── foresight-assistant.json
│           ├── generate-different-types-of-questions-ab.json
│           ├── generate-mermaid-diagram.json
│           ├── image-1-describe-the-icon-in-one-sen.json
│           ├── initialize-project-setup-for-a-new-micro.json
│           ├── install-dependencies-build-run-test.json
│           ├── mcp-code-generator.json
│           ├── mcp-integration-assistant.json
│           ├── mcp-resources-explorer.json
│           ├── mcp-resources-integration.json
│           ├── mcp-server-configurator.json
│           ├── mcp-server-dev-prompt-combiner.json
│           ├── mcp-server-integration-template.json
│           ├── mcp-template-system.json
│           ├── mermaid-analysis-expert.json
│           ├── mermaid-class-diagram-generator.json
│           ├── mermaid-diagram-generator.json
│           ├── mermaid-diagram-modifier.json
│           ├── modify-mermaid-diagram.json
│           ├── monorepo-migration-guide.json
│           ├── multi-resource-context.json
│           ├── project-analysis-assistant.json
│           ├── prompt-combiner-interface.json
│           ├── prompt-templates.json
│           ├── repository-explorer.json
│           ├── research-assistant.json
│           ├── sequential-data-analysis.json
│           ├── solid-code-analysis-visualizer.json
│           ├── task-list-helper-8ithy.json
│           ├── template-based-mcp-integration.json
│           ├── templates.json
│           ├── test-prompt.json
│           └── you-are-limited-to-respond-yes-or-no-onl.json
├── docs
│   ├── AWS_MCP.md
│   ├── AWS.md
│   ├── CONAN.md
│   └── integration.md
├── elevenlabs-agents
│   ├── agent-prompts.json
│   └── README.md
├── IMPLEMENTATION_STATUS.md
├── integration_plan.md
├── LICENSE
├── MANIFEST.in
├── mcp-project-orchestrator
│   └── openssl
│       ├── .github
│       │   └── workflows
│       │       └── validate-cursor-config.yml
│       ├── conanfile.py
│       ├── CURSOR_DEPLOYMENT_POLISH.md
│       ├── cursor-rules
│       │   ├── mcp.json.jinja2
│       │   ├── prompts
│       │   │   ├── fips-compliance.md.jinja2
│       │   │   ├── openssl-coding-standards.md.jinja2
│       │   │   └── pr-review.md.jinja2
│       │   └── rules
│       │       ├── ci-linux.mdc.jinja2
│       │       ├── linux-dev.mdc.jinja2
│       │       ├── macos-dev.mdc.jinja2
│       │       ├── shared.mdc.jinja2
│       │       └── windows-dev.mdc.jinja2
│       ├── docs
│       │   └── cursor-configuration-management.md
│       ├── examples
│       │   └── example-workspace
│       │       ├── .cursor
│       │       │   ├── mcp.json
│       │       │   └── rules
│       │       │       ├── linux-dev.mdc
│       │       │       └── shared.mdc
│       │       ├── .gitignore
│       │       ├── CMakeLists.txt
│       │       ├── conanfile.py
│       │       ├── profiles
│       │       │   ├── linux-gcc-debug.profile
│       │       │   └── linux-gcc-release.profile
│       │       ├── README.md
│       │       └── src
│       │           ├── crypto_utils.cpp
│       │           ├── crypto_utils.h
│       │           └── main.cpp
│       ├── IMPLEMENTATION_SUMMARY.md
│       ├── mcp_orchestrator
│       │   ├── __init__.py
│       │   ├── cli.py
│       │   ├── conan_integration.py
│       │   ├── cursor_config.py
│       │   ├── cursor_deployer.py
│       │   ├── deploy_cursor.py
│       │   ├── env_config.py
│       │   ├── platform_detector.py
│       │   └── yaml_validator.py
│       ├── openssl-cursor-example-workspace-20251014_121133.zip
│       ├── pyproject.toml
│       ├── README.md
│       ├── requirements.txt
│       ├── scripts
│       │   └── create_example_workspace.py
│       ├── setup.py
│       ├── test_deployment.py
│       └── tests
│           ├── __init__.py
│           ├── test_cursor_deployer.py
│           └── test_template_validation.py
├── printcast-agent
│   ├── .env.example
│   ├── config
│   │   └── asterisk
│   │       └── extensions.conf
│   ├── Containerfile
│   ├── docker-compose.yml
│   ├── pyproject.toml
│   ├── README.md
│   ├── scripts
│   │   └── docker-entrypoint.sh
│   ├── src
│   │   ├── integrations
│   │   │   ├── __init__.py
│   │   │   ├── asterisk.py
│   │   │   ├── content.py
│   │   │   ├── delivery.py
│   │   │   ├── elevenlabs.py
│   │   │   └── printing.py
│   │   ├── mcp_server
│   │   │   ├── __init__.py
│   │   │   ├── main.py
│   │   │   └── server.py
│   │   └── orchestration
│   │       ├── __init__.py
│   │       └── workflow.py
│   └── tests
│       └── test_mcp_server.py
├── project_orchestration.json
├── project_templates.json
├── pyproject.toml
├── README.md
├── REFACTORING_COMPLETED.md
├── REFACTORING_RECOMMENDATIONS.md
├── requirements.txt
├── scripts
│   ├── archive
│   │   ├── init_claude_test.sh
│   │   ├── init_postgres.sh
│   │   ├── start_mcp_servers.sh
│   │   └── test_claude_desktop.sh
│   ├── consolidate_mermaid.py
│   ├── consolidate_prompts.py
│   ├── consolidate_resources.py
│   ├── consolidate_templates.py
│   ├── INSTRUCTIONS.md
│   ├── README.md
│   ├── setup_aws_mcp.sh
│   ├── setup_mcp.sh
│   ├── setup_orchestrator.sh
│   ├── setup_project.py
│   └── test_mcp.sh
├── src
│   └── mcp_project_orchestrator
│       ├── __init__.py
│       ├── __main__.py
│       ├── aws_mcp.py
│       ├── cli
│       │   └── __init__.py
│       ├── cli.py
│       ├── commands
│       │   └── openssl_cli.py
│       ├── core
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── config.py
│       │   ├── exceptions.py
│       │   ├── fastmcp.py
│       │   ├── logging.py
│       │   └── managers.py
│       ├── cursor_deployer.py
│       ├── ecosystem_monitor.py
│       ├── fan_out_orchestrator.py
│       ├── fastmcp.py
│       ├── mcp-py
│       │   ├── AggregateVersions.py
│       │   ├── CustomBashTool.py
│       │   ├── FileAnnotator.py
│       │   ├── mcp-client.py
│       │   ├── mcp-server.py
│       │   ├── MermaidDiagramGenerator.py
│       │   ├── NamingAgent.py
│       │   └── solid-analyzer-agent.py
│       ├── mermaid
│       │   ├── __init__.py
│       │   ├── generator.py
│       │   ├── mermaid_orchestrator.py
│       │   ├── renderer.py
│       │   ├── templates
│       │   │   ├── AbstractFactory-diagram.json
│       │   │   ├── Adapter-diagram.json
│       │   │   ├── Analyze_Mermaid_Diagram.json
│       │   │   ├── Builder-diagram.json
│       │   │   ├── Chain-diagram.json
│       │   │   ├── Code_Diagram_Documentation_Creator.json
│       │   │   ├── Command-diagram.json
│       │   │   ├── Decorator-diagram.json
│       │   │   ├── Facade-diagram.json
│       │   │   ├── Factory-diagram.json
│       │   │   ├── flowchart
│       │   │   │   ├── AbstractFactory-diagram.json
│       │   │   │   ├── Adapter-diagram.json
│       │   │   │   ├── Analyze_Mermaid_Diagram.json
│       │   │   │   ├── Builder-diagram.json
│       │   │   │   ├── Chain-diagram.json
│       │   │   │   ├── Code_Diagram_Documentation_Creator.json
│       │   │   │   ├── Command-diagram.json
│       │   │   │   ├── Decorator-diagram.json
│       │   │   │   ├── Facade-diagram.json
│       │   │   │   ├── Factory-diagram.json
│       │   │   │   ├── Generate_Mermaid_Diagram.json
│       │   │   │   ├── generated_diagram.json
│       │   │   │   ├── integration.json
│       │   │   │   ├── Iterator-diagram.json
│       │   │   │   ├── Mediator-diagram.json
│       │   │   │   ├── Memento-diagram.json
│       │   │   │   ├── Mermaid_Analysis_Expert.json
│       │   │   │   ├── Mermaid_Class_Diagram_Generator.json
│       │   │   │   ├── Mermaid_Diagram_Generator.json
│       │   │   │   ├── Mermaid_Diagram_Modifier.json
│       │   │   │   ├── Modify_Mermaid_Diagram.json
│       │   │   │   ├── Observer-diagram.json
│       │   │   │   ├── Prototype-diagram.json
│       │   │   │   ├── Proxy-diagram.json
│       │   │   │   ├── README.json
│       │   │   │   ├── Singleton-diagram.json
│       │   │   │   ├── State-diagram.json
│       │   │   │   ├── Strategy-diagram.json
│       │   │   │   ├── TemplateMethod-diagram.json
│       │   │   │   ├── theme_dark.json
│       │   │   │   ├── theme_default.json
│       │   │   │   ├── theme_pastel.json
│       │   │   │   ├── theme_vibrant.json
│       │   │   │   └── Visitor-diagram.json
│       │   │   ├── Generate_Mermaid_Diagram.json
│       │   │   ├── generated_diagram.json
│       │   │   ├── index.json
│       │   │   ├── integration.json
│       │   │   ├── Iterator-diagram.json
│       │   │   ├── Mediator-diagram.json
│       │   │   ├── Memento-diagram.json
│       │   │   ├── Mermaid_Analysis_Expert.json
│       │   │   ├── Mermaid_Class_Diagram_Generator.json
│       │   │   ├── Mermaid_Diagram_Generator.json
│       │   │   ├── Mermaid_Diagram_Modifier.json
│       │   │   ├── Modify_Mermaid_Diagram.json
│       │   │   ├── Observer-diagram.json
│       │   │   ├── Prototype-diagram.json
│       │   │   ├── Proxy-diagram.json
│       │   │   ├── README.json
│       │   │   ├── Singleton-diagram.json
│       │   │   ├── State-diagram.json
│       │   │   ├── Strategy-diagram.json
│       │   │   ├── TemplateMethod-diagram.json
│       │   │   ├── theme_dark.json
│       │   │   ├── theme_default.json
│       │   │   ├── theme_pastel.json
│       │   │   ├── theme_vibrant.json
│       │   │   └── Visitor-diagram.json
│       │   └── types.py
│       ├── project_orchestration.py
│       ├── prompt_manager
│       │   ├── __init__.py
│       │   ├── loader.py
│       │   ├── manager.py
│       │   └── template.py
│       ├── prompts
│       │   ├── __dirname.json
│       │   ├── __image_1___describe_the_icon_in_one_sen___.json
│       │   ├── __init__.py
│       │   ├── __type.json
│       │   ├── _.json
│       │   ├── _DEFAULT_OPEN_DELIMITER.json
│       │   ├── _emojiRegex.json
│       │   ├── _UUID_CHARS.json
│       │   ├── a.json
│       │   ├── A.json
│       │   ├── Aa.json
│       │   ├── aAnnotationPadding.json
│       │   ├── absoluteThresholdGroup.json
│       │   ├── add.json
│       │   ├── ADDITIONAL_PROPERTY_FLAG.json
│       │   ├── Advanced_Multi-Server_Integration_Template.json
│       │   ├── allOptionsList.json
│       │   ├── analysis
│       │   │   ├── Data_Analysis_Template.json
│       │   │   ├── index.json
│       │   │   ├── Mermaid_Analysis_Expert.json
│       │   │   ├── Sequential_Data_Analysis_with_MCP_Integration.json
│       │   │   └── SOLID_Code_Analysis_Visualizer.json
│       │   ├── Analysis_Assistant.json
│       │   ├── Analyze_Mermaid_Diagram.json
│       │   ├── ANDROID_EVERGREEN_FIRST.json
│       │   ├── ANSI_ESCAPE_BELL.json
│       │   ├── architecture
│       │   │   ├── index.json
│       │   │   └── PromptCombiner_Interface.json
│       │   ├── Architecture_Design_Assistant.json
│       │   ├── argsTag.json
│       │   ├── ARROW.json
│       │   ├── assistant
│       │   │   ├── Analysis_Assistant.json
│       │   │   ├── Architecture_Design_Assistant.json
│       │   │   ├── Code_Refactoring_Assistant.json
│       │   │   ├── Code_Review_Assistant.json
│       │   │   ├── Database_Query_Assistant.json
│       │   │   ├── Debugging_Assistant.json
│       │   │   ├── Foresight_Assistant.json
│       │   │   ├── index.json
│       │   │   ├── MCP_Integration_Assistant.json
│       │   │   ├── Project_Analysis_Assistant.json
│       │   │   └── Research_Assistant.json
│       │   ├── astralRange.json
│       │   ├── at.json
│       │   ├── authorization_endpoint.json
│       │   ├── b.json
│       │   ├── BABELIGNORE_FILENAME.json
│       │   ├── BACKSLASH.json
│       │   ├── backupId.json
│       │   ├── BANG.json
│       │   ├── BASE64_MAP.json
│       │   ├── baseFlags.json
│       │   ├── Basic_Template.json
│       │   ├── bgModel.json
│       │   ├── bignum.json
│       │   ├── blockKeywordsStr.json
│       │   ├── BOMChar.json
│       │   ├── boundary.json
│       │   ├── brackets.json
│       │   ├── BROWSER_VAR.json
│       │   ├── bt.json
│       │   ├── BUILTIN.json
│       │   ├── BULLET.json
│       │   ├── c.json
│       │   ├── C.json
│       │   ├── CACHE_VERSION.json
│       │   ├── cacheControl.json
│       │   ├── cacheProp.json
│       │   ├── category.py
│       │   ├── CHANGE_EVENT.json
│       │   ├── CHAR_CODE_0.json
│       │   ├── chars.json
│       │   ├── cjsPattern.json
│       │   ├── cKeywords.json
│       │   ├── classForPercent.json
│       │   ├── classStr.json
│       │   ├── clientFirstMessageBare.json
│       │   ├── cmd.json
│       │   ├── Code_Diagram_Documentation_Creator.json
│       │   ├── Code_Refactoring_Assistant.json
│       │   ├── Code_Review_Assistant.json
│       │   ├── code.json
│       │   ├── coding
│       │   │   ├── __dirname.json
│       │   │   ├── _.json
│       │   │   ├── _DEFAULT_OPEN_DELIMITER.json
│       │   │   ├── _emojiRegex.json
│       │   │   ├── _UUID_CHARS.json
│       │   │   ├── a.json
│       │   │   ├── A.json
│       │   │   ├── aAnnotationPadding.json
│       │   │   ├── absoluteThresholdGroup.json
│       │   │   ├── add.json
│       │   │   ├── ADDITIONAL_PROPERTY_FLAG.json
│       │   │   ├── allOptionsList.json
│       │   │   ├── ANDROID_EVERGREEN_FIRST.json
│       │   │   ├── ANSI_ESCAPE_BELL.json
│       │   │   ├── argsTag.json
│       │   │   ├── ARROW.json
│       │   │   ├── astralRange.json
│       │   │   ├── at.json
│       │   │   ├── authorization_endpoint.json
│       │   │   ├── BABELIGNORE_FILENAME.json
│       │   │   ├── BACKSLASH.json
│       │   │   ├── BANG.json
│       │   │   ├── BASE64_MAP.json
│       │   │   ├── baseFlags.json
│       │   │   ├── bgModel.json
│       │   │   ├── bignum.json
│       │   │   ├── blockKeywordsStr.json
│       │   │   ├── BOMChar.json
│       │   │   ├── boundary.json
│       │   │   ├── brackets.json
│       │   │   ├── BROWSER_VAR.json
│       │   │   ├── bt.json
│       │   │   ├── BUILTIN.json
│       │   │   ├── BULLET.json
│       │   │   ├── c.json
│       │   │   ├── C.json
│       │   │   ├── CACHE_VERSION.json
│       │   │   ├── cacheControl.json
│       │   │   ├── cacheProp.json
│       │   │   ├── CHANGE_EVENT.json
│       │   │   ├── CHAR_CODE_0.json
│       │   │   ├── chars.json
│       │   │   ├── cjsPattern.json
│       │   │   ├── cKeywords.json
│       │   │   ├── classForPercent.json
│       │   │   ├── classStr.json
│       │   │   ├── clientFirstMessageBare.json
│       │   │   ├── cmd.json
│       │   │   ├── code.json
│       │   │   ├── colorCode.json
│       │   │   ├── comma.json
│       │   │   ├── command.json
│       │   │   ├── configJsContent.json
│       │   │   ├── connectionString.json
│       │   │   ├── cssClassStr.json
│       │   │   ├── currentBoundaryParse.json
│       │   │   ├── d.json
│       │   │   ├── data.json
│       │   │   ├── DATA.json
│       │   │   ├── dataWebpackPrefix.json
│       │   │   ├── debug.json
│       │   │   ├── decodeStateVectorV2.json
│       │   │   ├── DEFAULT_DELIMITER.json
│       │   │   ├── DEFAULT_DIAGRAM_DIRECTION.json
│       │   │   ├── DEFAULT_JS_PATTERN.json
│       │   │   ├── DEFAULT_LOG_TARGET.json
│       │   │   ├── defaultHelpOpt.json
│       │   │   ├── defaultHost.json
│       │   │   ├── deferY18nLookupPrefix.json
│       │   │   ├── DELIM.json
│       │   │   ├── delimiter.json
│       │   │   ├── DEPRECATION.json
│       │   │   ├── destMain.json
│       │   │   ├── DID_NOT_THROW.json
│       │   │   ├── direction.json
│       │   │   ├── displayValue.json
│       │   │   ├── DNS.json
│       │   │   ├── doc.json
│       │   │   ├── DOCUMENTATION_NOTE.json
│       │   │   ├── DOT.json
│       │   │   ├── DOTS.json
│       │   │   ├── dummyCompoundId.json
│       │   │   ├── e.json
│       │   │   ├── E.json
│       │   │   ├── earlyHintsLink.json
│       │   │   ├── elide.json
│       │   │   ├── EMPTY.json
│       │   │   ├── end.json
│       │   │   ├── endpoint.json
│       │   │   ├── environment.json
│       │   │   ├── ERR_CODE.json
│       │   │   ├── errMessage.json
│       │   │   ├── errMsg.json
│       │   │   ├── ERROR_MESSAGE.json
│       │   │   ├── error.json
│       │   │   ├── ERROR.json
│       │   │   ├── ERRORCLASS.json
│       │   │   ├── errorMessage.json
│       │   │   ├── es6Default.json
│       │   │   ├── ESC.json
│       │   │   ├── Escapable.json
│       │   │   ├── escapedChar.json
│       │   │   ├── escapeFuncStr.json
│       │   │   ├── escSlash.json
│       │   │   ├── ev.json
│       │   │   ├── event.json
│       │   │   ├── execaMessage.json
│       │   │   ├── EXPECTED_LABEL.json
│       │   │   ├── expected.json
│       │   │   ├── expectedString.json
│       │   │   ├── expression1.json
│       │   │   ├── EXTENSION.json
│       │   │   ├── f.json
│       │   │   ├── FAIL_TEXT.json
│       │   │   ├── FILE_BROWSER_FACTORY.json
│       │   │   ├── fill.json
│       │   │   ├── findPackageJson.json
│       │   │   ├── fnKey.json
│       │   │   ├── FORMAT.json
│       │   │   ├── formatted.json
│       │   │   ├── from.json
│       │   │   ├── fullpaths.json
│       │   │   ├── FUNC_ERROR_TEXT.json
│       │   │   ├── GenStateSuspendedStart.json
│       │   │   ├── GENSYNC_EXPECTED_START.json
│       │   │   ├── gutter.json
│       │   │   ├── h.json
│       │   │   ├── handlerFuncName.json
│       │   │   ├── HASH_UNDEFINED.json
│       │   │   ├── head.json
│       │   │   ├── helpMessage.json
│       │   │   ├── HINT_ARG.json
│       │   │   ├── HOOK_RETURNED_NOTHING_ERROR_MESSAGE.json
│       │   │   ├── i.json
│       │   │   ├── id.json
│       │   │   ├── identifier.json
│       │   │   ├── Identifier.json
│       │   │   ├── INDENT.json
│       │   │   ├── indentation.json
│       │   │   ├── index.json
│       │   │   ├── INDIRECTION_FRAGMENT.json
│       │   │   ├── input.json
│       │   │   ├── inputText.json
│       │   │   ├── insert.json
│       │   │   ├── insertPromptQuery.json
│       │   │   ├── INSPECT_MAX_BYTES.json
│       │   │   ├── intToCharMap.json
│       │   │   ├── IS_ITERABLE_SENTINEL.json
│       │   │   ├── IS_KEYED_SENTINEL.json
│       │   │   ├── isConfigType.json
│       │   │   ├── isoSentinel.json
│       │   │   ├── isSourceNode.json
│       │   │   ├── j.json
│       │   │   ├── JAKE_CMD.json
│       │   │   ├── JEST_GLOBAL_NAME.json
│       │   │   ├── JEST_GLOBALS_MODULE_NAME.json
│       │   │   ├── JSON_SYNTAX_CHAR.json
│       │   │   ├── json.json
│       │   │   ├── jsonType.json
│       │   │   ├── jupyter_namespaceObject.json
│       │   │   ├── JUPYTERLAB_DOCMANAGER_PLUGIN_ID.json
│       │   │   ├── k.json
│       │   │   ├── KERNEL_STATUS_ERROR_CLASS.json
│       │   │   ├── key.json
│       │   │   ├── l.json
│       │   │   ├── labelId.json
│       │   │   ├── LATEST_PROTOCOL_VERSION.json
│       │   │   ├── LETTERDASHNUMBER.json
│       │   │   ├── LF.json
│       │   │   ├── LIMIT_REPLACE_NODE.json
│       │   │   ├── logTime.json
│       │   │   ├── lstatkey.json
│       │   │   ├── lt.json
│       │   │   ├── m.json
│       │   │   ├── maliciousPayload.json
│       │   │   ├── mask.json
│       │   │   ├── match.json
│       │   │   ├── matchingDelim.json
│       │   │   ├── MAXIMUM_MESSAGE_SIZE.json
│       │   │   ├── mdcContent.json
│       │   │   ├── MERMAID_DOM_ID_PREFIX.json
│       │   │   ├── message.json
│       │   │   ├── messages.json
│       │   │   ├── meth.json
│       │   │   ├── minimatch.json
│       │   │   ├── MOCK_CONSTRUCTOR_NAME.json
│       │   │   ├── MOCKS_PATTERN.json
│       │   │   ├── moduleDirectory.json
│       │   │   ├── msg.json
│       │   │   ├── mtr.json
│       │   │   ├── multipartType.json
│       │   │   ├── n.json
│       │   │   ├── N.json
│       │   │   ├── name.json
│       │   │   ├── NATIVE_PLATFORM.json
│       │   │   ├── newUrl.json
│       │   │   ├── NM.json
│       │   │   ├── NO_ARGUMENTS.json
│       │   │   ├── NO_DIFF_MESSAGE.json
│       │   │   ├── NODE_MODULES.json
│       │   │   ├── nodeInternalPrefix.json
│       │   │   ├── nonASCIIidentifierStartChars.json
│       │   │   ├── nonKey.json
│       │   │   ├── NOT_A_DOT.json
│       │   │   ├── notCharacterOrDash.json
│       │   │   ├── notebookURL.json
│       │   │   ├── notSelector.json
│       │   │   ├── nullTag.json
│       │   │   ├── num.json
│       │   │   ├── NUMBER.json
│       │   │   ├── o.json
│       │   │   ├── O.json
│       │   │   ├── octChar.json
│       │   │   ├── octetStreamType.json
│       │   │   ├── operators.json
│       │   │   ├── out.json
│       │   │   ├── OUTSIDE_JEST_VM_PROTOCOL.json
│       │   │   ├── override.json
│       │   │   ├── p.json
│       │   │   ├── PACKAGE_FILENAME.json
│       │   │   ├── PACKAGE_JSON.json
│       │   │   ├── packageVersion.json
│       │   │   ├── paddedNumber.json
│       │   │   ├── page.json
│       │   │   ├── parseClass.json
│       │   │   ├── path.json
│       │   │   ├── pathExt.json
│       │   │   ├── pattern.json
│       │   │   ├── PatternBoolean.json
│       │   │   ├── pBuiltins.json
│       │   │   ├── pFloatForm.json
│       │   │   ├── pkg.json
│       │   │   ├── PLUGIN_ID_DOC_MANAGER.json
│       │   │   ├── plusChar.json
│       │   │   ├── PN_CHARS.json
│       │   │   ├── point.json
│       │   │   ├── prefix.json
│       │   │   ├── PRETTY_PLACEHOLDER.json
│       │   │   ├── property_prefix.json
│       │   │   ├── pubkey256.json
│       │   │   ├── Q.json
│       │   │   ├── qmark.json
│       │   │   ├── QO.json
│       │   │   ├── query.json
│       │   │   ├── querystringType.json
│       │   │   ├── queryText.json
│       │   │   ├── r.json
│       │   │   ├── R.json
│       │   │   ├── rangeStart.json
│       │   │   ├── re.json
│       │   │   ├── reI.json
│       │   │   ├── REQUIRED_FIELD_SYMBOL.json
│       │   │   ├── reserve.json
│       │   │   ├── resolvedDestination.json
│       │   │   ├── resolverDir.json
│       │   │   ├── responseType.json
│       │   │   ├── result.json
│       │   │   ├── ROOT_DESCRIBE_BLOCK_NAME.json
│       │   │   ├── ROOT_NAMESPACE_NAME.json
│       │   │   ├── ROOT_TASK_NAME.json
│       │   │   ├── route.json
│       │   │   ├── RUNNING_TEXT.json
│       │   │   ├── s.json
│       │   │   ├── SCHEMA_PATH.json
│       │   │   ├── se.json
│       │   │   ├── SEARCHABLE_CLASS.json
│       │   │   ├── secret.json
│       │   │   ├── selector.json
│       │   │   ├── SEMVER_SPEC_VERSION.json
│       │   │   ├── sensitiveHeaders.json
│       │   │   ├── sep.json
│       │   │   ├── separator.json
│       │   │   ├── SHAPE_STATE.json
│       │   │   ├── shape.json
│       │   │   ├── SHARED.json
│       │   │   ├── short.json
│       │   │   ├── side.json
│       │   │   ├── SNAPSHOT_VERSION.json
│       │   │   ├── SOURCE_MAPPING_PREFIX.json
│       │   │   ├── source.json
│       │   │   ├── sourceMapContent.json
│       │   │   ├── SPACE_SYMBOL.json
│       │   │   ├── SPACE.json
│       │   │   ├── sqlKeywords.json
│       │   │   ├── sranges.json
│       │   │   ├── st.json
│       │   │   ├── ST.json
│       │   │   ├── stack.json
│       │   │   ├── START_HIDING.json
│       │   │   ├── START_OF_LINE.json
│       │   │   ├── startNoTraversal.json
│       │   │   ├── STATES.json
│       │   │   ├── stats.json
│       │   │   ├── statSync.json
│       │   │   ├── storageStatus.json
│       │   │   ├── storageType.json
│       │   │   ├── str.json
│       │   │   ├── stringifiedObject.json
│       │   │   ├── stringPath.json
│       │   │   ├── stringResult.json
│       │   │   ├── stringTag.json
│       │   │   ├── strValue.json
│       │   │   ├── style.json
│       │   │   ├── SUB_NAME.json
│       │   │   ├── subkey.json
│       │   │   ├── SUBPROTOCOL.json
│       │   │   ├── SUITE_NAME.json
│       │   │   ├── symbolPattern.json
│       │   │   ├── symbolTag.json
│       │   │   ├── t.json
│       │   │   ├── T.json
│       │   │   ├── templateDir.json
│       │   │   ├── tempName.json
│       │   │   ├── text.json
│       │   │   ├── time.json
│       │   │   ├── titleSeparator.json
│       │   │   ├── tmpl.json
│       │   │   ├── tn.json
│       │   │   ├── toValue.json
│       │   │   ├── transform.json
│       │   │   ├── trustProxyDefaultSymbol.json
│       │   │   ├── typeArgumentsKey.json
│       │   │   ├── typeKey.json
│       │   │   ├── typeMessage.json
│       │   │   ├── typesRegistryPackageName.json
│       │   │   ├── u.json
│       │   │   ├── UNDEFINED.json
│       │   │   ├── unit.json
│       │   │   ├── UNMATCHED_SURROGATE_PAIR_REPLACE.json
│       │   │   ├── ur.json
│       │   │   ├── USAGE.json
│       │   │   ├── value.json
│       │   │   ├── Vr.json
│       │   │   ├── watchmanURL.json
│       │   │   ├── webkit.json
│       │   │   ├── xhtml.json
│       │   │   ├── XP_DEFAULT_PATHEXT.json
│       │   │   └── y.json
│       │   ├── Collaborative_Development_with_MCP_Integration.json
│       │   ├── colorCode.json
│       │   ├── comma.json
│       │   ├── command.json
│       │   ├── completionShTemplate.json
│       │   ├── configJsContent.json
│       │   ├── connectionString.json
│       │   ├── Consolidated_TypeScript_Interfaces_Template.json
│       │   ├── Could_you_interpret_the_assumed_applicat___.json
│       │   ├── cssClassStr.json
│       │   ├── currentBoundaryParse.json
│       │   ├── d.json
│       │   ├── Data_Analysis_Template.json
│       │   ├── data.json
│       │   ├── DATA.json
│       │   ├── Database_Query_Assistant.json
│       │   ├── dataWebpackPrefix.json
│       │   ├── debug.json
│       │   ├── Debugging_Assistant.json
│       │   ├── decodeStateVectorV2.json
│       │   ├── DEFAULT_DELIMITER.json
│       │   ├── DEFAULT_DIAGRAM_DIRECTION.json
│       │   ├── DEFAULT_INDENT.json
│       │   ├── DEFAULT_JS_PATTERN.json
│       │   ├── DEFAULT_LOG_TARGET.json
│       │   ├── defaultHelpOpt.json
│       │   ├── defaultHost.json
│       │   ├── deferY18nLookupPrefix.json
│       │   ├── DELIM.json
│       │   ├── delimiter.json
│       │   ├── DEPRECATION.json
│       │   ├── DESCENDING.json
│       │   ├── destMain.json
│       │   ├── development
│       │   │   ├── Collaborative_Development_with_MCP_Integration.json
│       │   │   ├── Consolidated_TypeScript_Interfaces_Template.json
│       │   │   ├── Development_Workflow.json
│       │   │   ├── index.json
│       │   │   ├── MCP_Server_Development_Prompt_Combiner.json
│       │   │   └── Monorepo_Migration_and_Code_Organization_Guide.json
│       │   ├── Development_System_Prompt.json
│       │   ├── Development_Workflow.json
│       │   ├── devops
│       │   │   ├── Docker_Compose_Prompt_Combiner.json
│       │   │   ├── Docker_Containerization_Guide.json
│       │   │   └── index.json
│       │   ├── DID_NOT_THROW.json
│       │   ├── direction.json
│       │   ├── displayValue.json
│       │   ├── DNS.json
│       │   ├── doc.json
│       │   ├── Docker_Compose_Prompt_Combiner.json
│       │   ├── Docker_Containerization_Guide.json
│       │   ├── Docker_MCP_Servers_Orchestration_Guide.json
│       │   ├── DOCUMENTATION_NOTE.json
│       │   ├── DOT.json
│       │   ├── DOTS.json
│       │   ├── dummyCompoundId.json
│       │   ├── e.json
│       │   ├── E.json
│       │   ├── earlyHintsLink.json
│       │   ├── elide.json
│       │   ├── EMPTY.json
│       │   ├── encoded.json
│       │   ├── end.json
│       │   ├── endpoint.json
│       │   ├── environment.json
│       │   ├── ERR_CODE.json
│       │   ├── errMessage.json
│       │   ├── errMsg.json
│       │   ├── ERROR_MESSAGE.json
│       │   ├── error.json
│       │   ├── ERROR.json
│       │   ├── ERRORCLASS.json
│       │   ├── errorMessage.json
│       │   ├── es6Default.json
│       │   ├── ESC.json
│       │   ├── Escapable.json
│       │   ├── escapedChar.json
│       │   ├── escapeFuncStr.json
│       │   ├── escSlash.json
│       │   ├── ev.json
│       │   ├── event.json
│       │   ├── execaMessage.json
│       │   ├── EXPECTED_LABEL.json
│       │   ├── expected.json
│       │   ├── expectedString.json
│       │   ├── expression1.json
│       │   ├── EXTENSION.json
│       │   ├── f.json
│       │   ├── FAIL_TEXT.json
│       │   ├── FILE_BROWSER_FACTORY.json
│       │   ├── fill.json
│       │   ├── findPackageJson.json
│       │   ├── fnKey.json
│       │   ├── Foresight_Assistant.json
│       │   ├── FORMAT.json
│       │   ├── formatted.json
│       │   ├── from.json
│       │   ├── fullpaths.json
│       │   ├── FUNC_ERROR_TEXT.json
│       │   ├── general
│       │   │   └── index.json
│       │   ├── Generate_different_types_of_questions_ab___.json
│       │   ├── Generate_Mermaid_Diagram.json
│       │   ├── GenStateSuspendedStart.json
│       │   ├── GENSYNC_EXPECTED_START.json
│       │   ├── GitHub_Repository_Explorer.json
│       │   ├── gutter.json
│       │   ├── h.json
│       │   ├── handlerFuncName.json
│       │   ├── HASH_UNDEFINED.json
│       │   ├── head.json
│       │   ├── helpMessage.json
│       │   ├── HINT_ARG.json
│       │   ├── HOOK_RETURNED_NOTHING_ERROR_MESSAGE.json
│       │   ├── i.json
│       │   ├── id.json
│       │   ├── identifier.json
│       │   ├── Identifier.json
│       │   ├── INDENT.json
│       │   ├── indentation.json
│       │   ├── index.json
│       │   ├── INDIRECTION_FRAGMENT.json
│       │   ├── Initialize_project_setup_for_a_new_micro___.json
│       │   ├── input.json
│       │   ├── inputText.json
│       │   ├── insert.json
│       │   ├── insertPromptQuery.json
│       │   ├── INSPECT_MAX_BYTES.json
│       │   ├── install_dependencies__build__run__test____.json
│       │   ├── intToCharMap.json
│       │   ├── IS_ITERABLE_SENTINEL.json
│       │   ├── IS_KEYED_SENTINEL.json
│       │   ├── isConfigType.json
│       │   ├── isoSentinel.json
│       │   ├── isSourceNode.json
│       │   ├── j.json
│       │   ├── J.json
│       │   ├── JAKE_CMD.json
│       │   ├── JEST_GLOBAL_NAME.json
│       │   ├── JEST_GLOBALS_MODULE_NAME.json
│       │   ├── JSON_SYNTAX_CHAR.json
│       │   ├── json.json
│       │   ├── jsonType.json
│       │   ├── jupyter_namespaceObject.json
│       │   ├── JUPYTERLAB_DOCMANAGER_PLUGIN_ID.json
│       │   ├── k.json
│       │   ├── KERNEL_STATUS_ERROR_CLASS.json
│       │   ├── key.json
│       │   ├── l.json
│       │   ├── labelId.json
│       │   ├── LATEST_PROTOCOL_VERSION.json
│       │   ├── LETTERDASHNUMBER.json
│       │   ├── LF.json
│       │   ├── LIMIT_REPLACE_NODE.json
│       │   ├── LINE_FEED.json
│       │   ├── logTime.json
│       │   ├── lstatkey.json
│       │   ├── lt.json
│       │   ├── m.json
│       │   ├── maliciousPayload.json
│       │   ├── manager.py
│       │   ├── marker.json
│       │   ├── mask.json
│       │   ├── match.json
│       │   ├── matchingDelim.json
│       │   ├── MAXIMUM_MESSAGE_SIZE.json
│       │   ├── MCP_Integration_Assistant.json
│       │   ├── MCP_Resources_Explorer.json
│       │   ├── MCP_Resources_Integration_Guide.json
│       │   ├── MCP_Server_Development_Prompt_Combiner.json
│       │   ├── MCP_Server_Integration_Guide.json
│       │   ├── mcp-code-generator.json
│       │   ├── mdcContent.json
│       │   ├── Mermaid_Analysis_Expert.json
│       │   ├── Mermaid_Class_Diagram_Generator.json
│       │   ├── Mermaid_Diagram_Generator.json
│       │   ├── Mermaid_Diagram_Modifier.json
│       │   ├── MERMAID_DOM_ID_PREFIX.json
│       │   ├── message.json
│       │   ├── messages.json
│       │   ├── meth.json
│       │   ├── minimatch.json
│       │   ├── MOBILE_QUERY.json
│       │   ├── MOCK_CONSTRUCTOR_NAME.json
│       │   ├── MOCKS_PATTERN.json
│       │   ├── Modify_Mermaid_Diagram.json
│       │   ├── moduleDirectory.json
│       │   ├── Monorepo_Migration_and_Code_Organization_Guide.json
│       │   ├── msg.json
│       │   ├── mtr.json
│       │   ├── Multi-Resource_Context_Assistant.json
│       │   ├── multipartType.json
│       │   ├── n.json
│       │   ├── N.json
│       │   ├── name.json
│       │   ├── NATIVE_PLATFORM.json
│       │   ├── newUrl.json
│       │   ├── NM.json
│       │   ├── NO_ARGUMENTS.json
│       │   ├── NO_DIFF_MESSAGE.json
│       │   ├── NODE_MODULES.json
│       │   ├── nodeInternalPrefix.json
│       │   ├── nonASCIIidentifierStartChars.json
│       │   ├── nonKey.json
│       │   ├── NOT_A_DOT.json
│       │   ├── notCharacterOrDash.json
│       │   ├── notebookURL.json
│       │   ├── notSelector.json
│       │   ├── nullTag.json
│       │   ├── num.json
│       │   ├── NUMBER.json
│       │   ├── o.json
│       │   ├── O.json
│       │   ├── octChar.json
│       │   ├── octetStreamType.json
│       │   ├── operators.json
│       │   ├── other
│       │   │   ├── __image_1___describe_the_icon_in_one_sen___.json
│       │   │   ├── __type.json
│       │   │   ├── Advanced_Multi-Server_Integration_Template.json
│       │   │   ├── Analyze_Mermaid_Diagram.json
│       │   │   ├── Basic_Template.json
│       │   │   ├── Code_Diagram_Documentation_Creator.json
│       │   │   ├── Collaborative_Development_with_MCP_Integration.json
│       │   │   ├── completionShTemplate.json
│       │   │   ├── Could_you_interpret_the_assumed_applicat___.json
│       │   │   ├── DEFAULT_INDENT.json
│       │   │   ├── Docker_MCP_Servers_Orchestration_Guide.json
│       │   │   ├── Generate_different_types_of_questions_ab___.json
│       │   │   ├── Generate_Mermaid_Diagram.json
│       │   │   ├── GitHub_Repository_Explorer.json
│       │   │   ├── index.json
│       │   │   ├── Initialize_project_setup_for_a_new_micro___.json
│       │   │   ├── install_dependencies__build__run__test____.json
│       │   │   ├── LINE_FEED.json
│       │   │   ├── MCP_Resources_Explorer.json
│       │   │   ├── MCP_Resources_Integration_Guide.json
│       │   │   ├── MCP_Server_Integration_Guide.json
│       │   │   ├── mcp-code-generator.json
│       │   │   ├── Mermaid_Class_Diagram_Generator.json
│       │   │   ├── Mermaid_Diagram_Generator.json
│       │   │   ├── Mermaid_Diagram_Modifier.json
│       │   │   ├── Modify_Mermaid_Diagram.json
│       │   │   ├── Multi-Resource_Context_Assistant.json
│       │   │   ├── output.json
│       │   │   ├── sseUrl.json
│       │   │   ├── string.json
│       │   │   ├── Task_List_Helper.json
│       │   │   ├── Template-Based_MCP_Integration.json
│       │   │   ├── Test_Prompt.json
│       │   │   ├── type.json
│       │   │   ├── VERSION.json
│       │   │   ├── WIN_SLASH.json
│       │   │   └── You_are_limited_to_respond_Yes_or_No_onl___.json
│       │   ├── out.json
│       │   ├── output.json
│       │   ├── OUTSIDE_JEST_VM_PROTOCOL.json
│       │   ├── override.json
│       │   ├── p.json
│       │   ├── PACKAGE_FILENAME.json
│       │   ├── PACKAGE_JSON.json
│       │   ├── packageVersion.json
│       │   ├── paddedNumber.json
│       │   ├── page.json
│       │   ├── parseClass.json
│       │   ├── PATH_NODE_MODULES.json
│       │   ├── path.json
│       │   ├── pathExt.json
│       │   ├── pattern.json
│       │   ├── PatternBoolean.json
│       │   ├── pBuiltins.json
│       │   ├── pFloatForm.json
│       │   ├── pkg.json
│       │   ├── PLUGIN_ID_DOC_MANAGER.json
│       │   ├── plusChar.json
│       │   ├── PN_CHARS.json
│       │   ├── point.json
│       │   ├── prefix.json
│       │   ├── PRETTY_PLACEHOLDER.json
│       │   ├── Project_Analysis_Assistant.json
│       │   ├── ProjectsUpdatedInBackgroundEvent.json
│       │   ├── PromptCombiner_Interface.json
│       │   ├── promptId.json
│       │   ├── property_prefix.json
│       │   ├── pubkey256.json
│       │   ├── Q.json
│       │   ├── qmark.json
│       │   ├── QO.json
│       │   ├── query.json
│       │   ├── querystringType.json
│       │   ├── queryText.json
│       │   ├── r.json
│       │   ├── R.json
│       │   ├── rangeStart.json
│       │   ├── re.json
│       │   ├── reI.json
│       │   ├── REQUIRED_FIELD_SYMBOL.json
│       │   ├── Research_Assistant.json
│       │   ├── reserve.json
│       │   ├── resolvedDestination.json
│       │   ├── resolverDir.json
│       │   ├── responseType.json
│       │   ├── result.json
│       │   ├── ROOT_DESCRIBE_BLOCK_NAME.json
│       │   ├── ROOT_NAMESPACE_NAME.json
│       │   ├── ROOT_TASK_NAME.json
│       │   ├── route.json
│       │   ├── RUNNING_TEXT.json
│       │   ├── RXstyle.json
│       │   ├── s.json
│       │   ├── SCHEMA_PATH.json
│       │   ├── schemaQuery.json
│       │   ├── se.json
│       │   ├── SEARCHABLE_CLASS.json
│       │   ├── secret.json
│       │   ├── selector.json
│       │   ├── SEMVER_SPEC_VERSION.json
│       │   ├── sensitiveHeaders.json
│       │   ├── sep.json
│       │   ├── separator.json
│       │   ├── Sequential_Data_Analysis_with_MCP_Integration.json
│       │   ├── SHAPE_STATE.json
│       │   ├── shape.json
│       │   ├── SHARED.json
│       │   ├── short.json
│       │   ├── side.json
│       │   ├── SNAPSHOT_VERSION.json
│       │   ├── SOLID_Code_Analysis_Visualizer.json
│       │   ├── SOURCE_MAPPING_PREFIX.json
│       │   ├── source.json
│       │   ├── sourceMapContent.json
│       │   ├── SPACE_SYMBOL.json
│       │   ├── SPACE.json
│       │   ├── sqlKeywords.json
│       │   ├── sranges.json
│       │   ├── sseUrl.json
│       │   ├── st.json
│       │   ├── ST.json
│       │   ├── stack.json
│       │   ├── START_HIDING.json
│       │   ├── START_OF_LINE.json
│       │   ├── startNoTraversal.json
│       │   ├── STATES.json
│       │   ├── stats.json
│       │   ├── statSync.json
│       │   ├── status.json
│       │   ├── storageStatus.json
│       │   ├── storageType.json
│       │   ├── str.json
│       │   ├── string.json
│       │   ├── stringifiedObject.json
│       │   ├── stringPath.json
│       │   ├── stringResult.json
│       │   ├── stringTag.json
│       │   ├── strValue.json
│       │   ├── style.json
│       │   ├── SUB_NAME.json
│       │   ├── subkey.json
│       │   ├── SUBPROTOCOL.json
│       │   ├── SUITE_NAME.json
│       │   ├── symbolPattern.json
│       │   ├── symbolTag.json
│       │   ├── system
│       │   │   ├── Aa.json
│       │   │   ├── b.json
│       │   │   ├── Development_System_Prompt.json
│       │   │   ├── index.json
│       │   │   ├── marker.json
│       │   │   ├── PATH_NODE_MODULES.json
│       │   │   ├── ProjectsUpdatedInBackgroundEvent.json
│       │   │   ├── RXstyle.json
│       │   │   ├── status.json
│       │   │   └── versionMajorMinor.json
│       │   ├── t.json
│       │   ├── T.json
│       │   ├── Task_List_Helper.json
│       │   ├── Template-Based_MCP_Integration.json
│       │   ├── template.py
│       │   ├── templateDir.json
│       │   ├── tempName.json
│       │   ├── Test_Prompt.json
│       │   ├── text.json
│       │   ├── time.json
│       │   ├── titleSeparator.json
│       │   ├── tmpl.json
│       │   ├── tn.json
│       │   ├── TOPBAR_FACTORY.json
│       │   ├── toValue.json
│       │   ├── transform.json
│       │   ├── trustProxyDefaultSymbol.json
│       │   ├── txt.json
│       │   ├── type.json
│       │   ├── typeArgumentsKey.json
│       │   ├── typeKey.json
│       │   ├── typeMessage.json
│       │   ├── typesRegistryPackageName.json
│       │   ├── u.json
│       │   ├── UNDEFINED.json
│       │   ├── unit.json
│       │   ├── UNMATCHED_SURROGATE_PAIR_REPLACE.json
│       │   ├── ur.json
│       │   ├── usage.json
│       │   ├── USAGE.json
│       │   ├── user
│       │   │   ├── backupId.json
│       │   │   ├── DESCENDING.json
│       │   │   ├── encoded.json
│       │   │   ├── index.json
│       │   │   ├── J.json
│       │   │   ├── MOBILE_QUERY.json
│       │   │   ├── promptId.json
│       │   │   ├── schemaQuery.json
│       │   │   ├── TOPBAR_FACTORY.json
│       │   │   ├── txt.json
│       │   │   └── usage.json
│       │   ├── value.json
│       │   ├── VERSION.json
│       │   ├── version.py
│       │   ├── versionMajorMinor.json
│       │   ├── Vr.json
│       │   ├── watchmanURL.json
│       │   ├── webkit.json
│       │   ├── WIN_SLASH.json
│       │   ├── xhtml.json
│       │   ├── XP_DEFAULT_PATHEXT.json
│       │   ├── y.json
│       │   └── You_are_limited_to_respond_Yes_or_No_onl___.json
│       ├── resources
│       │   ├── __init__.py
│       │   ├── code_examples
│       │   │   └── index.json
│       │   ├── config
│       │   │   └── index.json
│       │   ├── documentation
│       │   │   └── index.json
│       │   ├── images
│       │   │   └── index.json
│       │   ├── index.json
│       │   └── other
│       │       └── index.json
│       ├── server.py
│       ├── templates
│       │   ├── __init__.py
│       │   ├── AbstractFactory.json
│       │   ├── Adapter.json
│       │   ├── base.py
│       │   ├── Builder.json
│       │   ├── Chain.json
│       │   ├── Command.json
│       │   ├── component
│       │   │   ├── AbstractFactory.json
│       │   │   ├── Adapter.json
│       │   │   ├── Builder.json
│       │   │   ├── Chain.json
│       │   │   ├── Command.json
│       │   │   ├── Decorator.json
│       │   │   ├── Facade.json
│       │   │   ├── Factory.json
│       │   │   ├── Iterator.json
│       │   │   ├── Mediator.json
│       │   │   ├── Memento.json
│       │   │   ├── Observer.json
│       │   │   ├── Prototype.json
│       │   │   ├── Proxy.json
│       │   │   ├── Singleton.json
│       │   │   ├── State.json
│       │   │   ├── Strategy.json
│       │   │   ├── TemplateMethod.json
│       │   │   └── Visitor.json
│       │   ├── component.py
│       │   ├── Decorator.json
│       │   ├── Facade.json
│       │   ├── Factory.json
│       │   ├── index.json
│       │   ├── Iterator.json
│       │   ├── manager.py
│       │   ├── Mediator.json
│       │   ├── Memento.json
│       │   ├── Observer.json
│       │   ├── project.py
│       │   ├── Prototype.json
│       │   ├── Proxy.json
│       │   ├── renderer.py
│       │   ├── Singleton.json
│       │   ├── State.json
│       │   ├── Strategy.json
│       │   ├── template_manager.py
│       │   ├── TemplateMethod.json
│       │   ├── types.py
│       │   └── Visitor.json
│       └── utils
│           └── __init__.py
├── SUMMARY.md
├── TASK_COMPLETION_SUMMARY.md
├── templates
│   └── openssl
│       ├── files
│       │   ├── CMakeLists.txt.jinja2
│       │   ├── conanfile.py.jinja2
│       │   ├── main.cpp.jinja2
│       │   └── README.md.jinja2
│       ├── openssl-consumer.json
│       └── template.json
├── test_openssl_integration.sh
├── test_package
│   └── conanfile.py
└── tests
    ├── __init__.py
    ├── conftest.py
    ├── integration
    │   ├── test_core_integration.py
    │   ├── test_mermaid_integration.py
    │   ├── test_prompt_manager_integration.py
    │   └── test_server_integration.py
    ├── test_aws_mcp.py
    ├── test_base_classes.py
    ├── test_config.py
    ├── test_exceptions.py
    ├── test_mermaid.py
    ├── test_prompts.py
    └── test_templates.py
```

# Files

--------------------------------------------------------------------------------
/printcast-agent/src/integrations/content.py:
--------------------------------------------------------------------------------

```python
"""
Content fetching integration for PrintCast Agent.

Handles fetching content from various sources:
- GitHub Trending repositories
- RSS feeds
- News sources
"""

import asyncio
import calendar
import html
import json
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional
from urllib.parse import urlencode

import feedparser
import httpx
import structlog
from bs4 import BeautifulSoup
from pydantic import BaseModel, Field

logger = structlog.get_logger(__name__)


class ContentItem(BaseModel):
    """A single piece of fetched content, normalized across sources.

    Produced by ContentFetcher for GitHub repositories, RSS entries and
    news articles alike, so downstream code (search, printing) can treat
    all sources uniformly.
    """
    
    id: str  # source-prefixed key, e.g. "gh_owner_repo", "rss_<hash>", "news_<hash>"
    source: str  # originating source name ("github", feed title, news outlet)
    title: str
    description: Optional[str] = None
    url: Optional[str] = None
    author: Optional[str] = None
    published_date: Optional[datetime] = None
    tags: List[str] = Field(default_factory=list)  # e.g. programming language or RSS categories
    metadata: Dict[str, Any] = Field(default_factory=dict)  # source-specific extras (stars, feed URL, ...)
    content_type: str = "text"  # "repository" | "article" | "news" | "text"


class ContentFetcher:
    """
    Fetches content from various sources for printing.
    
    Supports:
    - GitHub Trending repositories
    - RSS feeds
    - News APIs
    - Custom content sources

    Call initialize() before using any fetch_* method and shutdown() when
    done; fetched results are cached in memory for cache_ttl seconds.
    """
    
    def __init__(self, config: Dict[str, Any]):
        """
        Initialize content fetcher.
        
        Args:
            config: Configuration including:
                - github_token: GitHub API token (optional)
                - rss_feeds: List of RSS feed URLs
                - news_api_key: News API key (optional)
                - cache_ttl: Cache TTL in seconds
        """
        self.config = config
        # NOTE(review): github_token is stored but never used by the scraping
        # code below — presumably reserved for future API access; confirm.
        self.github_token = config.get("github_token")
        self.rss_feeds = config.get("rss_feeds", [
            "https://news.ycombinator.com/rss",
            "https://feeds.feedburner.com/TechCrunch/",
            "https://www.reddit.com/r/programming/.rss"
        ])
        self.news_api_key = config.get("news_api_key")
        self.cache_ttl = config.get("cache_ttl", 3600)  # seconds; default 1 hour
        
        # Created by initialize(); fetch methods use it without a None check.
        self.client: Optional[httpx.AsyncClient] = None
        # cache_key -> {"data": [ContentItem, ...], "expires": datetime}
        self.cache: Dict[str, Dict[str, Any]] = {}
        
        logger.info(
            "Content fetcher initialized",
            rss_feeds_count=len(self.rss_feeds),
            has_github_token=bool(self.github_token)
        )
    
    async def initialize(self):
        """Create the shared async HTTP client used by all fetch_* methods.

        Must be called before any fetch; those methods access self.client
        without a None check.
        """
        self.client = httpx.AsyncClient(
            timeout=30.0,
            follow_redirects=True
        )
    
    async def shutdown(self):
        """Close the HTTP client and release its connections (no-op if initialize() was never called)."""
        if self.client:
            await self.client.aclose()
    
    async def get_available_content(self) -> Dict[str, Any]:
        """
        Summarize which content sources this fetcher can currently serve.

        Returns:
            Mapping with a "sources" key describing each source's name,
            availability and basic capabilities.
        """
        github_info = {
            "name": "GitHub Trending",
            "available": True,
            "languages": ["python", "javascript", "go", "rust", "java"],
        }
        rss_info = {
            "name": "RSS Feeds",
            "available": True,
            "feeds": len(self.rss_feeds),
        }
        news_info = {
            "name": "News Articles",
            # News fetching needs an API key; the other sources work unauthenticated.
            "available": bool(self.news_api_key),
        }
        return {
            "sources": {
                "github": github_info,
                "rss": rss_info,
                "news": news_info,
            }
        }
    
    async def fetch_github_trending(
        self,
        language: Optional[str] = None,
        since: str = "daily",
        limit: int = 10
    ) -> List[ContentItem]:
        """
        Fetch trending GitHub repositories by scraping the trending page.

        GitHub has no official trending API, so this parses HTML; the CSS
        selectors below are tied to the page markup and may break when
        GitHub changes its layout.

        Args:
            language: Programming language filter (appended to the URL path)
            since: Time range (daily, weekly, monthly)
            limit: Maximum number of repositories
        
        Returns:
            List of trending repositories (empty list on any failure)
        """
        cache_key = f"github_{language}_{since}"
        
        # Serve from cache while the entry is still fresh.
        if cache_key in self.cache:
            cached = self.cache[cache_key]
            if cached["expires"] > datetime.now():
                logger.debug("Using cached GitHub trending", key=cache_key)
                return cached["data"][:limit]
        
        try:
            # Scrape GitHub Trending page (no official API)
            url = "https://github.com/trending"
            params = {}
            if language:
                # NOTE(review): this also pins spoken_language_code to "en",
                # restricting results to English-language repos whenever a
                # programming-language filter is given — confirm intended.
                params["spoken_language_code"] = "en"
                url = f"{url}/{language}"
            if since:
                params["since"] = since
            
            if params:
                url = f"{url}?{urlencode(params)}"
            
            # Requires initialize() to have been called (self.client is None otherwise).
            response = await self.client.get(url)
            
            if response.status_code != 200:
                logger.error(
                    "Failed to fetch GitHub trending",
                    status=response.status_code
                )
                return []
            
            # Parse HTML
            soup = BeautifulSoup(response.text, "html.parser")
            repos = []
            
            # Each trending repository is rendered as an <article class="Box-row">.
            for article in soup.find_all("article", class_="Box-row", limit=limit):
                try:
                    # Extract repository info
                    h2 = article.find("h2", class_="h3")
                    if not h2:
                        continue
                    
                    repo_link = h2.find("a")
                    if not repo_link:
                        continue
                    
                    # href is "/owner/repo"; strip slashes to get "owner/repo".
                    repo_path = repo_link.get("href", "").strip("/")
                    if not repo_path:
                        continue
                    
                    repo_name = repo_path.split("/")[-1]
                    owner = repo_path.split("/")[0] if "/" in repo_path else ""
                    
                    # Get description
                    desc_elem = article.find("p", class_="col-9")
                    description = desc_elem.text.strip() if desc_elem else ""
                    
                    # Get language
                    lang_elem = article.find("span", itemprop="programmingLanguage")
                    prog_language = lang_elem.text.strip() if lang_elem else ""
                    
                    # Get stars: the total count is the text next to the star icon.
                    stars_elem = article.find("svg", class_="octicon-star")
                    stars_text = "0"
                    if stars_elem and stars_elem.parent:
                        stars_text = stars_elem.parent.text.strip().replace(",", "")
                        # Extract just the number
                        stars_text = "".join(filter(str.isdigit, stars_text))
                    
                    # Get today's stars (rendered like "123 stars today")
                    today_stars = "0"
                    star_elem = article.find("span", class_="d-inline-block")
                    if star_elem:
                        star_text = star_elem.text.strip()
                        if "stars" in star_text:
                            today_stars = star_text.split()[0].replace(",", "")
                    
                    item = ContentItem(
                        id=f"gh_{repo_path.replace('/', '_')}",
                        source="github",
                        title=f"{owner}/{repo_name}",
                        description=description,
                        url=f"https://github.com/{repo_path}",
                        author=owner,
                        tags=[prog_language] if prog_language else [],
                        metadata={
                            "stars": int(stars_text) if stars_text.isdigit() else 0,
                            "today_stars": int(today_stars) if today_stars.isdigit() else 0,
                            "language": prog_language,
                            "repository": repo_name,
                            "owner": owner
                        },
                        content_type="repository"
                    )
                    
                    repos.append(item)
                    
                except Exception as e:
                    # A single malformed entry must not abort the whole scrape.
                    logger.warning(
                        "Failed to parse repository",
                        error=str(e)
                    )
                    continue
            
            # Cache the parsed list for cache_ttl seconds.
            self.cache[cache_key] = {
                "data": repos,
                "expires": datetime.now() + timedelta(seconds=self.cache_ttl)
            }
            
            logger.info(
                "Fetched GitHub trending",
                count=len(repos),
                language=language,
                since=since
            )
            
            return repos[:limit]
            
        except Exception as e:
            logger.error("Failed to fetch GitHub trending", error=str(e))
            return []
    
    async def fetch_rss_feeds(
        self,
        feed_urls: Optional[List[str]] = None,
        limit: int = 10
    ) -> List[ContentItem]:
        """
        Fetch content from RSS feeds.

        Args:
            feed_urls: Optional list of feed URLs (uses config if not provided)
            limit: Maximum number of items taken per feed, and also the
                maximum number of combined items returned.

        Returns:
            Combined list of RSS items across all feeds, newest first.
        """
        feeds = feed_urls or self.rss_feeds
        all_items = []
        
        async def fetch_feed(url: str) -> List[ContentItem]:
            """Fetch and parse a single RSS feed, with caching."""
            cache_key = f"rss_{url}"
            
            # Serve from cache while the entry is still fresh.
            if cache_key in self.cache:
                cached = self.cache[cache_key]
                if cached["expires"] > datetime.now():
                    logger.debug("Using cached RSS feed", url=url)
                    return cached["data"]
            
            try:
                response = await self.client.get(url)
                if response.status_code != 200:
                    logger.warning(
                        "Failed to fetch RSS feed",
                        url=url,
                        status=response.status_code
                    )
                    return []
                
                # Parse feed
                feed = feedparser.parse(response.text)
                items = []
                
                for entry in feed.entries[:limit]:
                    # feedparser normalizes published dates to a UTC
                    # time.struct_time, so convert with calendar.timegm.
                    # (The previous feedparser._mktime_tz helper does not
                    # exist in feedparser and raised AttributeError.)
                    published = None
                    if hasattr(entry, "published_parsed") and entry.published_parsed:
                        published = datetime.fromtimestamp(
                            calendar.timegm(entry.published_parsed)
                        )
                    
                    # Extract tags
                    tags = []
                    if hasattr(entry, "tags"):
                        tags = [tag.term for tag in entry.tags]
                    
                    item = ContentItem(
                        id=f"rss_{hash(entry.get('id', entry.get('link', '')))}"[:20],
                        source=feed.feed.get("title", url),
                        title=entry.get("title", ""),
                        description=entry.get("summary", ""),
                        url=entry.get("link"),
                        author=entry.get("author"),
                        published_date=published,
                        tags=tags,
                        metadata={
                            "feed_title": feed.feed.get("title"),
                            "feed_url": url
                        },
                        content_type="article"
                    )
                    
                    items.append(item)
                
                # Cache the parsed items for cache_ttl seconds.
                self.cache[cache_key] = {
                    "data": items,
                    "expires": datetime.now() + timedelta(seconds=self.cache_ttl)
                }
                
                logger.info(
                    "Fetched RSS feed",
                    url=url,
                    count=len(items)
                )
                
                return items
                
            except Exception as e:
                logger.error(
                    "Failed to fetch RSS feed",
                    url=url,
                    error=str(e)
                )
                return []
        
        # Fetch all feeds concurrently; each feed failure yields [].
        tasks = [fetch_feed(url) for url in feeds]
        results = await asyncio.gather(*tasks)
        
        # Combine and sort by date
        for items in results:
            all_items.extend(items)
        
        # Sort by published date (newest first); undated items sink to the end.
        all_items.sort(
            key=lambda x: x.published_date or datetime.min,
            reverse=True
        )
        
        return all_items[:limit]
    
    async def fetch_news(
        self,
        query: Optional[str] = None,
        category: str = "technology",
        limit: int = 10
    ) -> List[ContentItem]:
        """
        Fetch news articles from the NewsAPI top-headlines endpoint.

        Requires news_api_key in the config; returns an empty list when the
        key is missing or on any request/parse failure.

        Args:
            query: Search query
            category: News category
            limit: Maximum number of articles
        
        Returns:
            List of news articles
        """
        if not self.news_api_key:
            logger.warning("News API key not configured")
            return []
        
        cache_key = f"news_{query}_{category}"
        
        # Serve from cache while the entry is still fresh.
        if cache_key in self.cache:
            cached = self.cache[cache_key]
            if cached["expires"] > datetime.now():
                logger.debug("Using cached news", key=cache_key)
                return cached["data"][:limit]
        
        try:
            # Use NewsAPI or similar service
            url = "https://newsapi.org/v2/top-headlines"
            params = {
                "apiKey": self.news_api_key,
                "category": category,
                "pageSize": limit
            }
            
            if query:
                params["q"] = query
            
            response = await self.client.get(url, params=params)
            
            if response.status_code != 200:
                logger.error(
                    "Failed to fetch news",
                    status=response.status_code
                )
                return []
            
            data = response.json()
            items = []
            
            for article in data.get("articles", []):
                # Parse date: NewsAPI returns ISO-8601 with a trailing "Z",
                # which datetime.fromisoformat only accepts as "+00:00".
                published = None
                if article.get("publishedAt"):
                    published = datetime.fromisoformat(
                        article["publishedAt"].replace("Z", "+00:00")
                    )
                
                # NOTE(review): hash() of a str is salted per process, so
                # these IDs are not stable across restarts — confirm IDs only
                # need to be unique within one run (see get_content_by_ids).
                item = ContentItem(
                    id=f"news_{hash(article.get('url', ''))}",
                    source=article.get("source", {}).get("name", "News"),
                    title=article.get("title", ""),
                    description=article.get("description", ""),
                    url=article.get("url"),
                    author=article.get("author"),
                    published_date=published,
                    metadata={
                        "source_id": article.get("source", {}).get("id"),
                        "image_url": article.get("urlToImage")
                    },
                    content_type="news"
                )
                
                items.append(item)
            
            # Cache results for cache_ttl seconds.
            self.cache[cache_key] = {
                "data": items,
                "expires": datetime.now() + timedelta(seconds=self.cache_ttl)
            }
            
            logger.info(
                "Fetched news articles",
                count=len(items),
                category=category
            )
            
            return items
            
        except Exception as e:
            logger.error("Failed to fetch news", error=str(e))
            return []
    
    async def search_content(
        self,
        query: str,
        sources: Optional[List[str]] = None,
        limit: int = 20
    ) -> List[ContentItem]:
        """
        Search across all content sources.

        Args:
            query: Search query
            sources: Optional list of sources to search
            limit: Maximum results

        Returns:
            Combined search results
        """
        selected = sources or ["github", "rss", "news"]

        fetchers = []
        if "github" in selected:
            # GitHub trending has no free-text search; reuse the query as a
            # language filter on the trending page.
            fetchers.append(self.fetch_github_trending(language=query, limit=limit))
        if "rss" in selected:
            # RSS feeds cannot be searched server-side; pull the latest items.
            fetchers.append(self.fetch_rss_feeds(limit=limit))
        if "news" in selected and self.news_api_key:
            fetchers.append(self.fetch_news(query=query, limit=limit))

        gathered = await asyncio.gather(*fetchers)
        candidates = [item for batch in gathered for item in batch]

        # Keep only items that mention the query in title or description.
        needle = query.lower()
        matches = [
            item
            for item in candidates
            if needle in item.title.lower()
            or (item.description and needle in item.description.lower())
        ]

        return matches[:limit]
    
    async def get_content_by_ids(
        self,
        content_ids: List[str]
    ) -> List[ContentItem]:
        """
        Look up previously fetched content items by their IDs.

        Only items still present in the in-memory cache can be found; an ID
        that is not cached is silently omitted from the result.

        Args:
            content_ids: List of content IDs

        Returns:
            List of matching cached content items
        """
        # Scan every cache bucket ("data" holds the fetched item lists).
        return [
            item
            for cache_entry in self.cache.values()
            for item in cache_entry.get("data", [])
            if item.id in content_ids
        ]
    
    def format_for_print(
        self,
        items: List[ContentItem],
        format: str = "text"
    ) -> str:
        """
        Format content items for printing.
        
        Args:
            items: Content items to format
            format: Output format (text, markdown, html)
        
        Returns:
            Formatted content
        """
        if format == "markdown":
            output = "# PrintCast Content Selection\n\n"
            output += f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
            
            for item in items:
                output += f"## {item.title}\n\n"
                if item.author:
                    output += f"**Author:** {item.author}\n\n"
                if item.description:
                    output += f"{item.description}\n\n"
                if item.url:
                    output += f"**URL:** {item.url}\n\n"
                if item.tags:
                    output += f"**Tags:** {', '.join(item.tags)}\n\n"
                output += "---\n\n"
                
        elif format == "html":
            output = """<!DOCTYPE html>
<html>
<head>
    <title>PrintCast Content</title>
    <style>
        body { font-family: Arial, sans-serif; margin: 20px; }
        h1 { color: #333; }
        .item { margin-bottom: 30px; padding: 15px; border: 1px solid #ddd; }
        .meta { color: #666; font-size: 0.9em; }
    </style>
</head>
<body>
    <h1>PrintCast Content Selection</h1>
    <p class="meta">Generated: """ + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + """</p>
"""
            
            for item in items:
                output += f'<div class="item">\n'
                output += f'<h2>{item.title}</h2>\n'
                if item.author:
                    output += f'<p class="meta">Author: {item.author}</p>\n'
                if item.description:
                    output += f'<p>{item.description}</p>\n'
                if item.url:
                    output += f'<p><a href="{item.url}">{item.url}</a></p>\n'
                if item.tags:
                    output += f'<p class="meta">Tags: {", ".join(item.tags)}</p>\n'
                output += '</div>\n'
            
            output += "</body></html>"
            
        else:  # text format
            output = "PRINTCAST CONTENT SELECTION\n"
            output += "=" * 50 + "\n\n"
            output += f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
            
            for i, item in enumerate(items, 1):
                output += f"{i}. {item.title}\n"
                if item.author:
                    output += f"   Author: {item.author}\n"
                if item.description:
                    output += f"   {item.description[:200]}...\n"
                if item.url:
                    output += f"   URL: {item.url}\n"
                output += "\n"
        
        return output
```

--------------------------------------------------------------------------------
/printcast-agent/src/integrations/elevenlabs.py:
--------------------------------------------------------------------------------

```python
"""
ElevenLabs Conversational AI integration for PrintCast Agent.

Provides voice synthesis and conversation management using ElevenLabs API.
"""

import asyncio
import json
from typing import Any, Dict, List, Optional, AsyncGenerator
from datetime import datetime
import base64

import httpx
import structlog
from pydantic import BaseModel, Field

logger = structlog.get_logger(__name__)


class ConversationState(BaseModel):
    """State of an active conversation."""
    
    session_id: str  # Caller-supplied unique session identifier
    agent_id: Optional[str] = None  # ElevenLabs agent backing this conversation, if any
    conversation_id: Optional[str] = None  # Remote conversation ID returned by the API
    language: str = "cs"  # Conversation language code (Czech by default)
    is_active: bool = False  # True while the conversation is running
    start_time: datetime = Field(default_factory=datetime.now)  # When the session began
    transcript: List[Dict[str, str]] = Field(default_factory=list)  # Ordered user/assistant messages
    metadata: Dict[str, Any] = Field(default_factory=dict)  # Arbitrary extra session data


class ElevenLabsAgent:
    """
    Manages ElevenLabs Conversational AI integration.
    
    Features:
    - Multi-language TTS/STT
    - Real-time conversation handling
    - Voice cloning support
    - Custom prompts and behaviors
    """
    
    def __init__(self, config: Dict[str, Any]):
        """
        Initialize ElevenLabs agent.
        
        Args:
            config: Configuration including:
                - api_key: ElevenLabs API key
                - agent_id: Pre-configured agent ID (optional)
                - voice_id: Voice ID for TTS
                - model: TTS model (eleven_multilingual_v2)
                - websocket_url: WebSocket endpoint
        """
        self.config = config
        self.api_key = config.get("api_key", "")
        self.agent_id = config.get("agent_id")
        self.voice_id = config.get("voice_id", "21m00Tcm4TlvDq8ikWAM")  # Default voice
        self.model = config.get("model", "eleven_multilingual_v2")
        self.base_url = config.get("base_url", "https://api.elevenlabs.io/v1")
        self.ws_url = config.get("websocket_url", "wss://api.elevenlabs.io/v1/convai/conversation")
        
        # HTTP client is created lazily in initialize(); until then the agent
        # reports as unconfigured (see is_configured()).
        self.client: Optional[httpx.AsyncClient] = None
        # Active conversation state keyed by session_id.
        self.conversations: Dict[str, ConversationState] = {}
        # Open WebSocket handles keyed by session_id; closed in end_conversation().
        self.websockets: Dict[str, Any] = {}
        
        logger.info(
            "ElevenLabs agent initialized",
            voice_id=self.voice_id,
            model=self.model
        )
    
    async def initialize(self):
        """Initialize HTTP client and verify API access.
        
        Raises:
            Exception: Re-raises any error from client creation or the
                verification request.
        """
        try:
            self.client = httpx.AsyncClient(
                base_url=self.base_url,
                headers={
                    "xi-api-key": self.api_key,
                    "Content-Type": "application/json"
                },
                timeout=30.0
            )
            
            # Verify API access by fetching the account; a non-200 response is
            # only logged as a warning, not treated as fatal.
            response = await self.client.get("/user")
            if response.status_code == 200:
                user_info = response.json()
                logger.info(
                    "ElevenLabs API connected",
                    subscription=user_info.get("subscription", {}).get("tier")
                )
            else:
                logger.warning("ElevenLabs API verification failed", status=response.status_code)
                
        except Exception as e:
            logger.error("Failed to initialize ElevenLabs client", error=str(e))
            raise
    
    async def shutdown(self):
        """Shutdown client and cleanup.
        
        Ends every active conversation and closes the HTTP client. Errors are
        logged but not re-raised so shutdown is best-effort.
        """
        try:
            # Iterate over a snapshot of keys: end_conversation() deletes
            # entries from self.conversations while we loop.
            for session_id in list(self.conversations.keys()):
                await self.end_conversation(session_id)
            
            # Close HTTP client
            if self.client:
                await self.client.aclose()
            
            logger.info("ElevenLabs agent shutdown")
            
        except Exception as e:
            logger.error("Error during ElevenLabs shutdown", error=str(e))
    
    def is_configured(self) -> bool:
        """Check if agent is properly configured (API key set and client initialized)."""
        return bool(self.api_key and self.client)
    
    async def start_conversation(
        self,
        session_id: str,
        language: str = "cs",
        initial_prompt: Optional[str] = None,
        voice_settings: Optional[Dict[str, float]] = None
    ) -> Dict[str, Any]:
        """
        Start a new conversation session.
        
        Args:
            session_id: Unique session identifier
            language: Conversation language
            initial_prompt: Optional initial system prompt
            voice_settings: Voice configuration (stability, similarity_boost)
        
        Returns:
            Conversation initialization result
        
        Raises:
            RuntimeError: If the remote API refuses to start the conversation.
        """
        try:
            # Create conversation state
            state = ConversationState(
                session_id=session_id,
                language=language,
                is_active=True
            )
            
            # Prepare conversation configuration
            config = {
                "agent_id": self.agent_id,
                "conversation": {
                    "conversation_id": f"conv_{session_id}",
                    "variables": {
                        "language": language,
                        "session_id": session_id
                    }
                }
            }
            
            if initial_prompt:
                config["conversation"]["initial_prompt"] = initial_prompt
            
            if voice_settings:
                config["voice_settings"] = voice_settings
            else:
                # Default voice settings used when the caller does not override them.
                config["voice_settings"] = {
                    "stability": 0.5,
                    "similarity_boost": 0.75,
                    "style": 0.0,
                    "use_speaker_boost": True
                }
            
            # Start conversation via API. Without a configured agent_id the
            # session is tracked locally only (no remote conversation is created).
            if self.agent_id:
                response = await self.client.post(
                    f"/convai/agents/{self.agent_id}/conversation",
                    json=config
                )
                
                if response.status_code == 200:
                    result = response.json()
                    state.conversation_id = result.get("conversation_id")
                    state.agent_id = self.agent_id
                    
                    logger.info(
                        "Conversation started",
                        session_id=session_id,
                        conversation_id=state.conversation_id
                    )
                else:
                    logger.error(
                        "Failed to start conversation",
                        status=response.status_code,
                        error=response.text
                    )
                    raise RuntimeError(f"Failed to start conversation: {response.text}")
            
            self.conversations[session_id] = state
            
            return {
                "ready": True,
                "session_id": session_id,
                "conversation_id": state.conversation_id,
                "language": language
            }
            
        except Exception as e:
            logger.error("Failed to start conversation", error=str(e))
            raise
    
    async def send_message(
        self,
        session_id: str,
        text: str,
        voice_response: bool = True
    ) -> Dict[str, Any]:
        """
        Send a text message to the conversation.
        
        Args:
            session_id: Session identifier
            text: Message text
            voice_response: Whether to generate voice response
        
        Returns:
            Response from the agent
        
        Raises:
            ValueError: If there is no active conversation for the session.
            RuntimeError: If the remote API rejects the message.
        """
        state = self.conversations.get(session_id)
        if not state or not state.is_active:
            raise ValueError(f"No active conversation for session {session_id}")
        
        try:
            # Add to transcript
            state.transcript.append({
                "role": "user",
                "content": text,
                "timestamp": datetime.now().isoformat()
            })
            
            # Send to conversation API
            if state.conversation_id:
                response = await self.client.post(
                    f"/convai/conversations/{state.conversation_id}/message",
                    json={
                        "text": text,
                        "voice_response": voice_response
                    }
                )
                
                if response.status_code == 200:
                    result = response.json()
                    
                    # Add response to transcript
                    state.transcript.append({
                        "role": "assistant",
                        "content": result.get("text", ""),
                        "timestamp": datetime.now().isoformat()
                    })
                    
                    return {
                        "text": result.get("text"),
                        "audio": result.get("audio"),
                        "metadata": result.get("metadata", {})
                    }
                else:
                    logger.error(
                        "Failed to send message",
                        status=response.status_code,
                        error=response.text
                    )
                    raise RuntimeError(f"Failed to send message: {response.text}")
            
            # Fallback to TTS if no conversation ID (locally-tracked session)
            return await self.text_to_speech(text, language=state.language)
            
        except Exception as e:
            logger.error("Failed to send message", error=str(e))
            raise
    
    async def text_to_speech(
        self,
        text: str,
        language: str = "cs",
        voice_id: Optional[str] = None,
        output_format: str = "mp3_44100_128"
    ) -> Dict[str, Any]:
        """
        Convert text to speech.
        
        Args:
            text: Text to convert
            language: Language code
            voice_id: Optional voice ID override
            output_format: Audio format
        
        Returns:
            Audio data and metadata (audio is base64-encoded)
        
        Raises:
            RuntimeError: If the TTS request fails.
        """
        try:
            voice = voice_id or self.voice_id
            
            # Prepare TTS request
            request_data = {
                "text": text,
                "model_id": self.model,
                "voice_settings": {
                    "stability": 0.5,
                    "similarity_boost": 0.75,
                    "style": 0.0,
                    "use_speaker_boost": True
                }
            }
            
            # Language-specific adjustments
            if language == "cs":
                request_data["language_code"] = "cs-CZ"
            elif language == "en":
                request_data["language_code"] = "en-US"
            
            response = await self.client.post(
                f"/text-to-speech/{voice}",
                json=request_data,
                # e.g. "mp3_44100_128" -> Accept: audio/mp3
                headers={"Accept": f"audio/{output_format.split('_')[0]}"}
            )
            
            if response.status_code == 200:
                audio_data = response.content
                
                return {
                    "audio": base64.b64encode(audio_data).decode("utf-8"),
                    "format": output_format,
                    "duration_estimate": len(text) * 0.15,  # Rough estimate
                    "size_bytes": len(audio_data)
                }
            else:
                logger.error(
                    "TTS failed",
                    status=response.status_code,
                    error=response.text
                )
                raise RuntimeError(f"TTS failed: {response.text}")
                
        except Exception as e:
            logger.error("Failed to generate speech", error=str(e))
            raise
    
    async def generate_content_summary(
        self,
        items: List[Dict[str, Any]],
        language: str = "cs",
        max_items: int = 5
    ) -> str:
        """
        Generate a voice-friendly summary of content items.
        
        Args:
            items: List of content items
            language: Target language
            max_items: Maximum items to include
        
        Returns:
            Formatted summary text for TTS; on error, a short error string.
        """
        try:
            # Limit items
            items = items[:max_items]
            
            # Generate summary based on language
            if language == "cs":
                summary = "Zde jsou nejnovější trendy:\n\n"
                for i, item in enumerate(items, 1):
                    title = item.get("title", item.get("name", ""))
                    description = item.get("description", "")[:100]
                    
                    summary += f"Číslo {i}: {title}. "
                    if description:
                        summary += f"{description}... "
                    summary += "\n"
                
                summary += "\nŘekněte čísla položek, které chcete vytisknout."
                
            else:  # English default
                summary = "Here are the latest trends:\n\n"
                for i, item in enumerate(items, 1):
                    title = item.get("title", item.get("name", ""))
                    description = item.get("description", "")[:100]
                    
                    summary += f"Number {i}: {title}. "
                    if description:
                        summary += f"{description}... "
                    summary += "\n"
                
                summary += "\nPlease say the numbers of items you'd like to print."
            
            return summary
            
        except Exception as e:
            logger.error("Failed to generate summary", error=str(e))
            return "Error generating summary"
    
    async def process_user_selection(
        self,
        session_id: str,
        audio_data: Optional[bytes] = None,
        dtmf_input: Optional[str] = None
    ) -> List[int]:
        """
        Process user's selection from audio or DTMF.
        
        DTMF takes precedence over audio when both are supplied. Only digits
        1-9 are accepted; duplicates are removed while preserving order.
        
        Args:
            session_id: Session identifier
            audio_data: Optional audio input for STT
            dtmf_input: Optional DTMF digits
        
        Returns:
            List of selected item indices (empty on error)
        """
        try:
            selected = []
            
            if dtmf_input:
                # Parse DTMF digits
                for digit in dtmf_input:
                    if digit.isdigit():
                        num = int(digit)
                        if 1 <= num <= 9:
                            selected.append(num)
            
            elif audio_data:
                # Perform speech-to-text
                transcript = await self.speech_to_text(audio_data)
                
                # Parse transcript for numbers
                # Simple implementation - could be enhanced with NLP
                # Recognizes English and Czech number words 1-5, plus digits.
                words = transcript.lower().split()
                number_words = {
                    "one": 1, "two": 2, "three": 3, "four": 4, "five": 5,
                    "jedna": 1, "dva": 2, "tři": 3, "čtyři": 4, "pět": 5,
                    "1": 1, "2": 2, "3": 3, "4": 4, "5": 5
                }
                
                for word in words:
                    if word in number_words:
                        selected.append(number_words[word])
                    elif word.isdigit():
                        num = int(word)
                        if 1 <= num <= 9:
                            selected.append(num)
            
            # Remove duplicates while preserving order
            selected = list(dict.fromkeys(selected))
            
            logger.info(
                "User selection processed",
                session_id=session_id,
                selected=selected
            )
            
            return selected
            
        except Exception as e:
            logger.error("Failed to process selection", error=str(e))
            return []
    
    async def speech_to_text(
        self,
        audio_data: bytes,
        language: str = "cs"
    ) -> str:
        """
        Convert speech to text.
        
        WARNING: Placeholder implementation — always returns a fixed Czech
        phrase regardless of input. A real STT backend (e.g. Whisper) must be
        integrated before production use.
        
        Args:
            audio_data: Audio data (currently ignored)
            language: Language code (currently ignored)
        
        Returns:
            Transcribed text (placeholder), or "" on error
        """
        try:
            # Note: ElevenLabs doesn't provide STT directly
            # This would integrate with another service like OpenAI Whisper
            # For now, returning placeholder
            
            logger.warning("STT not fully implemented - using placeholder")
            return "jedna a tři"  # Placeholder
            
        except Exception as e:
            logger.error("Failed to transcribe speech", error=str(e))
            return ""
    
    async def end_conversation(self, session_id: str) -> Dict[str, Any]:
        """
        End a conversation session.
        
        Marks the session inactive, notifies the remote API, closes any open
        WebSocket, and removes the session from local tracking.
        
        Args:
            session_id: Session identifier
        
        Returns:
            Conversation summary, or an {"error": ...} dict on failure.
        """
        state = self.conversations.get(session_id)
        if not state:
            return {"error": "Conversation not found"}
        
        try:
            # Mark as inactive
            state.is_active = False
            
            # End conversation via API if active
            if state.conversation_id and self.client:
                await self.client.post(
                    f"/convai/conversations/{state.conversation_id}/end"
                )
            
            # Close WebSocket if exists
            if session_id in self.websockets:
                ws = self.websockets[session_id]
                await ws.close()
                del self.websockets[session_id]
            
            # Generate summary
            duration = (datetime.now() - state.start_time).total_seconds()
            summary = {
                "session_id": session_id,
                "duration_seconds": duration,
                "messages_count": len(state.transcript),
                "language": state.language
            }
            
            # Clean up
            del self.conversations[session_id]
            
            logger.info("Conversation ended", **summary)
            
            return summary
            
        except Exception as e:
            logger.error("Failed to end conversation", error=str(e))
            return {"error": str(e)}
    
    def get_conversation_transcript(
        self,
        session_id: str
    ) -> List[Dict[str, str]]:
        """Get conversation transcript (empty list if the session is unknown)."""
        state = self.conversations.get(session_id)
        return state.transcript if state else []
    
    async def create_custom_agent(
        self,
        name: str,
        prompt: str,
        voice_id: str,
        language: str = "cs",
        tools: Optional[List[Dict[str, Any]]] = None
    ) -> str:
        """
        Create a custom conversational agent.
        
        Args:
            name: Agent name
            prompt: System prompt
            voice_id: Voice to use
            language: Primary language
            tools: Optional tools/functions
        
        Returns:
            Agent ID
        
        Raises:
            RuntimeError: If the API does not return 201 Created.
        """
        try:
            agent_config = {
                "name": name,
                "prompt": prompt,
                "voice_id": voice_id,
                "language": language,
                "first_message": self._get_greeting(language),
                "tools": tools or []
            }
            
            response = await self.client.post(
                "/convai/agents",
                json=agent_config
            )
            
            if response.status_code == 201:
                result = response.json()
                agent_id = result.get("agent_id")
                
                logger.info(
                    "Custom agent created",
                    agent_id=agent_id,
                    name=name
                )
                
                return agent_id
            else:
                logger.error(
                    "Failed to create agent",
                    status=response.status_code,
                    error=response.text
                )
                raise RuntimeError(f"Failed to create agent: {response.text}")
                
        except Exception as e:
            logger.error("Failed to create custom agent", error=str(e))
            raise
    
    def _get_greeting(self, language: str) -> str:
        """Get language-specific greeting (falls back to English)."""
        greetings = {
            "cs": "Dobrý den! Jsem váš asistent PrintCast. Mohu vám přečíst nejnovější trendy a pomoci s jejich tiskem a doručením. Co by vás zajímalo?",
            "en": "Hello! I'm your PrintCast assistant. I can read you the latest trends and help with printing and delivery. What would you like to explore?",
        }
        return greetings.get(language, greetings["en"])
```

--------------------------------------------------------------------------------
/scripts/setup_project.py:
--------------------------------------------------------------------------------

```python
#!/usr/bin/env python3
"""
MCP Project Orchestrator Setup Script.

This script orchestrates the setup of the MCP Project Orchestrator by:
1. Setting up the project directory structure
2. Running all consolidation scripts for prompts, templates, resources, and mermaid diagrams
3. Creating initial configuration files
4. Setting up proper imports and exports

Usage:
    python3 setup_project.py
"""

import os
import sys
import subprocess
import json
import importlib.util
from pathlib import Path
from typing import List, Dict, Any, Optional

# Project root directory.
# Resolved relative to this script (scripts/ lives directly under the project
# root) so the setup works from any checkout location instead of the previous
# hard-coded user-specific path; MCP_ORCHESTRATOR_ROOT overrides it if set.
PROJECT_ROOT = Path(os.environ.get(
    "MCP_ORCHESTRATOR_ROOT",
    Path(__file__).resolve().parent.parent,
))

# Source directory
SRC_DIR = PROJECT_ROOT / "src" / "mcp_project_orchestrator"

# Scripts directory
SCRIPTS_DIR = PROJECT_ROOT / "scripts"

# Consolidation scripts run by run_consolidation_scripts(), in order.
CONSOLIDATION_SCRIPTS = [
    SCRIPTS_DIR / "consolidate_prompts.py",
    SCRIPTS_DIR / "consolidate_mermaid.py",
    SCRIPTS_DIR / "consolidate_templates.py",
    SCRIPTS_DIR / "consolidate_resources.py"
]

# Directory structure ensured by ensure_directories().
DIRECTORIES = [
    SRC_DIR / "core",
    SRC_DIR / "prompt_manager",
    SRC_DIR / "mermaid",
    SRC_DIR / "templates",
    SRC_DIR / "prompts",
    SRC_DIR / "resources",
    SRC_DIR / "cli",
    SRC_DIR / "utils",
    PROJECT_ROOT / "tests" / "unit",
    PROJECT_ROOT / "tests" / "integration",
    PROJECT_ROOT / "docs",
    PROJECT_ROOT / "config",
]


def ensure_directories() -> None:
    """Create every directory listed in DIRECTORIES, including missing parents.

    Idempotent: directories that already exist are left untouched
    (mkdir is called with parents=True and exist_ok=True).
    """
    print("Ensuring directory structure...")
    for directory in DIRECTORIES:
        directory.mkdir(parents=True, exist_ok=True)
        print(f"  Created/ensured: {directory}")


def run_script(script_path: Path) -> bool:
    """Run a Python script in a subprocess with the current interpreter.

    Captured stdout is echoed on success; on failure the exit code, stdout,
    and stderr are printed for diagnosis.

    Args:
        script_path: Path to the script to execute.

    Returns:
        True if the script exited with status 0, False otherwise.
    """
    print(f"\nRunning script: {script_path}")
    
    try:
        result = subprocess.run(
            [sys.executable, str(script_path)],
            check=True,
            capture_output=True,  # idiomatic shorthand for stdout=PIPE, stderr=PIPE
            text=True
        )
        print(result.stdout)
        return True
    except subprocess.CalledProcessError as e:
        print(f"Error running script {script_path}:")
        print(f"Exit code: {e.returncode}")
        print(f"Output: {e.stdout}")
        print(f"Error: {e.stderr}")
        return False


def run_consolidation_scripts() -> bool:
    """Execute every consolidation script and report overall success.

    Missing scripts are reported and counted as failures; all remaining
    scripts are still attempted.

    Returns:
        True only if every script exists and exits successfully.
    """
    print("\nRunning consolidation scripts...")
    
    success = True
    for script in CONSOLIDATION_SCRIPTS:
        if script.exists():
            # Run first so the script always executes, then fold into the flag.
            success = run_script(script) and success
        else:
            print(f"Script not found: {script}")
            success = False
    
    return success


def create_config_file() -> Path:
    """Create the default JSON configuration file for the orchestrator.

    Also creates the logs/, output/, output/mermaid/, and config/ directories.
    Uses parents=True so the function works even if PROJECT_ROOT or
    intermediate directories do not exist yet (the original mkdir calls
    failed in that case).

    Returns:
        Path to the written config file (config/default.json).
    """
    print("\nCreating default configuration file...")
    
    config = {
        "name": "mcp-project-orchestrator",
        "version": "0.1.0",
        "description": "MCP Project Orchestrator Server",
        "server": {
            "host": "127.0.0.1",
            "port": 8080
        },
        "paths": {
            "prompts": str(SRC_DIR / "prompts"),
            "templates": str(SRC_DIR / "templates"),
            "resources": str(SRC_DIR / "resources"),
            "mermaid_templates": str(SRC_DIR / "mermaid" / "templates"),
            "mermaid_output": str(PROJECT_ROOT / "output" / "mermaid")
        },
        "logging": {
            "level": "INFO",
            "file": str(PROJECT_ROOT / "logs" / "orchestrator.log")
        },
        "mermaid": {
            "cli_path": "/usr/local/bin/mmdc"
        }
    }
    
    # Create logs and output directories (and any missing parents)
    (PROJECT_ROOT / "logs").mkdir(parents=True, exist_ok=True)
    (PROJECT_ROOT / "output").mkdir(parents=True, exist_ok=True)
    (PROJECT_ROOT / "output" / "mermaid").mkdir(parents=True, exist_ok=True)
    
    # Ensure the config directory exists before writing into it, so this
    # function does not depend on ensure_directories() having run first.
    config_path = PROJECT_ROOT / "config" / "default.json"
    config_path.parent.mkdir(parents=True, exist_ok=True)
    with open(config_path, 'w', encoding='utf-8') as f:
        json.dump(config, f, indent=2)
        
    print(f"  Created config file: {config_path}")
    return config_path


def create_init_files() -> None:
    """Create or update __init__.py files for all modules.

    Writes package initializers (with docstrings and re-exports) for the main
    package and the core, prompt_manager, mermaid, and templates modules, plus
    minimal initializers for prompts, resources, cli, and utils. Existing
    files are overwritten unconditionally.
    """
    print("\nCreating __init__.py files for modules...")
    
    # Main package init: re-exports the public API of the subpackages.
    with open(SRC_DIR / "__init__.py", 'w') as f:
        f.write('''"""
MCP Project Orchestrator - A comprehensive MCP server for project orchestration.

This package provides tools for project template management, prompt management,
and diagram generation through the Model Context Protocol (MCP).
"""

__version__ = "0.1.0"

from .core import FastMCPServer, MCPConfig, setup_logging, MCPException
from .prompt_manager import PromptManager, PromptTemplate, PromptLoader
from .mermaid import MermaidGenerator, MermaidRenderer, DiagramType

__all__ = [
    "FastMCPServer",
    "MCPConfig",
    "setup_logging",
    "MCPException",
    "PromptManager",
    "PromptTemplate",
    "PromptLoader",
    "MermaidGenerator",
    "MermaidRenderer",
    "DiagramType",
]
''')
    
    # Core module init
    with open(SRC_DIR / "core" / "__init__.py", 'w') as f:
        f.write('''"""
Core functionality for the MCP Project Orchestrator.

This module provides the core server and configuration components.
"""

from .fastmcp import FastMCPServer
from .config import MCPConfig
from .logging import setup_logging
from .exceptions import MCPException

__all__ = [
    "FastMCPServer",
    "MCPConfig",
    "setup_logging",
    "MCPException",
]
''')
    
    # Prompt manager module init
    with open(SRC_DIR / "prompt_manager" / "__init__.py", 'w') as f:
        f.write('''"""
Prompt management for the MCP Project Orchestrator.

This module provides template management and rendering capabilities.
"""

from .manager import PromptManager
from .template import PromptTemplate
from .loader import PromptLoader

__all__ = [
    "PromptManager",
    "PromptTemplate",
    "PromptLoader",
]
''')
    
    # Mermaid module init
    with open(SRC_DIR / "mermaid" / "__init__.py", 'w') as f:
        f.write('''"""
Mermaid diagram generation for the MCP Project Orchestrator.

This module provides diagram generation and rendering capabilities.
"""

from .generator import MermaidGenerator
from .renderer import MermaidRenderer
from .types import DiagramType

__all__ = [
    "MermaidGenerator",
    "MermaidRenderer",
    "DiagramType",
]
''')
    
    # Templates module init
    with open(SRC_DIR / "templates" / "__init__.py", 'w') as f:
        f.write('''"""
Project and component templates for the MCP Project Orchestrator.

This module provides template management and generation capabilities.
"""

from .project_templates import ProjectTemplateManager
from .component_templates import ComponentTemplateManager

__all__ = [
    "ProjectTemplateManager",
    "ComponentTemplateManager",
]
''')
    
    # Basic init files for other modules: docstring only, no re-exports.
    for module in ["prompts", "resources", "cli", "utils"]:
        with open(SRC_DIR / module / "__init__.py", 'w') as f:
            f.write(f'"""\n{module.capitalize()} module for the MCP Project Orchestrator.\n"""\n')
    
    print("  Created/updated all __init__.py files")


def create_entry_point():
    """Write the server entry point files into SRC_DIR.

    Creates two files:
      - server.py: the ProjectOrchestratorServer class and start_server() helper.
      - __main__.py: the CLI entry point (``python -m mcp_project_orchestrator``).

    Note: the generated __main__.py creates its own event loop explicitly
    (asyncio.get_event_loop() is deprecated for implicit loop creation and the
    old code could hit NameError on ``loop`` in ``finally`` if loop creation
    itself failed).
    """
    print("\nCreating server entry point...")
    
    with open(SRC_DIR / "server.py", 'w') as f:
        f.write('''"""
MCP Project Orchestrator Server.

This is the main entry point for the MCP Project Orchestrator server.
"""

import os
import sys
import asyncio
import json
from pathlib import Path
from typing import Dict, Any, Optional

from .core import FastMCPServer, MCPConfig, setup_logging
from .prompt_manager import PromptManager
from .mermaid import MermaidGenerator, MermaidRenderer
from .templates import ProjectTemplateManager, ComponentTemplateManager


class ProjectOrchestratorServer:
    """
    MCP Project Orchestrator Server.
    
    This server integrates prompt management, diagram generation, and project templating
    capabilities into a unified MCP server.
    """
    
    def __init__(self, config: MCPConfig):
        """
        Initialize the server with configuration.
        
        Args:
            config: The server configuration
        """
        self.config = config
        self.mcp = FastMCPServer(config=config)
        self.prompt_manager = None
        self.mermaid_service = None
        self.template_manager = None
        self.logger = setup_logging(log_file=config.log_file)
        
    async def initialize(self):
        """Initialize all components and register tools."""
        self.logger.info("Initializing Project Orchestrator Server")
        
        # Initialize prompt manager
        self.prompt_manager = PromptManager(self.config)
        await self.prompt_manager.initialize()
        
        # Initialize mermaid service
        self.mermaid_service = MermaidGenerator(self.config)
        await self.mermaid_service.initialize()
        
        # Initialize template manager
        self.template_manager = {
            "project": ProjectTemplateManager(self.config),
            "component": ComponentTemplateManager(self.config)
        }
        await self.template_manager["project"].initialize()
        await self.template_manager["component"].initialize()
        
        # Register tools
        self._register_tools()
        
        # Initialize MCP server
        await self.mcp.initialize()
        
        self.logger.info("Project Orchestrator Server initialized successfully")
        
    def _register_tools(self):
        """Register all tools with the MCP server."""
        self.logger.info("Registering tools")
        
        # Register prompt rendering tool
        self.mcp.register_tool(
            name="renderPrompt",
            description="Render a prompt template with variables",
            parameters={
                "type": "object",
                "properties": {
                    "template_name": {
                        "type": "string",
                        "description": "Name of the template to render"
                    },
                    "variables": {
                        "type": "object",
                        "description": "Variables to use for rendering"
                    }
                },
                "required": ["template_name"]
            },
            handler=self._handle_render_prompt
        )
        
        # Register diagram generation tool
        self.mcp.register_tool(
            name="generateDiagram",
            description="Generate a Mermaid diagram",
            parameters={
                "type": "object",
                "properties": {
                    "template_name": {
                        "type": "string",
                        "description": "Name of the diagram template"
                    },
                    "variables": {
                        "type": "object",
                        "description": "Variables to use for rendering"
                    },
                    "output_format": {
                        "type": "string",
                        "enum": ["svg", "png", "pdf"],
                        "default": "svg",
                        "description": "Output format for the diagram"
                    }
                },
                "required": ["template_name"]
            },
            handler=self._handle_generate_diagram
        )
        
        # Register project generation tool
        self.mcp.register_tool(
            name="generateProject",
            description="Generate a project from a template",
            parameters={
                "type": "object",
                "properties": {
                    "template_name": {
                        "type": "string",
                        "description": "Name of the project template"
                    },
                    "variables": {
                        "type": "object",
                        "description": "Variables to use for generation"
                    },
                    "output_dir": {
                        "type": "string",
                        "description": "Output directory for the project"
                    }
                },
                "required": ["template_name", "output_dir"]
            },
            handler=self._handle_generate_project
        )
        
        # Register component generation tool
        self.mcp.register_tool(
            name="generateComponent",
            description="Generate a component from a template",
            parameters={
                "type": "object",
                "properties": {
                    "template_name": {
                        "type": "string",
                        "description": "Name of the component template"
                    },
                    "variables": {
                        "type": "object",
                        "description": "Variables to use for generation"
                    },
                    "output_dir": {
                        "type": "string",
                        "description": "Output directory for the component"
                    }
                },
                "required": ["template_name", "output_dir"]
            },
            handler=self._handle_generate_component
        )
        
    async def _handle_render_prompt(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """
        Handle the renderPrompt tool call.
        
        Args:
            params: Tool parameters
            
        Returns:
            Dict with rendered content
        """
        template_name = params["template_name"]
        variables = params.get("variables", {})
        
        try:
            rendered = await self.prompt_manager.render_template(template_name, variables)
            return {"content": rendered}
        except Exception as e:
            self.logger.error(f"Error rendering prompt template: {str(e)}")
            raise
    
    async def _handle_generate_diagram(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """
        Handle the generateDiagram tool call.
        
        Args:
            params: Tool parameters
            
        Returns:
            Dict with diagram URL
        """
        template_name = params["template_name"]
        variables = params.get("variables", {})
        output_format = params.get("output_format", "svg")
        
        try:
            # Generate diagram content
            diagram = self.mermaid_service.generate_from_template(template_name, variables)
            
            # Render to file
            renderer = MermaidRenderer(self.config)
            await renderer.initialize()
            
            output_file = await renderer.render_to_file(
                diagram,
                template_name,
                output_format=output_format
            )
            
            # Create a relative URL
            url = f"/mermaid/{output_file.name}"
            
            return {
                "diagram_url": url,
                "diagram_path": str(output_file)
            }
        except Exception as e:
            self.logger.error(f"Error generating diagram: {str(e)}")
            raise
    
    async def _handle_generate_project(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """
        Handle the generateProject tool call.
        
        Args:
            params: Tool parameters
            
        Returns:
            Dict with generation result
        """
        template_name = params["template_name"]
        variables = params.get("variables", {})
        output_dir = params["output_dir"]
        
        try:
            # Generate project
            result = await self.template_manager["project"].generate_project(
                template_name,
                variables,
                output_dir
            )
            
            return result
        except Exception as e:
            self.logger.error(f"Error generating project: {str(e)}")
            raise
    
    async def _handle_generate_component(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """
        Handle the generateComponent tool call.
        
        Args:
            params: Tool parameters
            
        Returns:
            Dict with generation result
        """
        template_name = params["template_name"]
        variables = params.get("variables", {})
        output_dir = params["output_dir"]
        
        try:
            # Generate component
            result = await self.template_manager["component"].generate_component(
                template_name,
                variables,
                output_dir
            )
            
            return result
        except Exception as e:
            self.logger.error(f"Error generating component: {str(e)}")
            raise
    
    async def handle_client_message(self, message: Dict[str, Any]) -> Dict[str, Any]:
        """
        Handle client messages.
        
        Args:
            message: The client message
            
        Returns:
            Response message
        """
        try:
            return await self.mcp.handle_message(message)
        except Exception as e:
            self.logger.error(f"Error handling client message: {str(e)}")
            
            # Create an error response
            return {
                "jsonrpc": "2.0",
                "id": message.get("id"),
                "error": {
                    "code": -32603,
                    "message": f"Internal error: {str(e)}"
                }
            }
    
    async def start(self):
        """Start the server."""
        await self.mcp.start()
        
    async def stop(self):
        """Stop the server."""
        await self.mcp.stop()


# Convenience function for starting the server
async def start_server(config_path: Optional[str] = None):
    """
    Start the MCP Project Orchestrator server.
    
    Args:
        config_path: Path to configuration file (optional)
    """
    # Load configuration
    config = MCPConfig(config_file=config_path)
    
    # Create and initialize the server
    server = ProjectOrchestratorServer(config)
    await server.initialize()
    
    # Start the server
    await server.start()
    
    return server
''')
    
    # Create a CLI entry point
    with open(SRC_DIR / "__main__.py", 'w') as f:
        f.write('''"""
Command-line entry point for the MCP Project Orchestrator.
"""

import os
import sys
import asyncio
import argparse
from pathlib import Path

from .server import start_server
from .core import MCPConfig, setup_logging


def main():
    """Main entry point."""
    parser = argparse.ArgumentParser(description="MCP Project Orchestrator")
    parser.add_argument("--config", help="Path to configuration file")
    parser.add_argument("--host", help="Server host")
    parser.add_argument("--port", type=int, help="Server port")
    parser.add_argument("--log-file", help="Log file path")
    parser.add_argument("--log-level", help="Logging level")
    
    args = parser.parse_args()
    
    # Look for config file in standard locations
    config_path = args.config
    if not config_path:
        # Check common config locations
        config_locations = [
            Path.cwd() / "config" / "default.json",
            Path.home() / ".config" / "mcp-project-orchestrator" / "config.json",
            Path("/etc/mcp-project-orchestrator/config.json")
        ]
        
        for location in config_locations:
            if location.exists():
                config_path = str(location)
                break
    
    # Set up logging early
    logger = setup_logging(log_file=args.log_file)
    
    # Create a dedicated event loop explicitly; asyncio.get_event_loop() is
    # deprecated for implicit loop creation since Python 3.10.
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    server = None
    
    try:
        # Start the server
        server = loop.run_until_complete(start_server(config_path))
        
        # Run the server
        loop.run_forever()
    except KeyboardInterrupt:
        # Handle graceful shutdown
        logger.info("Shutting down server...")
        if server is not None:
            loop.run_until_complete(server.stop())
    except Exception as e:
        logger.error(f"Error starting server: {str(e)}")
        sys.exit(1)
    finally:
        loop.close()
    
    
if __name__ == "__main__":
    main()
''')

    print("  Created server entry point files")
    

def main():
    """Run the full setup: directories, init files, entry point, config, consolidation."""
    print("Setting up MCP Project Orchestrator...")

    # Prepare the package layout before generating any files.
    ensure_directories()
    create_init_files()
    create_entry_point()

    # Write the default configuration and remember where it landed.
    config_path = create_config_file()

    # Consolidation scripts migrate existing assets into the new layout.
    if run_consolidation_scripts():
        print("\nSetup completed successfully!")
        print(f"Configuration file: {config_path}")
        print("You can now run the server with:")
        print(f"  python -m mcp_project_orchestrator --config {config_path}")
    else:
        print("\nSetup completed with warnings or errors.")
        print("Please review the output above and fix any issues before running the server.")

    return 0


if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    raise SystemExit(main())
```

--------------------------------------------------------------------------------
/printcast-agent/src/integrations/delivery.py:
--------------------------------------------------------------------------------

```python
"""
Delivery service integration for PrintCast Agent.

Handles shipping and delivery through various carriers:
- Czech Post (Česká pošta)
- Zásilkovna
- DPD
- PPL
- Generic courier services
"""

import asyncio
import hashlib
import json
from typing import Any, Dict, List, Optional
from datetime import datetime, timedelta
from enum import Enum

import httpx
import structlog
from pydantic import BaseModel, Field

logger = structlog.get_logger(__name__)


class DeliveryMethod(Enum):
    """Supported delivery methods.

    The string values match the carrier keys used elsewhere in this module
    (pricing tables, quotes, and shipment records).
    """
    POST = "post"           # Czech Post
    ZASILKOVNA = "zasilkovna"  # Zásilkovna
    DPD = "dpd"
    PPL = "ppl"
    COURIER = "courier"     # Generic courier


class Address(BaseModel):
    """Delivery address.

    Only name/street/city/postal_code are required; contact and company
    fields are optional, and the country defaults to the Czech Republic.
    """
    
    name: str  # Recipient (or sender) full name
    street: str
    city: str
    postal_code: str
    country: str = "CZ"  # Country code; defaults to Czech Republic
    phone: Optional[str] = None
    email: Optional[str] = None
    company: Optional[str] = None


class Shipment(BaseModel):
    """Shipment information.

    In-memory record of a single shipment; `status` moves through
    pending -> created -> shipped -> in_transit -> delivered (or cancelled),
    as driven by DeliveryService.
    """
    
    shipment_id: str  # Internal ID in the form ship_<timestamp>_<name-hash>
    tracking_number: Optional[str] = None  # Carrier-issued code, set once created
    carrier: str  # Carrier key, e.g. "post" or "zasilkovna"
    status: str = "pending"
    recipient: Address
    sender: Optional[Address] = None
    weight_grams: int = 100
    dimensions_cm: Optional[Dict[str, float]] = None  # expects length/width/height keys
    created_at: datetime = Field(default_factory=datetime.now)
    shipped_at: Optional[datetime] = None
    delivered_at: Optional[datetime] = None
    price: Optional[float] = None  # Estimated price in `currency`
    currency: str = "CZK"
    metadata: Dict[str, Any] = Field(default_factory=dict)


class DeliveryService:
    """
    Manages delivery and shipping operations.
    
    Integrates with multiple Czech and international carriers.
    """
    
    def __init__(self, config: Dict[str, Any]):
        """
        Initialize delivery service.

        Args:
            config: Configuration including:
                - carriers: Carrier-specific configurations
                - default_carrier: Default carrier to use
                - sender_address: Default sender address
                - api_keys: API keys for each carrier
        """
        self.config = config

        # Pull each setting out of the config, falling back to sane defaults.
        section_defaults = {
            "carriers": {},
            "default_carrier": "post",
            "sender_address": {},
            "api_keys": {},
        }
        for key, fallback in section_defaults.items():
            setattr(self, key, config.get(key, fallback))

        # HTTP client is created in initialize(); shipments are kept in memory.
        self.client: Optional[httpx.AsyncClient] = None
        self.shipments: Dict[str, Shipment] = {}

        logger.info(
            "Delivery service initialized",
            carriers=list(self.carriers.keys()),
            default_carrier=self.default_carrier
        )
    
    async def initialize(self):
        """Create the shared HTTP client and probe each configured carrier API."""
        try:
            self.client = httpx.AsyncClient(timeout=30.0, follow_redirects=True)

            # Only probe carriers for which we actually hold an API key.
            for carrier in [c for c in self.carriers if c in self.api_keys]:
                await self._verify_carrier_api(carrier)

        except Exception as e:
            # Best-effort: failures are logged, not raised.
            logger.error("Failed to initialize delivery service", error=str(e))
    
    async def shutdown(self):
        """Close the HTTP client (if any) and log shutdown."""
        client = self.client
        if client:
            await client.aclose()
        logger.info("Delivery service shutdown")
    
    def is_configured(self) -> bool:
        """Check if delivery service is configured."""
        return bool(self.carriers and self.sender_address)
    
    async def _verify_carrier_api(self, carrier: str) -> bool:
        """
        Probe a carrier API to confirm it is reachable.

        Args:
            carrier: Carrier name

        Returns:
            True if API is accessible, False otherwise (errors are logged).
        """
        try:
            if carrier == "post":
                # Czech Post doesn't require API verification for basic services
                logger.info("Czech Post service available")
                return True

            if carrier == "zasilkovna":
                # Verify Zásilkovna API with a minimal branches query.
                api_key = self.api_keys.get("zasilkovna")
                if api_key:
                    response = await self.client.get(
                        "https://api.packeta.com/v1/branches",
                        params={"apiPassword": api_key, "limit": 1}
                    )
                    if response.status_code == 200:
                        logger.info("Zásilkovna API verified")
                        return True

            # Add other carrier verifications as needed

        except Exception as e:
            logger.warning(
                "Failed to verify carrier API",
                carrier=carrier,
                error=str(e)
            )

        return False
    
    async def get_quote(
        self,
        address: "str | Address",
        method: str = "post",
        weight: int = 100,
        dimensions: Optional[Dict[str, float]] = None
    ) -> Dict[str, Any]:
        """
        Get delivery quote.

        Args:
            address: Delivery address, either a free-form
                "street, city, postal_code" string or an Address instance.
                (The previous ``str`` annotation was wrong: the body has
                always accepted an Address object as well.)
            method: Delivery method
            weight: Package weight in grams
            dimensions: Package dimensions in cm

        Returns:
            Delivery quote with pricing and timing

        Raises:
            Exception: re-raised after logging if price/ETA computation fails.
        """
        try:
            # Parse address if string
            if isinstance(address, str):
                # Simple parsing - in production would use proper address parser
                parts = address.split(",")
                parsed_address = Address(
                    name="Recipient",
                    street=parts[0].strip() if len(parts) > 0 else "",
                    city=parts[1].strip() if len(parts) > 1 else "Praha",
                    postal_code=parts[2].strip() if len(parts) > 2 else "10000",
                    country="CZ"
                )
            else:
                parsed_address = address
            # NOTE(review): parsed_address is currently only constructed for
            # validation; pricing below is flat-rate and ignores the address.

            # Calculate price based on carrier and weight
            price = await self._calculate_price(method, weight, dimensions)

            # Estimate delivery time
            delivery_days = self._estimate_delivery_days(method)
            estimated_delivery = datetime.now() + timedelta(days=delivery_days)

            return {
                "price": price,
                "currency": "CZK",
                "estimated_delivery": estimated_delivery.isoformat(),
                "delivery_days": delivery_days,
                "carrier": method,
                "service_type": self._get_service_type(method, weight)
            }

        except Exception as e:
            logger.error("Failed to get delivery quote", error=str(e))
            raise
    
    async def _calculate_price(
        self,
        method: str,
        weight: int,
        dimensions: Optional[Dict[str, float]] = None
    ) -> float:
        """Calculate delivery price."""
        # Simplified pricing - in production would use carrier APIs
        base_prices = {
            "post": 89.0,      # Czech Post standard letter
            "zasilkovna": 65.0,  # Zásilkovna to pickup point
            "dpd": 120.0,      # DPD standard
            "ppl": 115.0,      # PPL standard
            "courier": 150.0   # Generic courier
        }
        
        price = base_prices.get(method, 100.0)
        
        # Add weight surcharge
        if weight > 1000:  # Over 1kg
            price += (weight // 1000) * 20
        elif weight > 500:  # Over 500g
            price += 15
        
        # Add dimension surcharge for large packages
        if dimensions:
            volume = dimensions.get("length", 0) * dimensions.get("width", 0) * dimensions.get("height", 0)
            if volume > 50000:  # Over 50x50x20 cm
                price += 30
        
        return price
    
    def _estimate_delivery_days(self, method: str) -> int:
        """Estimate delivery time in days."""
        estimates = {
            "post": 2,        # Czech Post D+2
            "zasilkovna": 1,  # Next day to pickup point
            "dpd": 1,         # Next day delivery
            "ppl": 1,         # Next day delivery
            "courier": 0      # Same day possible
        }
        return estimates.get(method, 3)
    
    def _get_service_type(self, method: str, weight: int) -> str:
        """Determine service type based on method and weight."""
        if method == "post":
            if weight <= 50:
                return "Obyčejné psaní"
            elif weight <= 500:
                return "Doporučené psaní"
            else:
                return "Balík Do ruky"
        elif method == "zasilkovna":
            return "Na výdejní místo"
        else:
            return "Standard delivery"
    
    async def create_shipment(
        self,
        recipient: Address,
        method: str = "post",
        weight: int = 100,
        dimensions: Optional[Dict[str, float]] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> str:
        """
        Create a new shipment.

        Args:
            recipient: Recipient address
            method: Delivery method
            weight: Package weight in grams
            dimensions: Package dimensions
            metadata: Additional metadata

        Returns:
            Shipment ID
        """
        try:
            # Build a unique ID: timestamp plus a short hash of the recipient name.
            stamp = datetime.now().strftime('%Y%m%d%H%M%S')
            name_digest = hashlib.md5(recipient.name.encode()).hexdigest()[:8]
            shipment_id = f"ship_{stamp}_{name_digest}"

            sender = Address(**self.sender_address) if self.sender_address else None
            shipment = Shipment(
                shipment_id=shipment_id,
                carrier=method,
                recipient=recipient,
                sender=sender,
                weight_grams=weight,
                dimensions_cm=dimensions,
                metadata=metadata or {}
            )

            shipment.price = await self._calculate_price(method, weight, dimensions)

            # Register with the carrier; a tracking number marks success.
            tracking_number = await self._create_carrier_shipment(shipment)
            if tracking_number:
                shipment.tracking_number = tracking_number
                shipment.status = "created"

            self.shipments[shipment_id] = shipment

            logger.info(
                "Shipment created",
                shipment_id=shipment_id,
                carrier=method,
                tracking=tracking_number
            )

            return shipment_id

        except Exception as e:
            logger.error("Failed to create shipment", error=str(e))
            raise
    
    async def _create_carrier_shipment(self, shipment: Shipment) -> Optional[str]:
        """
        Create shipment with specific carrier.

        Args:
            shipment: Shipment details

        Returns:
            Tracking number if available, None on failure.
        """
        carrier = shipment.carrier
        try:
            if carrier == "zasilkovna":
                return await self._create_zasilkovna_shipment(shipment)
            if carrier == "post":
                return await self._create_post_shipment(shipment)
            # Simulated tracking number for carriers without a real integration.
            stamp = datetime.now().strftime('%Y%m%d%H%M%S')
            return f"{carrier.upper()}{stamp}"

        except Exception as e:
            logger.error(
                "Failed to create carrier shipment",
                carrier=carrier,
                error=str(e)
            )
            return None
    
    async def _create_zasilkovna_shipment(self, shipment: Shipment) -> Optional[str]:
        """Submit a packet to the Zásilkovna (Packeta) API and return its barcode."""
        api_key = self.api_keys.get("zasilkovna")
        if not api_key:
            logger.warning("Zásilkovna API key not configured")
            return None

        try:
            recipient = shipment.recipient
            packet = {
                "number": shipment.shipment_id,
                "name": recipient.name,
                "surname": "",  # Would need to parse from name
                "email": recipient.email or "",
                "phone": recipient.phone or "",
                "addressId": 1,  # Would need to select pickup point
                "currency": shipment.currency,
                "value": shipment.price,
                "weight": shipment.weight_grams / 1000,  # Convert to kg
                "eshop": "PrintCast"
            }

            response = await self.client.post(
                "https://api.packeta.com/v1/packets",
                json={"apiPassword": api_key, "packet": packet}
            )

            if response.status_code != 200:
                logger.error(
                    "Zásilkovna API error",
                    status=response.status_code,
                    response=response.text
                )
                return None

            return response.json().get("barcode")

        except Exception as e:
            logger.error("Failed to create Zásilkovna shipment", error=str(e))
            return None
    
    async def _create_post_shipment(self, shipment: Shipment) -> Optional[str]:
        """Create shipment with Czech Post."""
        # Czech Post integration would require their B2B API
        # For now, return simulated tracking number
        stamp = datetime.now().strftime('%Y%m%d%H%M')
        tracking = f"RR{stamp}CZ"

        logger.info(
            "Czech Post shipment simulated",
            tracking=tracking,
            recipient=shipment.recipient.city
        )

        return tracking
    
    async def ship_package(
        self,
        shipment_id: str,
        pickup_time: Optional[datetime] = None
    ) -> Dict[str, Any]:
        """
        Mark package as shipped and arrange pickup.

        Args:
            shipment_id: Shipment ID
            pickup_time: Optional pickup time

        Returns:
            Shipping confirmation

        Raises:
            ValueError: if the shipment ID is unknown.
        """
        shipment = self.shipments.get(shipment_id)
        if not shipment:
            raise ValueError(f"Shipment {shipment_id} not found")

        try:
            shipment.status = "shipped"
            shipment.shipped_at = datetime.now()

            # Arrange a carrier pickup only when a time was requested.
            pickup_confirmation = (
                await self._arrange_pickup(shipment, pickup_time)
                if pickup_time
                else None
            )

            logger.info(
                "Package shipped",
                shipment_id=shipment_id,
                tracking=shipment.tracking_number
            )

            transit_days = self._estimate_delivery_days(shipment.carrier)
            eta = shipment.shipped_at + timedelta(days=transit_days)
            return {
                "shipment_id": shipment_id,
                "tracking_number": shipment.tracking_number,
                "carrier": shipment.carrier,
                "status": "shipped",
                "pickup_confirmation": pickup_confirmation,
                "estimated_delivery": eta.isoformat()
            }

        except Exception as e:
            logger.error("Failed to ship package", error=str(e))
            raise
    
    async def _arrange_pickup(
        self,
        shipment: Shipment,
        pickup_time: datetime
    ) -> Optional[str]:
        """Arrange carrier pickup."""
        # In production, would call carrier pickup APIs
        day = pickup_time.strftime('%Y%m%d')
        confirmation = f"PICKUP-{shipment.carrier.upper()}-{day}"

        logger.info(
            "Pickup arranged",
            carrier=shipment.carrier,
            time=pickup_time.isoformat(),
            confirmation=confirmation
        )

        return confirmation
    
    async def track_shipment(self, shipment_id: str) -> Dict[str, Any]:
        """
        Track shipment status.

        Args:
            shipment_id: Shipment ID

        Returns:
            Tracking information

        Raises:
            ValueError: if the shipment ID is unknown.
        """
        shipment = self.shipments.get(shipment_id)
        if not shipment:
            raise ValueError(f"Shipment {shipment_id} not found")

        try:
            tracking_info = await self._get_carrier_tracking(shipment)

            # Mirror the carrier's view into our local record.
            if tracking_info.get("delivered"):
                shipment.status = "delivered"
                shipment.delivered_at = datetime.now()
            elif tracking_info.get("in_transit"):
                shipment.status = "in_transit"

            shipped_iso = shipment.shipped_at.isoformat() if shipment.shipped_at else None
            delivered_iso = shipment.delivered_at.isoformat() if shipment.delivered_at else None
            return {
                "shipment_id": shipment_id,
                "tracking_number": shipment.tracking_number,
                "status": shipment.status,
                "carrier": shipment.carrier,
                "shipped_at": shipped_iso,
                "delivered_at": delivered_iso,
                "tracking_events": tracking_info.get("events", [])
            }

        except Exception as e:
            logger.error("Failed to track shipment", error=str(e))
            raise
    
    async def _get_carrier_tracking(self, shipment: Shipment) -> Dict[str, Any]:
        """Get tracking info from carrier."""
        # In production, would call carrier tracking APIs
        # For now, return simulated tracking
        
        events = []
        if shipment.shipped_at:
            events.append({
                "timestamp": shipment.shipped_at.isoformat(),
                "status": "Picked up",
                "location": "Praha"
            })
            
            if shipment.status == "in_transit":
                events.append({
                    "timestamp": (shipment.shipped_at + timedelta(hours=4)).isoformat(),
                    "status": "In transit",
                    "location": "Distribution center"
                })
            
            if shipment.status == "delivered":
                events.append({
                    "timestamp": shipment.delivered_at.isoformat(),
                    "status": "Delivered",
                    "location": shipment.recipient.city
                })
        
        return {
            "events": events,
            "in_transit": shipment.status == "in_transit",
            "delivered": shipment.status == "delivered"
        }
    
    async def cancel_shipment(self, shipment_id: str) -> bool:
        """
        Cancel a shipment.

        A shipment can be cancelled only while it is neither delivered nor
        already cancelled. Carrier-side cancellation is attempted only when
        a tracking number has been assigned.

        Args:
            shipment_id: Shipment ID

        Returns:
            True if cancelled successfully
        """
        shipment = self.shipments.get(shipment_id)
        if not shipment:
            return False
        
        # Terminal states cannot be cancelled.
        if shipment.status in ["delivered", "cancelled"]:
            return False
        
        try:
            # Only shipments already registered with a carrier need a
            # carrier-side cancellation call.
            if shipment.tracking_number:
                await self._cancel_carrier_shipment(shipment)
            
            shipment.status = "cancelled"
            
            logger.info("Shipment cancelled", shipment_id=shipment_id)
            
            return True
            
        except Exception as e:
            logger.error(
                "Failed to cancel shipment",
                shipment_id=shipment_id,
                error=str(e)
            )
            return False
    
    async def _cancel_carrier_shipment(self, shipment: Shipment):
        """Cancel shipment with carrier.

        Placeholder implementation: in production this would call the
        carrier's cancellation API; here it only logs the request.

        Args:
            shipment: Shipment whose carrier booking should be cancelled.
        """
        # In production, would call carrier cancellation APIs
        logger.info(
            "Carrier shipment cancellation simulated",
            carrier=shipment.carrier,
            tracking=shipment.tracking_number
        )
    
    def get_shipment_status(self, shipment_id: str) -> Optional[Dict[str, Any]]:
        """Get shipment status.

        Args:
            shipment_id: Shipment ID to look up.

        Returns:
            Summary dict for the shipment, or None when the ID is unknown.
        """
        shipment = self.shipments.get(shipment_id)
        if not shipment:
            return None
        
        def iso(ts):
            # Render an optional timestamp, preserving None.
            return ts.isoformat() if ts else None
        
        recipient = shipment.recipient
        return {
            "shipment_id": shipment.shipment_id,
            "status": shipment.status,
            "tracking_number": shipment.tracking_number,
            "carrier": shipment.carrier,
            "recipient": {
                "name": recipient.name,
                "city": recipient.city,
                "country": recipient.country
            },
            "price": shipment.price,
            "currency": shipment.currency,
            "created": shipment.created_at.isoformat(),
            "shipped": iso(shipment.shipped_at),
            "delivered": iso(shipment.delivered_at)
        }
    
    async def generate_shipping_label(
        self,
        shipment_id: str,
        format: str = "pdf"
    ) -> bytes:
        """
        Generate shipping label.

        Args:
            shipment_id: Shipment ID
            format: Label format (pdf, zpl, png)
                NOTE(review): the format argument is currently ignored by
                this placeholder implementation — confirm before relying
                on it.

        Returns:
            Label data

        Raises:
            ValueError: If no shipment with the given ID exists.
        """
        shipment = self.shipments.get(shipment_id)
        if not shipment:
            raise ValueError(f"Shipment {shipment_id} not found")
        
        # In production, would generate actual label
        # For now, return placeholder
        recipient = shipment.recipient
        sender_name = shipment.sender.name if shipment.sender else 'PrintCast'
        label_content = f"""
        SHIPPING LABEL
        ===============
        From: {sender_name}
        To: {recipient.name}
            {recipient.street}
            {recipient.postal_code} {recipient.city}
            {recipient.country}
        
        Tracking: {shipment.tracking_number or 'N/A'}
        Carrier: {shipment.carrier.upper()}
        Weight: {shipment.weight_grams}g
        """
        
        return label_content.encode("utf-8")
```

--------------------------------------------------------------------------------
/data/prompts/templates/docker-mcp-servers-orchestration.json:
--------------------------------------------------------------------------------

```json
{
  "id": "docker-mcp-servers-orchestration",
  "name": "Docker MCP Servers Orchestration Guide",
  "description": "A comprehensive guide for setting up, configuring, and orchestrating multiple MCP servers in a Docker environment",
  "content": "# Docker MCP Servers Orchestration Guide\\n\\n## Overview\\n\\nThis guide will help you set up a containerized environment with multiple integrated MCP servers for {{use_case}}. The architecture leverages Docker Compose to orchestrate these servers, providing a robust foundation for AI-powered applications with enhanced context capabilities.\\n\\n## Prerequisites\\n\\n- Docker and Docker Compose installed\\n- Basic understanding of containerization concepts\\n- Git for cloning repositories\\n- {{additional_prerequisites}}\\n\\n## Core MCP Servers Architecture\\n\\n```mermaid\\ngraph TD\\n    subgraph \\\\\\\"Docker Compose Network\\\\\\\"\\n        subgraph \\\\\\\"Core Service\\\\\\\"\\n            MCP[MCP Prompts Server]\\n        end\\n        \\n        subgraph \\\\\\\"MCP Resource Servers\\\\\\\"\\n            FS[Filesystem Server]\\n            MEM[Memory Server]\\n            GH[GitHub Server]\\n            ST[Sequential Thinking]\\n            EL[ElevenLabs Server]\\n            {{additional_servers}}\\n        end\\n        \\n        subgraph \\\\\\\"Storage Options\\\\\\\"\\n            File[(File Storage)]\\n            PG[(PostgreSQL)]\\n            PGAI[(PGAI/TimescaleDB)]\\n        end\\n    end\\n    \\n    Client[AI Client] -->|Requests| MCP\\n    MCP -->|Resource URI Requests| FS\\n    MCP -->|Resource URI Requests| MEM\\n    MCP -->|Resource URI Requests| GH\\n    MCP -->|Resource URI Requests| ST\\n    MCP -->|Resource URI Requests| EL\\n    \\n    MCP -->|Storage| File\\n    MCP -->|Storage| PG\\n    MCP -->|Storage| PGAI\\n    \\n    FS -->|Access| LocalFiles[(Local Files)]\\n    GH -->|API Calls| GitHub[(GitHub API)]\\n    EL -->|API Calls| ElevenLabsAPI[(ElevenLabs API)]\\n    \\n    classDef core fill:#f9a,stroke:#d87,stroke-width:2px\\n    classDef server fill:#adf,stroke:#7ad,stroke-width:1px\\n    classDef storage fill:#ad8,stroke:#7a6,stroke-width:1px\\n    classDef external fill:#ddd,stroke:#999,stroke-width:1px\\n    
\\n    class MCP core\\n    class FS,MEM,GH,ST,EL server\\n    class File,PG,PGAI storage\\n    class Client,LocalFiles,GitHub,ElevenLabsAPI external\\n```\\n\\n## Setting Up Your Environment\\n\\n### 1. Base Docker Compose Configuration\\n\\nCreate a base Docker Compose file (`docker-compose.base.yml`):\\n\\n```yaml\\nversion: '3'\\n\\nservices:\\n  mcp-prompts:\\n    image: {{registry}}/mcp-prompts:latest\\n    container_name: mcp-prompts\\n    environment:\\n      - NODE_ENV=production\\n      - PORT=3000\\n      - HOST=0.0.0.0\\n      - STORAGE_TYPE=file\\n      - PROMPTS_DIR=/app/data/prompts\\n      - BACKUPS_DIR=/app/data/backups\\n      - LOG_LEVEL=info\\n    volumes:\\n      - mcp-data:/app/data\\n    ports:\\n      - \\\\\\\"3000:3000\\\\\\\"\\n    healthcheck:\\n      test: [\\\\\\\"CMD\\\\\\\", \\\\\\\"node\\\\\\\", \\\\\\\"-e\\\\\\\", \\\\\\\"require('http').request({hostname: 'localhost', port: 3000, path: '/health', timeout: 2000}, (res) => process.exit(res.statusCode !== 200)).end()\\\\\\\"]\\n      interval: 30s\\n      timeout: 10s\\n      retries: 3\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\nnetworks:\\n  mcp-network:\\n    driver: bridge\\n\\nvolumes:\\n  mcp-data:\\n    name: mcp-data\\n```\\n\\n### 2. 
Resource Servers Configuration\\n\\nCreate an integration configuration file (`docker-compose.integration.yml`):\\n\\n```yaml\\nversion: '3'\\n\\nservices:\\n  # Override the base service with integration configuration\\n  mcp-prompts:\\n    environment:\\n      - MCP_INTEGRATION=true\\n      - MCP_MEMORY_URL=http://mcp-memory:3000\\n      - MCP_FILESYSTEM_URL=http://mcp-filesystem:3000\\n      - MCP_GITHUB_URL=http://mcp-github:3000\\n      - MCP_THINKING_URL=http://mcp-sequential-thinking:3000\\n      - MCP_ELEVENLABS_URL=http://mcp-elevenlabs:3000\\n    depends_on:\\n      - mcp-memory\\n      - mcp-filesystem\\n      - mcp-github\\n      - mcp-sequential-thinking\\n      - mcp-elevenlabs\\n\\n  # MCP Memory Server\\n  mcp-memory:\\n    image: node:20-alpine\\n    container_name: mcp-memory\\n    command: sh -c \\\\\\\"npm install -g @modelcontextprotocol/server-memory && npx -y @modelcontextprotocol/server-memory\\\\\\\"\\n    ports:\\n      - \\\\\\\"3020:3000\\\\\\\"\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\n  # MCP Filesystem Server\\n  mcp-filesystem:\\n    image: node:20-alpine\\n    container_name: mcp-filesystem\\n    command: sh -c \\\\\\\"npm install -g @modelcontextprotocol/server-filesystem && npx -y @modelcontextprotocol/server-filesystem /data\\\\\\\"\\n    volumes:\\n      - mcp-filesystem-data:/data\\n    ports:\\n      - \\\\\\\"3021:3000\\\\\\\"\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\n  # MCP GitHub Server\\n  mcp-github:\\n    image: node:20-alpine\\n    container_name: mcp-github\\n    command: sh -c \\\\\\\"npm install -g @modelcontextprotocol/server-github && npx -y @modelcontextprotocol/server-github\\\\\\\"\\n    environment:\\n      - GITHUB_PERSONAL_ACCESS_TOKEN={{github_token}}\\n    ports:\\n      - \\\\\\\"3022:3000\\\\\\\"\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\n  # MCP Sequential Thinking Server\\n  mcp-sequential-thinking:\\n    image: 
node:20-alpine\\n    container_name: mcp-sequential-thinking\\n    command: sh -c \\\\\\\"npm install -g @modelcontextprotocol/server-sequential-thinking && npx -y @modelcontextprotocol/server-sequential-thinking\\\\\\\"\\n    ports:\\n      - \\\\\\\"3023:3000\\\\\\\"\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\n  # MCP ElevenLabs Server\\n  mcp-elevenlabs:\\n    image: node:20-alpine\\n    container_name: mcp-elevenlabs\\n    command: sh -c \\\\\\\"npm install -g elevenlabs-mcp-server && npx -y elevenlabs-mcp-server\\\\\\\"\\n    environment:\\n      - ELEVENLABS_API_KEY={{elevenlabs_api_key}}\\n      - ELEVENLABS_VOICE_ID={{elevenlabs_voice_id}}\\n      - ELEVENLABS_MODEL_ID={{elevenlabs_model_id}}\\n      - ELEVENLABS_OUTPUT_DIR=/data/audio\\n    volumes:\\n      - mcp-elevenlabs-data:/data\\n    ports:\\n      - \\\\\\\"3024:3000\\\\\\\"\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\nvolumes:\\n  mcp-filesystem-data:\\n    name: mcp-filesystem-data\\n  mcp-elevenlabs-data:\\n    name: mcp-elevenlabs-data\\n```\\n\\n### 3. 
Storage Options\\n\\n#### File Storage (Default)\\nUses the default file storage mounted as a Docker volume.\\n\\n#### PostgreSQL Storage\\nCreate a PostgreSQL configuration file (`docker-compose.postgres.yml`):\\n\\n```yaml\\nversion: '3'\\n\\nservices:\\n  # Override the base service to use PostgreSQL\\n  mcp-prompts:\\n    environment:\\n      - STORAGE_TYPE=postgres\\n      - POSTGRES_HOST=postgres\\n      - POSTGRES_PORT=5432\\n      - POSTGRES_USER={{postgres_user}}\\n      - POSTGRES_PASSWORD={{postgres_password}}\\n      - POSTGRES_DATABASE={{postgres_database}}\\n    depends_on:\\n      postgres:\\n        condition: service_healthy\\n\\n  # PostgreSQL Database\\n  postgres:\\n    image: postgres:14-alpine\\n    container_name: mcp-prompts-postgres\\n    environment:\\n      - POSTGRES_USER={{postgres_user}}\\n      - POSTGRES_PASSWORD={{postgres_password}}\\n      - POSTGRES_DB={{postgres_database}}\\n    volumes:\\n      - mcp-prompts-postgres-data:/var/lib/postgresql/data\\n      - ./postgres/init:/docker-entrypoint-initdb.d\\n    ports:\\n      - \\\\\\\"5432:5432\\\\\\\"\\n    healthcheck:\\n      test: [\\\\\\\"CMD-SHELL\\\\\\\", \\\\\\\"pg_isready -U {{postgres_user}}\\\\\\\"]\\n      interval: 10s\\n      timeout: 5s\\n      retries: 5\\n      start_period: 10s\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\n  # Adminer for database management\\n  adminer:\\n    image: adminer:latest\\n    container_name: mcp-prompts-adminer\\n    ports:\\n      - \\\\\\\"8080:8080\\\\\\\"\\n    depends_on:\\n      - postgres\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\nvolumes:\\n  mcp-prompts-postgres-data:\\n    name: mcp-prompts-postgres-data\\n```\\n\\n#### PGAI/TimescaleDB (Vector Storage)\\nCreate a PGAI configuration file (`docker-compose.pgai.yml`):\\n\\n```yaml\\nversion: '3'\\n\\nservices:\\n  # Override the base service to use PGAI\\n  mcp-prompts:\\n    environment:\\n      - STORAGE_TYPE=pgai\\n     
 - PGAI_HOST=pgai\\n      - PGAI_PORT=5432\\n      - PGAI_USER=postgres\\n      - PGAI_PASSWORD=postgres\\n      - PGAI_DATABASE=mcp_prompts\\n      - PGAI_API_KEY={{pgai_api_key}}\\n      - PGAI_COLLECTION=mcp_prompts\\n    depends_on:\\n      pgai:\\n        condition: service_healthy\\n\\n  # TimescaleDB with PGAI extension\\n  pgai:\\n    image: timescale/timescaledb-pgai:pg15\\n    container_name: mcp-prompts-pgai\\n    environment:\\n      - POSTGRES_USER=postgres\\n      - POSTGRES_PASSWORD=postgres\\n      - POSTGRES_DB=mcp_prompts\\n    volumes:\\n      - mcp-prompts-pgai-data:/var/lib/postgresql/data\\n      - ./postgres/pgai-init:/docker-entrypoint-initdb.d\\n    ports:\\n      - \\\\\\\"5433:5432\\\\\\\"\\n    healthcheck:\\n      test: [\\\\\\\"CMD-SHELL\\\\\\\", \\\\\\\"pg_isready -U postgres\\\\\\\"]\\n      interval: 10s\\n      timeout: 5s\\n      retries: 5\\n      start_period: 30s\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\n  # Adminer for PGAI database management\\n  pgai-adminer:\\n    image: adminer:latest\\n    container_name: mcp-prompts-pgai-adminer\\n    ports:\\n      - \\\\\\\"8081:8080\\\\\\\"\\n    environment:\\n      - ADMINER_DEFAULT_SERVER=pgai\\n    depends_on:\\n      - pgai\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\nvolumes:\\n  mcp-prompts-pgai-data:\\n    name: mcp-prompts-pgai-data\\n```\\n\\n## Deployment and Management\\n\\n### Docker Compose Manager Script\\n\\nCreate a management script (`docker-compose-manager.sh`) for easier orchestration:\\n\\n```bash\\n#!/bin/bash\\n\\n# Colors for output\\nGREEN=\\\\\\\"\\\\\\\\033[0;32m\\\\\\\"\\nYELLOW=\\\\\\\"\\\\\\\\033[1;33m\\\\\\\"\\nBLUE=\\\\\\\"\\\\\\\\033[0;34m\\\\\\\"\\nRED=\\\\\\\"\\\\\\\\033[0;31m\\\\\\\"\\nNC=\\\\\\\"\\\\\\\\033[0m\\\\\\\" # No Color\\n\\n# Base directory for Docker Compose 
files\\nCOMPOSE_DIR=\\\\\\\"docker/compose\\\\\\\"\\nBASE_COMPOSE=\\\\\\\"$COMPOSE_DIR/docker-compose.base.yml\\\\\\\"\\n\\n# Display help message\\nfunction show_help {\\n  echo -e \\\\\\\"${BLUE}MCP Prompts Docker Compose Manager${NC}\\\\\\\"\\n  echo -e \\\\\\\"${YELLOW}Usage:${NC} $0 [command] [environment] [options]\\\\\\\"\\n  echo\\n  echo -e \\\\\\\"${YELLOW}Commands:${NC}\\\\\\\"\\n  echo -e \\\\\\\"  up        Start services\\\\\\\"\\n  echo -e \\\\\\\"  down      Stop services and remove containers\\\\\\\"\\n  echo -e \\\\\\\"  ps        List running services\\\\\\\"\\n  echo -e \\\\\\\"  logs      View logs\\\\\\\"\\n  echo -e \\\\\\\"  restart   Restart services\\\\\\\"\\n  echo -e \\\\\\\"  image     Build Docker images\\\\\\\"\\n  echo -e \\\\\\\"  publish   Build and publish Docker images\\\\\\\"\\n  echo\\n  echo -e \\\\\\\"${YELLOW}Environments:${NC}\\\\\\\"\\n  echo -e \\\\\\\"  base       Base MCP Prompts service\\\\\\\"\\n  echo -e \\\\\\\"  development Development environment with hot-reloading\\\\\\\"\\n  echo -e \\\\\\\"  postgres   PostgreSQL storage\\\\\\\"\\n  echo -e \\\\\\\"  pgai       PGAI TimescaleDB storage\\\\\\\"\\n  echo -e \\\\\\\"  test       Testing environment\\\\\\\"\\n  echo -e \\\\\\\"  integration Multiple MCP servers integration\\\\\\\"\\n  echo -e \\\\\\\"  sse        Server-Sent Events transport\\\\\\\"\\n  echo\\n  echo -e \\\\\\\"${YELLOW}Options:${NC}\\\\\\\"\\n  echo -e \\\\\\\"  -d, --detach     Run in detached mode\\\\\\\"\\n  echo -e \\\\\\\"  -t, --tag TAG    Specify tag for Docker images\\\\\\\"\\n  echo -e \\\\\\\"  -h, --help       Show this help message\\\\\\\"\\n}\\n\\n# Default values\\nDETACHED=\\\\\\\"\\\\\\\"\\nTAG=\\\\\\\"latest\\\\\\\"\\n\\n# Parse options\\nwhile [[ $# -gt 0 ]]; do\\n  case $1 in\\n    -h|--help)\\n      show_help\\n      exit 0\\n      ;;\\n    -d|--detach)\\n      DETACHED=\\\\\\\"-d\\\\\\\"\\n      shift\\n      ;;\\n    -t|--tag)\\n      TAG=\\\\\\\"$2\\\\\\\"\\n      shift 2\\n 
     ;;\\n    *)\\n      break\\n      ;;\\n  esac\\ndone\\n\\n# Check if at least command and environment are provided\\nif [ $# -lt 2 ]; then\\n  show_help\\n  exit 1\\nfi\\n\\nCOMMAND=$1\\nENV=$2\\n\\n# Validate environment\\nCOMPOSE_FILE=\\\\\\\"\\\\\\\"\\ncase $ENV in\\n  base)\\n    COMPOSE_FILE=\\\\\\\"$BASE_COMPOSE\\\\\\\"\\n    ;;\\n  development)\\n    COMPOSE_FILE=\\\\\\\"-f $BASE_COMPOSE -f $COMPOSE_DIR/docker-compose.development.yml\\\\\\\"\\n    ;;\\n  postgres)\\n    COMPOSE_FILE=\\\\\\\"-f $BASE_COMPOSE -f $COMPOSE_DIR/docker-compose.postgres.yml\\\\\\\"\\n    ;;\\n  pgai)\\n    COMPOSE_FILE=\\\\\\\"-f $BASE_COMPOSE -f $COMPOSE_DIR/docker-compose.pgai.yml\\\\\\\"\\n    ;;\\n  test)\\n    COMPOSE_FILE=\\\\\\\"-f $BASE_COMPOSE -f $COMPOSE_DIR/docker-compose.test.yml\\\\\\\"\\n    ;;\\n  integration)\\n    COMPOSE_FILE=\\\\\\\"-f $BASE_COMPOSE -f $COMPOSE_DIR/docker-compose.integration.yml\\\\\\\"\\n    ;;\\n  sse)\\n    COMPOSE_FILE=\\\\\\\"-f $BASE_COMPOSE -f $COMPOSE_DIR/docker-compose.sse.yml\\\\\\\"\\n    ;;\\n  *)\\n    echo -e \\\\\\\"${RED}Invalid environment: $ENV${NC}\\\\\\\"\\n    show_help\\n    exit 1\\n    ;;\\nesac\\n\\n# Execute the appropriate command\\ncase $COMMAND in\\n  up)\\n    echo -e \\\\\\\"${GREEN}Starting MCP Prompts services for environment: $ENV${NC}\\\\\\\"\\n    docker compose $COMPOSE_FILE up $DETACHED\\n    ;;\\n  down)\\n    echo -e \\\\\\\"${GREEN}Stopping MCP Prompts services for environment: $ENV${NC}\\\\\\\"\\n    docker compose $COMPOSE_FILE down\\n    ;;\\n  ps)\\n    echo -e \\\\\\\"${GREEN}Listing MCP Prompts services for environment: $ENV${NC}\\\\\\\"\\n    docker compose $COMPOSE_FILE ps\\n    ;;\\n  logs)\\n    echo -e \\\\\\\"${GREEN}Showing logs for MCP Prompts services in environment: $ENV${NC}\\\\\\\"\\n    docker compose $COMPOSE_FILE logs -f\\n    ;;\\n  restart)\\n    echo -e \\\\\\\"${GREEN}Restarting MCP Prompts services for environment: $ENV${NC}\\\\\\\"\\n    docker compose $COMPOSE_FILE 
restart\\n    ;;\\n  image)\\n    echo -e \\\\\\\"${GREEN}Building Docker image for environment: $ENV with tag: $TAG${NC}\\\\\\\"\\n    case $ENV in\\n      base|production)\\n        docker build -t {{registry}}/mcp-prompts:$TAG -f docker/Dockerfile.prod .\\n        echo -e \\\\\\\"${GREEN}Built: {{registry}}/mcp-prompts:$TAG${NC}\\\\\\\"\\n        ;;\\n      development)\\n        docker build -t {{registry}}/mcp-prompts:$TAG-dev -f docker/Dockerfile.development .\\n        echo -e \\\\\\\"${GREEN}Built: {{registry}}/mcp-prompts:$TAG-dev${NC}\\\\\\\"\\n        ;;\\n      test)\\n        docker build -t {{registry}}/mcp-prompts:$TAG-test -f docker/Dockerfile.testing .\\n        echo -e \\\\\\\"${GREEN}Built: {{registry}}/mcp-prompts:$TAG-test${NC}\\\\\\\"\\n        ;;\\n      *)\\n        echo -e \\\\\\\"${RED}Image building not supported for environment: $ENV${NC}\\\\\\\"\\n        exit 1\\n        ;;\\n    esac\\n    ;;\\n  publish)\\n    echo -e \\\\\\\"${GREEN}Building and publishing Docker images with tag: $TAG${NC}\\\\\\\"\\n    \\n    # Build images\\n    docker build -t {{registry}}/mcp-prompts:$TAG -f docker/Dockerfile.prod .\\n    docker build -t {{registry}}/mcp-prompts:$TAG-dev -f docker/Dockerfile.development .\\n    docker build -t {{registry}}/mcp-prompts:$TAG-test -f docker/Dockerfile.testing .\\n    \\n    # Push images\\n    echo -e \\\\\\\"${GREEN}Publishing images to Docker registry${NC}\\\\\\\"\\n    docker push {{registry}}/mcp-prompts:$TAG\\n    docker push {{registry}}/mcp-prompts:$TAG-dev\\n    docker push {{registry}}/mcp-prompts:$TAG-test\\n    \\n    echo -e \\\\\\\"${GREEN}Published images:${NC}\\\\\\\"\\n    echo -e \\\\\\\"  {{registry}}/mcp-prompts:$TAG\\\\\\\"\\n    echo -e \\\\\\\"  {{registry}}/mcp-prompts:$TAG-dev\\\\\\\"\\n    echo -e \\\\\\\"  {{registry}}/mcp-prompts:$TAG-test\\\\\\\"\\n    ;;\\n  *)\\n    echo -e \\\\\\\"${RED}Invalid command: $COMMAND${NC}\\\\\\\"\\n    show_help\\n    exit 1\\n    ;;\\nesac\\n```\\n\\nMake 
the script executable:\\n\\n```bash\\nchmod +x docker-compose-manager.sh\\n```\\n\\n## Launching the Environment\\n\\n### 1. Start the Base Environment\\n\\n```bash\\n./docker-compose-manager.sh up base -d\\n```\\n\\n### 2. Start with MCP Integration\\n\\n```bash\\n./docker-compose-manager.sh up integration -d\\n```\\n\\n### 3. Start with PostgreSQL Storage\\n\\n```bash\\n./docker-compose-manager.sh up postgres -d\\n```\\n\\n### 4. Start with PGAI Vector Storage\\n\\n```bash\\n./docker-compose-manager.sh up pgai -d\\n```\\n\\n## Environment Configuration\\n\\n### Core Services Configuration\\n\\n1. **MCP Prompts Server Configuration**\\n   ```\\n   # Server Configuration\\n   PORT=3000\\n   HOST=0.0.0.0\\n   NODE_ENV=production\\n   LOG_LEVEL=info\\n   \\n   # Storage Configuration\\n   STORAGE_TYPE=file  # Options: file, postgres, pgai\\n   PROMPTS_DIR=/app/data/prompts\\n   BACKUPS_DIR=/app/data/backups\\n   \\n   # Integration Configuration\\n   MCP_INTEGRATION=true\\n   MCP_MEMORY_URL=http://mcp-memory:3000\\n   MCP_FILESYSTEM_URL=http://mcp-filesystem:3000\\n   MCP_GITHUB_URL=http://mcp-github:3000\\n   MCP_THINKING_URL=http://mcp-sequential-thinking:3000\\n   MCP_ELEVENLABS_URL=http://mcp-elevenlabs:3000\\n   ```\\n\\n2. **GitHub Integration**\\n   ```\\n   # GitHub API Configuration\\n   GITHUB_PERSONAL_ACCESS_TOKEN=your_token_here\\n   ```\\n\\n3. 
**ElevenLabs Integration**\\n   ```\\n   # ElevenLabs API Configuration\\n   ELEVENLABS_API_KEY=your_api_key_here\\n   ELEVENLABS_VOICE_ID=your_voice_id\\n   ELEVENLABS_MODEL_ID=eleven_monolingual_v1\\n   ELEVENLABS_OUTPUT_DIR=/data/audio\\n   ```\\n\\n### PostgreSQL Configuration\\n\\n```\\n# PostgreSQL Configuration\\nPOSTGRES_USER=postgres\\nPOSTGRES_PASSWORD=secure_password_here\\nPOSTGRES_DATABASE=mcp_prompts\\n```\\n\\n### PGAI/TimescaleDB Configuration\\n\\n```\\n# PGAI Configuration\\nPGAI_HOST=pgai\\nPGAI_PORT=5432\\nPGAI_USER=postgres\\nPGAI_PASSWORD=postgres\\nPGAI_DATABASE=mcp_prompts\\nPGAI_API_KEY=your_pgai_key_here\\nPGAI_COLLECTION=mcp_prompts\\n```\\n\\n## Integration Verification\\n\\n### 1. Health Check\\n\\nCheck if all services are running:\\n\\n```bash\\n./docker-compose-manager.sh ps integration\\n```\\n\\n### 2. Test MCP Prompts Server\\n\\n```bash\\ncurl http://localhost:3000/health\\n```\\n\\n### 3. Test Resource Servers\\n\\n```bash\\n# Test Memory Server\\ncurl http://localhost:3020/health\\n\\n# Test Filesystem Server\\ncurl http://localhost:3021/health\\n\\n# Test GitHub Server\\ncurl http://localhost:3022/health\\n\\n# Test Sequential Thinking Server\\ncurl http://localhost:3023/health\\n\\n# Test ElevenLabs Server\\ncurl http://localhost:3024/health\\n```\\n\\n## Troubleshooting Common Issues\\n\\n### Container Startup Issues\\n\\n1. **Container fails to start**\\n   - Check logs: `./docker-compose-manager.sh logs integration`\\n   - Verify environment variables are correctly set\\n   - Ensure ports are not already in use\\n\\n2. **Network connectivity issues**\\n   - Verify all containers are on the same network\\n   - Check Docker network configuration: `docker network inspect mcp-network`\\n\\n3. 
**Storage issues**\\n   - Ensure volume permissions are correctly set\\n   - Verify database initialization scripts are valid\\n\\n## Resource Management\\n\\n### Clean Up Unused Resources\\n\\n```bash\\n# Remove stopped containers\\ndocker container prune\\n\\n# Remove unused volumes\\ndocker volume prune\\n\\n# Remove unused networks\\ndocker network prune\\n\\n# Remove dangling images\\ndocker image prune\\n```\\n\\n### Data Persistence\\n\\nDocker volumes ensure your data persists across container restarts:\\n\\n```\\nvolumes:\\n  mcp-data:              # MCP Prompts data\\n  mcp-filesystem-data:   # Filesystem server data\\n  mcp-elevenlabs-data:   # Audio output data\\n  mcp-prompts-postgres-data:  # PostgreSQL data\\n  mcp-prompts-pgai-data:      # PGAI/TimescaleDB data\\n```\\n\\n## Best Practices for Production\\n\\n1. **Security Considerations**\\n   - Use environment files for secrets\\n   - Configure proper network isolation\\n   - Set up user permissions for service accounts\\n   - Enable HTTPS with proper certificates\\n\\n2. **High Availability**\\n   - Implement container restart policies\\n   - Consider Docker Swarm or Kubernetes for clustering\\n   - Set up monitoring and alerting\\n   - Establish backup and recovery procedures\\n\\n3. **Performance Optimization**\\n   - Tune PostgreSQL/PGAI for your workload\\n   - Configure appropriate resource limits\\n   - Implement caching strategies\\n   - Monitor resource usage\\n\\n## Advanced Customization\\n\\n### Adding Custom MCP Servers\\n\\n1. Create a Dockerfile for your custom server\\n2. Add the service to your Docker Compose file\\n3. Configure environment variables for integration\\n4. 
Update the MCP Prompts server configuration\\n\\n### Extending with Additional Services\\n\\n```yaml\\nservices:\\n  # Your custom MCP server\\n  mcp-custom:\\n    image: node:20-alpine\\n    container_name: mcp-custom\\n    command: sh -c \\\\\\\"npm install -g your-custom-mcp-server && npx -y your-custom-mcp-server\\\\\\\"\\n    environment:\\n      - CUSTOM_API_KEY={{custom_api_key}}\\n    ports:\\n      - \\\\\\\"3025:3000\\\\\\\"\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n```\\n\\n## Next Steps\\n\\n1. Explore integration with AI clients like Claude Desktop, Zed, and LibreChat\\n2. Implement monitoring and logging solutions\\n3. Set up CI/CD pipelines for deployment\\n4. Explore advanced use cases for your specific domain\\n\\n## Additional Resources\\n\\n- [MCP Protocol Documentation](https://modelcontextprotocol.io/)\\n- [Docker Documentation](https://docs.docker.com/)\\n- [MCP Servers Repository](https://github.com/modelcontextprotocol/servers)\\n- {{additional_resources}}\\n\\nWhat specific aspect of this Docker-based MCP integration would you like me to elaborate on further?\\\",\\n  \\\"isTemplate\\\": true,\\n  \\\"variables\\\": [\\n    \\\"use_case\\\",\\n    \\\"additional_prerequisites\\\",\\n    \\\"additional_servers\\\",\\n    \\\"registry\\\",\\n    \\\"github_token\\\",\\n    \\\"elevenlabs_api_key\\\",\\n    \\\"elevenlabs_voice_id\\\",\\n    \\\"elevenlabs_model_id\\\",\\n    \\\"postgres_user\\\",\\n    \\\"postgres_password\\\",\\n    \\\"postgres_database\\\",\\n    \\\"pgai_api_key\\\",\\n    \\\"custom_api_key\\\",\\n    \\\"additional_resources\\\"\\n  ],\\n  \\\"tags\\\": [\\n    \\\"docker\\\",\\n    \\\"mcp-integration\\\",\\n    \\\"multi-server\\\",\\n    \\\"orchestration\\\",\\n    \\\"containerization\\\",\\n    \\\"devops\\\",\\n    \\\"tutorial\\\"\\n  ],\\n  \\\"createdAt\\\": \\\"2025-03-15T21:00:00.000Z\\\",\\n  \\\"updatedAt\\\": \\\"2025-03-15T21:00:00.000Z\\\",\\n  \\\"version\\\": 1,\\n  
\\\"metadata\\\": {\\n    \\\"recommended_servers\\\": [\\n      \\\"@modelcontextprotocol/server-filesystem\\\",\\n      \\\"@modelcontextprotocol/server-memory\\\",\\n      \\\"@modelcontextprotocol/server-github\\\",\\n      \\\"@modelcontextprotocol/server-sequential-thinking\\\",\\n      \\\"elevenlabs-mcp-server\\\"\\n    ],\\n    \\\"example_values\\\": {\\n      \\\"use_case\\\": \\\"AI-powered code analysis and documentation\\\",\\n      \\\"additional_prerequisites\\\": \\\"Node.js 18+ for local development\\\",\\n      \\\"registry\\\": \\\"sparesparrow\\\",\\n      \\\"postgres_user\\\": \\\"postgres\\\",\\n      \\\"postgres_password\\\": \\\"secure_password_here\\\",\\n      \\\"postgres_database\\\": \\\"mcp_prompts"
}
```

--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/prompts/Docker_MCP_Servers_Orchestration_Guide.json:
--------------------------------------------------------------------------------

```json
{
  "name": "Docker MCP Servers Orchestration Guide",
  "description": "A comprehensive guide for setting up, configuring, and orchestrating multiple MCP servers in a Docker environment",
  "type": "prompt",
  "category": "other",
  "content": "# Docker MCP Servers Orchestration Guide\\n\\n## Overview\\n\\nThis guide will help you set up a containerized environment with multiple integrated MCP servers for {{use_case}}. The architecture leverages Docker Compose to orchestrate these servers, providing a robust foundation for AI-powered applications with enhanced context capabilities.\\n\\n## Prerequisites\\n\\n- Docker and Docker Compose installed\\n- Basic understanding of containerization concepts\\n- Git for cloning repositories\\n- {{additional_prerequisites}}\\n\\n## Core MCP Servers Architecture\\n\\n```mermaid\\ngraph TD\\n    subgraph \\\\\\\"Docker Compose Network\\\\\\\"\\n        subgraph \\\\\\\"Core Service\\\\\\\"\\n            MCP[MCP Prompts Server]\\n        end\\n        \\n        subgraph \\\\\\\"MCP Resource Servers\\\\\\\"\\n            FS[Filesystem Server]\\n            MEM[Memory Server]\\n            GH[GitHub Server]\\n            ST[Sequential Thinking]\\n            EL[ElevenLabs Server]\\n            {{additional_servers}}\\n        end\\n        \\n        subgraph \\\\\\\"Storage Options\\\\\\\"\\n            File[(File Storage)]\\n            PG[(PostgreSQL)]\\n            PGAI[(PGAI/TimescaleDB)]\\n        end\\n    end\\n    \\n    Client[AI Client] -->|Requests| MCP\\n    MCP -->|Resource URI Requests| FS\\n    MCP -->|Resource URI Requests| MEM\\n    MCP -->|Resource URI Requests| GH\\n    MCP -->|Resource URI Requests| ST\\n    MCP -->|Resource URI Requests| EL\\n    \\n    MCP -->|Storage| File\\n    MCP -->|Storage| PG\\n    MCP -->|Storage| PGAI\\n    \\n    FS -->|Access| LocalFiles[(Local Files)]\\n    GH -->|API Calls| GitHub[(GitHub API)]\\n    EL -->|API Calls| ElevenLabsAPI[(ElevenLabs API)]\\n    \\n    classDef core fill:#f9a,stroke:#d87,stroke-width:2px\\n    classDef server fill:#adf,stroke:#7ad,stroke-width:1px\\n    classDef storage fill:#ad8,stroke:#7a6,stroke-width:1px\\n    classDef external fill:#ddd,stroke:#999,stroke-width:1px\\n    
\\n    class MCP core\\n    class FS,MEM,GH,ST,EL server\\n    class File,PG,PGAI storage\\n    class Client,LocalFiles,GitHub,ElevenLabsAPI external\\n```\\n\\n## Setting Up Your Environment\\n\\n### 1. Base Docker Compose Configuration\\n\\nCreate a base Docker Compose file (`docker-compose.base.yml`):\\n\\n```yaml\\nversion: '3'\\n\\nservices:\\n  mcp-prompts:\\n    image: {{registry}}/mcp-prompts:latest\\n    container_name: mcp-prompts\\n    environment:\\n      - NODE_ENV=production\\n      - PORT=3000\\n      - HOST=0.0.0.0\\n      - STORAGE_TYPE=file\\n      - PROMPTS_DIR=/app/data/prompts\\n      - BACKUPS_DIR=/app/data/backups\\n      - LOG_LEVEL=info\\n    volumes:\\n      - mcp-data:/app/data\\n    ports:\\n      - \\\\\\\"3000:3000\\\\\\\"\\n    healthcheck:\\n      test: [\\\\\\\"CMD\\\\\\\", \\\\\\\"node\\\\\\\", \\\\\\\"-e\\\\\\\", \\\\\\\"require('http').request({hostname: 'localhost', port: 3000, path: '/health', timeout: 2000}, (res) => process.exit(res.statusCode !== 200)).end()\\\\\\\"]\\n      interval: 30s\\n      timeout: 10s\\n      retries: 3\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\nnetworks:\\n  mcp-network:\\n    driver: bridge\\n\\nvolumes:\\n  mcp-data:\\n    name: mcp-data\\n```\\n\\n### 2. 
Resource Servers Configuration\\n\\nCreate an integration configuration file (`docker-compose.integration.yml`):\\n\\n```yaml\\nversion: '3'\\n\\nservices:\\n  # Override the base service with integration configuration\\n  mcp-prompts:\\n    environment:\\n      - MCP_INTEGRATION=true\\n      - MCP_MEMORY_URL=http://mcp-memory:3000\\n      - MCP_FILESYSTEM_URL=http://mcp-filesystem:3000\\n      - MCP_GITHUB_URL=http://mcp-github:3000\\n      - MCP_THINKING_URL=http://mcp-sequential-thinking:3000\\n      - MCP_ELEVENLABS_URL=http://mcp-elevenlabs:3000\\n    depends_on:\\n      - mcp-memory\\n      - mcp-filesystem\\n      - mcp-github\\n      - mcp-sequential-thinking\\n      - mcp-elevenlabs\\n\\n  # MCP Memory Server\\n  mcp-memory:\\n    image: node:20-alpine\\n    container_name: mcp-memory\\n    command: sh -c \\\\\\\"npm install -g @modelcontextprotocol/server-memory && npx -y @modelcontextprotocol/server-memory\\\\\\\"\\n    ports:\\n      - \\\\\\\"3020:3000\\\\\\\"\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\n  # MCP Filesystem Server\\n  mcp-filesystem:\\n    image: node:20-alpine\\n    container_name: mcp-filesystem\\n    command: sh -c \\\\\\\"npm install -g @modelcontextprotocol/server-filesystem && npx -y @modelcontextprotocol/server-filesystem /data\\\\\\\"\\n    volumes:\\n      - mcp-filesystem-data:/data\\n    ports:\\n      - \\\\\\\"3021:3000\\\\\\\"\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\n  # MCP GitHub Server\\n  mcp-github:\\n    image: node:20-alpine\\n    container_name: mcp-github\\n    command: sh -c \\\\\\\"npm install -g @modelcontextprotocol/server-github && npx -y @modelcontextprotocol/server-github\\\\\\\"\\n    environment:\\n      - GITHUB_PERSONAL_ACCESS_TOKEN={{github_token}}\\n    ports:\\n      - \\\\\\\"3022:3000\\\\\\\"\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\n  # MCP Sequential Thinking Server\\n  mcp-sequential-thinking:\\n    image: 
node:20-alpine\\n    container_name: mcp-sequential-thinking\\n    command: sh -c \\\\\\\"npm install -g @modelcontextprotocol/server-sequential-thinking && npx -y @modelcontextprotocol/server-sequential-thinking\\\\\\\"\\n    ports:\\n      - \\\\\\\"3023:3000\\\\\\\"\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\n  # MCP ElevenLabs Server\\n  mcp-elevenlabs:\\n    image: node:20-alpine\\n    container_name: mcp-elevenlabs\\n    command: sh -c \\\\\\\"npm install -g elevenlabs-mcp-server && npx -y elevenlabs-mcp-server\\\\\\\"\\n    environment:\\n      - ELEVENLABS_API_KEY={{elevenlabs_api_key}}\\n      - ELEVENLABS_VOICE_ID={{elevenlabs_voice_id}}\\n      - ELEVENLABS_MODEL_ID={{elevenlabs_model_id}}\\n      - ELEVENLABS_OUTPUT_DIR=/data/audio\\n    volumes:\\n      - mcp-elevenlabs-data:/data\\n    ports:\\n      - \\\\\\\"3024:3000\\\\\\\"\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\nvolumes:\\n  mcp-filesystem-data:\\n    name: mcp-filesystem-data\\n  mcp-elevenlabs-data:\\n    name: mcp-elevenlabs-data\\n```\\n\\n### 3. 
Storage Options\\n\\n#### File Storage (Default)\\nUses the default file storage mounted as a Docker volume.\\n\\n#### PostgreSQL Storage\\nCreate a PostgreSQL configuration file (`docker-compose.postgres.yml`):\\n\\n```yaml\\nversion: '3'\\n\\nservices:\\n  # Override the base service to use PostgreSQL\\n  mcp-prompts:\\n    environment:\\n      - STORAGE_TYPE=postgres\\n      - POSTGRES_HOST=postgres\\n      - POSTGRES_PORT=5432\\n      - POSTGRES_USER={{postgres_user}}\\n      - POSTGRES_PASSWORD={{postgres_password}}\\n      - POSTGRES_DATABASE={{postgres_database}}\\n    depends_on:\\n      postgres:\\n        condition: service_healthy\\n\\n  # PostgreSQL Database\\n  postgres:\\n    image: postgres:14-alpine\\n    container_name: mcp-prompts-postgres\\n    environment:\\n      - POSTGRES_USER={{postgres_user}}\\n      - POSTGRES_PASSWORD={{postgres_password}}\\n      - POSTGRES_DB={{postgres_database}}\\n    volumes:\\n      - mcp-prompts-postgres-data:/var/lib/postgresql/data\\n      - ./postgres/init:/docker-entrypoint-initdb.d\\n    ports:\\n      - \\\\\\\"5432:5432\\\\\\\"\\n    healthcheck:\\n      test: [\\\\\\\"CMD-SHELL\\\\\\\", \\\\\\\"pg_isready -U {{postgres_user}}\\\\\\\"]\\n      interval: 10s\\n      timeout: 5s\\n      retries: 5\\n      start_period: 10s\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\n  # Adminer for database management\\n  adminer:\\n    image: adminer:latest\\n    container_name: mcp-prompts-adminer\\n    ports:\\n      - \\\\\\\"8080:8080\\\\\\\"\\n    depends_on:\\n      - postgres\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\nvolumes:\\n  mcp-prompts-postgres-data:\\n    name: mcp-prompts-postgres-data\\n```\\n\\n#### PGAI/TimescaleDB (Vector Storage)\\nCreate a PGAI configuration file (`docker-compose.pgai.yml`):\\n\\n```yaml\\nversion: '3'\\n\\nservices:\\n  # Override the base service to use PGAI\\n  mcp-prompts:\\n    environment:\\n      - STORAGE_TYPE=pgai\\n     
 - PGAI_HOST=pgai\\n      - PGAI_PORT=5432\\n      - PGAI_USER=postgres\\n      - PGAI_PASSWORD=postgres\\n      - PGAI_DATABASE=mcp_prompts\\n      - PGAI_API_KEY={{pgai_api_key}}\\n      - PGAI_COLLECTION=mcp_prompts\\n    depends_on:\\n      pgai:\\n        condition: service_healthy\\n\\n  # TimescaleDB with PGAI extension\\n  pgai:\\n    image: timescale/timescaledb-pgai:pg15\\n    container_name: mcp-prompts-pgai\\n    environment:\\n      - POSTGRES_USER=postgres\\n      - POSTGRES_PASSWORD=postgres\\n      - POSTGRES_DB=mcp_prompts\\n    volumes:\\n      - mcp-prompts-pgai-data:/var/lib/postgresql/data\\n      - ./postgres/pgai-init:/docker-entrypoint-initdb.d\\n    ports:\\n      - \\\\\\\"5433:5432\\\\\\\"\\n    healthcheck:\\n      test: [\\\\\\\"CMD-SHELL\\\\\\\", \\\\\\\"pg_isready -U postgres\\\\\\\"]\\n      interval: 10s\\n      timeout: 5s\\n      retries: 5\\n      start_period: 30s\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\n  # Adminer for PGAI database management\\n  pgai-adminer:\\n    image: adminer:latest\\n    container_name: mcp-prompts-pgai-adminer\\n    ports:\\n      - \\\\\\\"8081:8080\\\\\\\"\\n    environment:\\n      - ADMINER_DEFAULT_SERVER=pgai\\n    depends_on:\\n      - pgai\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n\\nvolumes:\\n  mcp-prompts-pgai-data:\\n    name: mcp-prompts-pgai-data\\n```\\n\\n## Deployment and Management\\n\\n### Docker Compose Manager Script\\n\\nCreate a management script (`docker-compose-manager.sh`) for easier orchestration:\\n\\n```bash\\n#!/bin/bash\\n\\n# Colors for output\\nGREEN=\\\\\\\"\\\\\\\\033[0;32m\\\\\\\"\\nYELLOW=\\\\\\\"\\\\\\\\033[1;33m\\\\\\\"\\nBLUE=\\\\\\\"\\\\\\\\033[0;34m\\\\\\\"\\nRED=\\\\\\\"\\\\\\\\033[0;31m\\\\\\\"\\nNC=\\\\\\\"\\\\\\\\033[0m\\\\\\\" # No Color\\n\\n# Base directory for Docker Compose 
files\\nCOMPOSE_DIR=\\\\\\\"docker/compose\\\\\\\"\\nBASE_COMPOSE=\\\\\\\"$COMPOSE_DIR/docker-compose.base.yml\\\\\\\"\\n\\n# Display help message\\nfunction show_help {\\n  echo -e \\\\\\\"${BLUE}MCP Prompts Docker Compose Manager${NC}\\\\\\\"\\n  echo -e \\\\\\\"${YELLOW}Usage:${NC} $0 [command] [environment] [options]\\\\\\\"\\n  echo\\n  echo -e \\\\\\\"${YELLOW}Commands:${NC}\\\\\\\"\\n  echo -e \\\\\\\"  up        Start services\\\\\\\"\\n  echo -e \\\\\\\"  down      Stop services and remove containers\\\\\\\"\\n  echo -e \\\\\\\"  ps        List running services\\\\\\\"\\n  echo -e \\\\\\\"  logs      View logs\\\\\\\"\\n  echo -e \\\\\\\"  restart   Restart services\\\\\\\"\\n  echo -e \\\\\\\"  image     Build Docker images\\\\\\\"\\n  echo -e \\\\\\\"  publish   Build and publish Docker images\\\\\\\"\\n  echo\\n  echo -e \\\\\\\"${YELLOW}Environments:${NC}\\\\\\\"\\n  echo -e \\\\\\\"  base       Base MCP Prompts service\\\\\\\"\\n  echo -e \\\\\\\"  development Development environment with hot-reloading\\\\\\\"\\n  echo -e \\\\\\\"  postgres   PostgreSQL storage\\\\\\\"\\n  echo -e \\\\\\\"  pgai       PGAI TimescaleDB storage\\\\\\\"\\n  echo -e \\\\\\\"  test       Testing environment\\\\\\\"\\n  echo -e \\\\\\\"  integration Multiple MCP servers integration\\\\\\\"\\n  echo -e \\\\\\\"  sse        Server-Sent Events transport\\\\\\\"\\n  echo\\n  echo -e \\\\\\\"${YELLOW}Options:${NC}\\\\\\\"\\n  echo -e \\\\\\\"  -d, --detach     Run in detached mode\\\\\\\"\\n  echo -e \\\\\\\"  -t, --tag TAG    Specify tag for Docker images\\\\\\\"\\n  echo -e \\\\\\\"  -h, --help       Show this help message\\\\\\\"\\n}\\n\\n# Default values\\nDETACHED=\\\\\\\"\\\\\\\"\\nTAG=\\\\\\\"latest\\\\\\\"\\n\\n# Parse options\\nwhile [[ $# -gt 0 ]]; do\\n  case $1 in\\n    -h|--help)\\n      show_help\\n      exit 0\\n      ;;\\n    -d|--detach)\\n      DETACHED=\\\\\\\"-d\\\\\\\"\\n      shift\\n      ;;\\n    -t|--tag)\\n      TAG=\\\\\\\"$2\\\\\\\"\\n      shift 2\\n 
     ;;\\n    *)\\n      break\\n      ;;\\n  esac\\ndone\\n\\n# Check if at least command and environment are provided\\nif [ $# -lt 2 ]; then\\n  show_help\\n  exit 1\\nfi\\n\\nCOMMAND=$1\\nENV=$2\\n\\n# Validate environment\\nCOMPOSE_FILE=\\\\\\\"\\\\\\\"\\ncase $ENV in\\n  base)\\n    COMPOSE_FILE=\\\\\\\"$BASE_COMPOSE\\\\\\\"\\n    ;;\\n  development)\\n    COMPOSE_FILE=\\\\\\\"-f $BASE_COMPOSE -f $COMPOSE_DIR/docker-compose.development.yml\\\\\\\"\\n    ;;\\n  postgres)\\n    COMPOSE_FILE=\\\\\\\"-f $BASE_COMPOSE -f $COMPOSE_DIR/docker-compose.postgres.yml\\\\\\\"\\n    ;;\\n  pgai)\\n    COMPOSE_FILE=\\\\\\\"-f $BASE_COMPOSE -f $COMPOSE_DIR/docker-compose.pgai.yml\\\\\\\"\\n    ;;\\n  test)\\n    COMPOSE_FILE=\\\\\\\"-f $BASE_COMPOSE -f $COMPOSE_DIR/docker-compose.test.yml\\\\\\\"\\n    ;;\\n  integration)\\n    COMPOSE_FILE=\\\\\\\"-f $BASE_COMPOSE -f $COMPOSE_DIR/docker-compose.integration.yml\\\\\\\"\\n    ;;\\n  sse)\\n    COMPOSE_FILE=\\\\\\\"-f $BASE_COMPOSE -f $COMPOSE_DIR/docker-compose.sse.yml\\\\\\\"\\n    ;;\\n  *)\\n    echo -e \\\\\\\"${RED}Invalid environment: $ENV${NC}\\\\\\\"\\n    show_help\\n    exit 1\\n    ;;\\nesac\\n\\n# Execute the appropriate command\\ncase $COMMAND in\\n  up)\\n    echo -e \\\\\\\"${GREEN}Starting MCP Prompts services for environment: $ENV${NC}\\\\\\\"\\n    docker compose $COMPOSE_FILE up $DETACHED\\n    ;;\\n  down)\\n    echo -e \\\\\\\"${GREEN}Stopping MCP Prompts services for environment: $ENV${NC}\\\\\\\"\\n    docker compose $COMPOSE_FILE down\\n    ;;\\n  ps)\\n    echo -e \\\\\\\"${GREEN}Listing MCP Prompts services for environment: $ENV${NC}\\\\\\\"\\n    docker compose $COMPOSE_FILE ps\\n    ;;\\n  logs)\\n    echo -e \\\\\\\"${GREEN}Showing logs for MCP Prompts services in environment: $ENV${NC}\\\\\\\"\\n    docker compose $COMPOSE_FILE logs -f\\n    ;;\\n  restart)\\n    echo -e \\\\\\\"${GREEN}Restarting MCP Prompts services for environment: $ENV${NC}\\\\\\\"\\n    docker compose $COMPOSE_FILE 
restart\\n    ;;\\n  image)\\n    echo -e \\\\\\\"${GREEN}Building Docker image for environment: $ENV with tag: $TAG${NC}\\\\\\\"\\n    case $ENV in\\n      base|production)\\n        docker build -t {{registry}}/mcp-prompts:$TAG -f docker/Dockerfile.prod .\\n        echo -e \\\\\\\"${GREEN}Built: {{registry}}/mcp-prompts:$TAG${NC}\\\\\\\"\\n        ;;\\n      development)\\n        docker build -t {{registry}}/mcp-prompts:$TAG-dev -f docker/Dockerfile.development .\\n        echo -e \\\\\\\"${GREEN}Built: {{registry}}/mcp-prompts:$TAG-dev${NC}\\\\\\\"\\n        ;;\\n      test)\\n        docker build -t {{registry}}/mcp-prompts:$TAG-test -f docker/Dockerfile.testing .\\n        echo -e \\\\\\\"${GREEN}Built: {{registry}}/mcp-prompts:$TAG-test${NC}\\\\\\\"\\n        ;;\\n      *)\\n        echo -e \\\\\\\"${RED}Image building not supported for environment: $ENV${NC}\\\\\\\"\\n        exit 1\\n        ;;\\n    esac\\n    ;;\\n  publish)\\n    echo -e \\\\\\\"${GREEN}Building and publishing Docker images with tag: $TAG${NC}\\\\\\\"\\n    \\n    # Build images\\n    docker build -t {{registry}}/mcp-prompts:$TAG -f docker/Dockerfile.prod .\\n    docker build -t {{registry}}/mcp-prompts:$TAG-dev -f docker/Dockerfile.development .\\n    docker build -t {{registry}}/mcp-prompts:$TAG-test -f docker/Dockerfile.testing .\\n    \\n    # Push images\\n    echo -e \\\\\\\"${GREEN}Publishing images to Docker registry${NC}\\\\\\\"\\n    docker push {{registry}}/mcp-prompts:$TAG\\n    docker push {{registry}}/mcp-prompts:$TAG-dev\\n    docker push {{registry}}/mcp-prompts:$TAG-test\\n    \\n    echo -e \\\\\\\"${GREEN}Published images:${NC}\\\\\\\"\\n    echo -e \\\\\\\"  {{registry}}/mcp-prompts:$TAG\\\\\\\"\\n    echo -e \\\\\\\"  {{registry}}/mcp-prompts:$TAG-dev\\\\\\\"\\n    echo -e \\\\\\\"  {{registry}}/mcp-prompts:$TAG-test\\\\\\\"\\n    ;;\\n  *)\\n    echo -e \\\\\\\"${RED}Invalid command: $COMMAND${NC}\\\\\\\"\\n    show_help\\n    exit 1\\n    ;;\\nesac\\n```\\n\\nMake 
the script executable:\\n\\n```bash\\nchmod +x docker-compose-manager.sh\\n```\\n\\n## Launching the Environment\\n\\nNote: options such as `-d` must come *before* the command and environment, because the manager script stops parsing options at the first non-option argument.\\n\\n### 1. Start the Base Environment\\n\\n```bash\\n./docker-compose-manager.sh -d up base\\n```\\n\\n### 2. Start with MCP Integration\\n\\n```bash\\n./docker-compose-manager.sh -d up integration\\n```\\n\\n### 3. Start with PostgreSQL Storage\\n\\n```bash\\n./docker-compose-manager.sh -d up postgres\\n```\\n\\n### 4. Start with PGAI Vector Storage\\n\\n```bash\\n./docker-compose-manager.sh -d up pgai\\n```\\n\\n## Environment Configuration\\n\\n### Core Services Configuration\\n\\n1. **MCP Prompts Server Configuration**\\n   ```\\n   # Server Configuration\\n   PORT=3000\\n   HOST=0.0.0.0\\n   NODE_ENV=production\\n   LOG_LEVEL=info\\n   \\n   # Storage Configuration\\n   STORAGE_TYPE=file  # Options: file, postgres, pgai\\n   PROMPTS_DIR=/app/data/prompts\\n   BACKUPS_DIR=/app/data/backups\\n   \\n   # Integration Configuration\\n   MCP_INTEGRATION=true\\n   MCP_MEMORY_URL=http://mcp-memory:3000\\n   MCP_FILESYSTEM_URL=http://mcp-filesystem:3000\\n   MCP_GITHUB_URL=http://mcp-github:3000\\n   MCP_THINKING_URL=http://mcp-sequential-thinking:3000\\n   MCP_ELEVENLABS_URL=http://mcp-elevenlabs:3000\\n   ```\\n\\n2. **GitHub Integration**\\n   ```\\n   # GitHub API Configuration\\n   GITHUB_PERSONAL_ACCESS_TOKEN=your_token_here\\n   ```\\n\\n3. 
**ElevenLabs Integration**\\n   ```\\n   # ElevenLabs API Configuration\\n   ELEVENLABS_API_KEY=your_api_key_here\\n   ELEVENLABS_VOICE_ID=your_voice_id\\n   ELEVENLABS_MODEL_ID=eleven_monolingual_v1\\n   ELEVENLABS_OUTPUT_DIR=/data/audio\\n   ```\\n\\n### PostgreSQL Configuration\\n\\n```\\n# PostgreSQL Configuration\\nPOSTGRES_USER=postgres\\nPOSTGRES_PASSWORD=secure_password_here\\nPOSTGRES_DATABASE=mcp_prompts\\n```\\n\\n### PGAI/TimescaleDB Configuration\\n\\n```\\n# PGAI Configuration\\nPGAI_HOST=pgai\\nPGAI_PORT=5432\\nPGAI_USER=postgres\\nPGAI_PASSWORD=postgres\\nPGAI_DATABASE=mcp_prompts\\nPGAI_API_KEY=your_pgai_key_here\\nPGAI_COLLECTION=mcp_prompts\\n```\\n\\n## Integration Verification\\n\\n### 1. Health Check\\n\\nCheck if all services are running:\\n\\n```bash\\n./docker-compose-manager.sh ps integration\\n```\\n\\n### 2. Test MCP Prompts Server\\n\\n```bash\\ncurl http://localhost:3000/health\\n```\\n\\n### 3. Test Resource Servers\\n\\n```bash\\n# Test Memory Server\\ncurl http://localhost:3020/health\\n\\n# Test Filesystem Server\\ncurl http://localhost:3021/health\\n\\n# Test GitHub Server\\ncurl http://localhost:3022/health\\n\\n# Test Sequential Thinking Server\\ncurl http://localhost:3023/health\\n\\n# Test ElevenLabs Server\\ncurl http://localhost:3024/health\\n```\\n\\n## Troubleshooting Common Issues\\n\\n### Container Startup Issues\\n\\n1. **Container fails to start**\\n   - Check logs: `./docker-compose-manager.sh logs integration`\\n   - Verify environment variables are correctly set\\n   - Ensure ports are not already in use\\n\\n2. **Network connectivity issues**\\n   - Verify all containers are on the same network\\n   - Check Docker network configuration: `docker network inspect mcp-network`\\n\\n3. 
**Storage issues**\\n   - Ensure volume permissions are correctly set\\n   - Verify database initialization scripts are valid\\n\\n## Resource Management\\n\\n### Clean Up Unused Resources\\n\\n```bash\\n# Remove stopped containers\\ndocker container prune\\n\\n# Remove unused volumes\\ndocker volume prune\\n\\n# Remove unused networks\\ndocker network prune\\n\\n# Remove dangling images\\ndocker image prune\\n```\\n\\n### Data Persistence\\n\\nDocker volumes ensure your data persists across container restarts:\\n\\n```\\nvolumes:\\n  mcp-data:              # MCP Prompts data\\n  mcp-filesystem-data:   # Filesystem server data\\n  mcp-elevenlabs-data:   # Audio output data\\n  mcp-prompts-postgres-data:  # PostgreSQL data\\n  mcp-prompts-pgai-data:      # PGAI/TimescaleDB data\\n```\\n\\n## Best Practices for Production\\n\\n1. **Security Considerations**\\n   - Use environment files for secrets\\n   - Configure proper network isolation\\n   - Set up user permissions for service accounts\\n   - Enable HTTPS with proper certificates\\n\\n2. **High Availability**\\n   - Implement container restart policies\\n   - Consider Docker Swarm or Kubernetes for clustering\\n   - Set up monitoring and alerting\\n   - Establish backup and recovery procedures\\n\\n3. **Performance Optimization**\\n   - Tune PostgreSQL/PGAI for your workload\\n   - Configure appropriate resource limits\\n   - Implement caching strategies\\n   - Monitor resource usage\\n\\n## Advanced Customization\\n\\n### Adding Custom MCP Servers\\n\\n1. Create a Dockerfile for your custom server\\n2. Add the service to your Docker Compose file\\n3. Configure environment variables for integration\\n4. 
Update the MCP Prompts server configuration\\n\\n### Extending with Additional Services\\n\\n```yaml\\nservices:\\n  # Your custom MCP server\\n  mcp-custom:\\n    image: node:20-alpine\\n    container_name: mcp-custom\\n    command: sh -c \\\\\\\"npm install -g your-custom-mcp-server && npx -y your-custom-mcp-server\\\\\\\"\\n    environment:\\n      - CUSTOM_API_KEY={{custom_api_key}}\\n    ports:\\n      - \\\\\\\"3025:3000\\\\\\\"\\n    restart: unless-stopped\\n    networks:\\n      - mcp-network\\n```\\n\\n## Next Steps\\n\\n1. Explore integration with AI clients like Claude Desktop, Zed, and LibreChat\\n2. Implement monitoring and logging solutions\\n3. Set up CI/CD pipelines for deployment\\n4. Explore advanced use cases for your specific domain\\n\\n## Additional Resources\\n\\n- [MCP Protocol Documentation](https://modelcontextprotocol.io/)\\n- [Docker Documentation](https://docs.docker.com/)\\n- [MCP Servers Repository](https://github.com/modelcontextprotocol/servers)\\n- {{additional_resources}}\\n\\nWhat specific aspect of this Docker-based MCP integration would you like me to elaborate on further?\\\",\\n  \\\"isTemplate\\\": true,\\n  \\\"variables\\\": [\\n    \\\"use_case\\\",\\n    \\\"additional_prerequisites\\\",\\n    \\\"additional_servers\\\",\\n    \\\"registry\\\",\\n    \\\"github_token\\\",\\n    \\\"elevenlabs_api_key\\\",\\n    \\\"elevenlabs_voice_id\\\",\\n    \\\"elevenlabs_model_id\\\",\\n    \\\"postgres_user\\\",\\n    \\\"postgres_password\\\",\\n    \\\"postgres_database\\\",\\n    \\\"pgai_api_key\\\",\\n    \\\"custom_api_key\\\",\\n    \\\"additional_resources\\\"\\n  ],\\n  \\\"tags\\\": [\\n    \\\"docker\\\",\\n    \\\"mcp-integration\\\",\\n    \\\"multi-server\\\",\\n    \\\"orchestration\\\",\\n    \\\"containerization\\\",\\n    \\\"devops\\\",\\n    \\\"tutorial\\\"\\n  ],\\n  \\\"createdAt\\\": \\\"2025-03-15T21:00:00.000Z\\\",\\n  \\\"updatedAt\\\": \\\"2025-03-15T21:00:00.000Z\\\",\\n  \\\"version\\\": 1,\\n  
\\\"metadata\\\": {\\n    \\\"recommended_servers\\\": [\\n      \\\"@modelcontextprotocol/server-filesystem\\\",\\n      \\\"@modelcontextprotocol/server-memory\\\",\\n      \\\"@modelcontextprotocol/server-github\\\",\\n      \\\"@modelcontextprotocol/server-sequential-thinking\\\",\\n      \\\"elevenlabs-mcp-server\\\"\\n    ],\\n    \\\"example_values\\\": {\\n      \\\"use_case\\\": \\\"AI-powered code analysis and documentation\\\",\\n      \\\"additional_prerequisites\\\": \\\"Node.js 18+ for local development\\\",\\n      \\\"registry\\\": \\\"sparesparrow\\\",\\n      \\\"postgres_user\\\": \\\"postgres\\\",\\n      \\\"postgres_password\\\": \\\"secure_password_here\\\",\\n      \\\"postgres_database\\\": \\\"mcp_prompts",
  "variables": {},
  "metadata": {
    "source": "/home/sparrow/projects/mcp-prompts/prompts/docker-mcp-servers-orchestration.json",
    "imported": true
  }
}
```
Page 14/21FirstPrevNextLast