#
tokens: 49833/50000 9/1179 files (page 15/24)
line numbers: on (toggle) · view on GitHub
page actions: raw · markdown · copy · reset
This is page 15 of 24. Use http://codebase.md/sparesparrow/mcp-project-orchestrator?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .cursorrules
├── .env.example
├── .github
│   └── workflows
│       ├── build.yml
│       ├── ci-cd.yml
│       ├── ci.yml
│       ├── deploy.yml
│       ├── ecosystem-monitor.yml
│       ├── fan-out-orchestrator.yml
│       └── release.yml
├── .gitignore
├── .pre-commit-config.yaml
├── AUTOMOTIVE_CAMERA_SYSTEM_SUMMARY.md
├── automotive-camera-system
│   ├── docs
│   │   └── IMPLEMENTACE_CS.md
│   └── README.md
├── AWS_MCP_IMPLEMENTATION_SUMMARY.md
├── AWS_MCP_QUICKSTART.md
├── AWS_SIP_TRUNK_DEPLOYMENT_COMPLETE.md
├── aws-sip-trunk
│   ├── .gitignore
│   ├── config
│   │   ├── extensions.conf.j2
│   │   └── pjsip.conf.j2
│   ├── DEPLOYMENT_SUMMARY.md
│   ├── docs
│   │   ├── DEPLOYMENT.md
│   │   └── TROUBLESHOOTING.md
│   ├── PROJECT_INDEX.md
│   ├── pyproject.toml
│   ├── QUICKSTART.md
│   ├── README.md
│   ├── scripts
│   │   ├── deploy-asterisk-aws.sh
│   │   └── user-data.sh
│   ├── terraform
│   │   ├── ec2.tf
│   │   ├── main.tf
│   │   ├── monitoring.tf
│   │   ├── networking.tf
│   │   ├── outputs.tf
│   │   ├── storage.tf
│   │   ├── terraform.tfvars.example
│   │   └── variables.tf
│   ├── tests
│   │   └── test_sip_connectivity.py
│   └── VERIFICATION_CHECKLIST.md
├── CLAUDE.md
├── component_templates.json
├── conanfile.py
├── config
│   ├── default.json
│   └── project_orchestration.json
├── Containerfile
├── cursor-templates
│   └── openssl
│       ├── linux-dev.mdc.jinja2
│       └── shared.mdc.jinja2
├── data
│   └── prompts
│       └── templates
│           ├── advanced-multi-server-template.json
│           ├── analysis-assistant.json
│           ├── analyze-mermaid-diagram.json
│           ├── architecture-design-assistant.json
│           ├── code-diagram-documentation-creator.json
│           ├── code-refactoring-assistant.json
│           ├── code-review-assistant.json
│           ├── collaborative-development.json
│           ├── consolidated-interfaces-template.json
│           ├── could-you-interpret-the-assumed-applicat.json
│           ├── data-analysis-template.json
│           ├── database-query-assistant.json
│           ├── debugging-assistant.json
│           ├── development-system-prompt-zcna0.json
│           ├── development-system-prompt.json
│           ├── development-workflow.json
│           ├── docker-compose-prompt-combiner.json
│           ├── docker-containerization-guide.json
│           ├── docker-mcp-servers-orchestration.json
│           ├── foresight-assistant.json
│           ├── generate-different-types-of-questions-ab.json
│           ├── generate-mermaid-diagram.json
│           ├── image-1-describe-the-icon-in-one-sen.json
│           ├── initialize-project-setup-for-a-new-micro.json
│           ├── install-dependencies-build-run-test.json
│           ├── mcp-code-generator.json
│           ├── mcp-integration-assistant.json
│           ├── mcp-resources-explorer.json
│           ├── mcp-resources-integration.json
│           ├── mcp-server-configurator.json
│           ├── mcp-server-dev-prompt-combiner.json
│           ├── mcp-server-integration-template.json
│           ├── mcp-template-system.json
│           ├── mermaid-analysis-expert.json
│           ├── mermaid-class-diagram-generator.json
│           ├── mermaid-diagram-generator.json
│           ├── mermaid-diagram-modifier.json
│           ├── modify-mermaid-diagram.json
│           ├── monorepo-migration-guide.json
│           ├── multi-resource-context.json
│           ├── project-analysis-assistant.json
│           ├── prompt-combiner-interface.json
│           ├── prompt-templates.json
│           ├── repository-explorer.json
│           ├── research-assistant.json
│           ├── sequential-data-analysis.json
│           ├── solid-code-analysis-visualizer.json
│           ├── task-list-helper-8ithy.json
│           ├── template-based-mcp-integration.json
│           ├── templates.json
│           ├── test-prompt.json
│           └── you-are-limited-to-respond-yes-or-no-onl.json
├── docs
│   ├── AWS_MCP.md
│   ├── AWS.md
│   ├── CONAN.md
│   └── integration.md
├── elevenlabs-agents
│   ├── agent-prompts.json
│   └── README.md
├── IMPLEMENTATION_STATUS.md
├── integration_plan.md
├── LICENSE
├── MANIFEST.in
├── mcp-project-orchestrator
│   └── openssl
│       ├── .github
│       │   └── workflows
│       │       └── validate-cursor-config.yml
│       ├── conanfile.py
│       ├── CURSOR_DEPLOYMENT_POLISH.md
│       ├── cursor-rules
│       │   ├── mcp.json.jinja2
│       │   ├── prompts
│       │   │   ├── fips-compliance.md.jinja2
│       │   │   ├── openssl-coding-standards.md.jinja2
│       │   │   └── pr-review.md.jinja2
│       │   └── rules
│       │       ├── ci-linux.mdc.jinja2
│       │       ├── linux-dev.mdc.jinja2
│       │       ├── macos-dev.mdc.jinja2
│       │       ├── shared.mdc.jinja2
│       │       └── windows-dev.mdc.jinja2
│       ├── docs
│       │   └── cursor-configuration-management.md
│       ├── examples
│       │   └── example-workspace
│       │       ├── .cursor
│       │       │   ├── mcp.json
│       │       │   └── rules
│       │       │       ├── linux-dev.mdc
│       │       │       └── shared.mdc
│       │       ├── .gitignore
│       │       ├── CMakeLists.txt
│       │       ├── conanfile.py
│       │       ├── profiles
│       │       │   ├── linux-gcc-debug.profile
│       │       │   └── linux-gcc-release.profile
│       │       ├── README.md
│       │       └── src
│       │           ├── crypto_utils.cpp
│       │           ├── crypto_utils.h
│       │           └── main.cpp
│       ├── IMPLEMENTATION_SUMMARY.md
│       ├── mcp_orchestrator
│       │   ├── __init__.py
│       │   ├── cli.py
│       │   ├── conan_integration.py
│       │   ├── cursor_config.py
│       │   ├── cursor_deployer.py
│       │   ├── deploy_cursor.py
│       │   ├── env_config.py
│       │   ├── platform_detector.py
│       │   └── yaml_validator.py
│       ├── openssl-cursor-example-workspace-20251014_121133.zip
│       ├── pyproject.toml
│       ├── README.md
│       ├── requirements.txt
│       ├── scripts
│       │   └── create_example_workspace.py
│       ├── setup.py
│       ├── test_deployment.py
│       └── tests
│           ├── __init__.py
│           ├── test_cursor_deployer.py
│           └── test_template_validation.py
├── printcast-agent
│   ├── .env.example
│   ├── config
│   │   └── asterisk
│   │       └── extensions.conf
│   ├── Containerfile
│   ├── docker-compose.yml
│   ├── pyproject.toml
│   ├── README.md
│   ├── scripts
│   │   └── docker-entrypoint.sh
│   ├── src
│   │   ├── integrations
│   │   │   ├── __init__.py
│   │   │   ├── asterisk.py
│   │   │   ├── content.py
│   │   │   ├── delivery.py
│   │   │   ├── elevenlabs.py
│   │   │   └── printing.py
│   │   ├── mcp_server
│   │   │   ├── __init__.py
│   │   │   ├── main.py
│   │   │   └── server.py
│   │   └── orchestration
│   │       ├── __init__.py
│   │       └── workflow.py
│   └── tests
│       └── test_mcp_server.py
├── project_orchestration.json
├── project_templates.json
├── pyproject.toml
├── README.md
├── REFACTORING_COMPLETED.md
├── REFACTORING_RECOMMENDATIONS.md
├── requirements.txt
├── scripts
│   ├── archive
│   │   ├── init_claude_test.sh
│   │   ├── init_postgres.sh
│   │   ├── start_mcp_servers.sh
│   │   └── test_claude_desktop.sh
│   ├── consolidate_mermaid.py
│   ├── consolidate_prompts.py
│   ├── consolidate_resources.py
│   ├── consolidate_templates.py
│   ├── INSTRUCTIONS.md
│   ├── README.md
│   ├── setup_aws_mcp.sh
│   ├── setup_mcp.sh
│   ├── setup_orchestrator.sh
│   ├── setup_project.py
│   └── test_mcp.sh
├── src
│   └── mcp_project_orchestrator
│       ├── __init__.py
│       ├── __main__.py
│       ├── aws_mcp.py
│       ├── cli
│       │   └── __init__.py
│       ├── cli.py
│       ├── commands
│       │   └── openssl_cli.py
│       ├── core
│       │   ├── __init__.py
│       │   ├── base.py
│       │   ├── config.py
│       │   ├── exceptions.py
│       │   ├── fastmcp.py
│       │   ├── logging.py
│       │   └── managers.py
│       ├── cursor_deployer.py
│       ├── ecosystem_monitor.py
│       ├── fan_out_orchestrator.py
│       ├── fastmcp.py
│       ├── mcp-py
│       │   ├── AggregateVersions.py
│       │   ├── CustomBashTool.py
│       │   ├── FileAnnotator.py
│       │   ├── mcp-client.py
│       │   ├── mcp-server.py
│       │   ├── MermaidDiagramGenerator.py
│       │   ├── NamingAgent.py
│       │   └── solid-analyzer-agent.py
│       ├── mermaid
│       │   ├── __init__.py
│       │   ├── generator.py
│       │   ├── mermaid_orchestrator.py
│       │   ├── renderer.py
│       │   ├── templates
│       │   │   ├── AbstractFactory-diagram.json
│       │   │   ├── Adapter-diagram.json
│       │   │   ├── Analyze_Mermaid_Diagram.json
│       │   │   ├── Builder-diagram.json
│       │   │   ├── Chain-diagram.json
│       │   │   ├── Code_Diagram_Documentation_Creator.json
│       │   │   ├── Command-diagram.json
│       │   │   ├── Decorator-diagram.json
│       │   │   ├── Facade-diagram.json
│       │   │   ├── Factory-diagram.json
│       │   │   ├── flowchart
│       │   │   │   ├── AbstractFactory-diagram.json
│       │   │   │   ├── Adapter-diagram.json
│       │   │   │   ├── Analyze_Mermaid_Diagram.json
│       │   │   │   ├── Builder-diagram.json
│       │   │   │   ├── Chain-diagram.json
│       │   │   │   ├── Code_Diagram_Documentation_Creator.json
│       │   │   │   ├── Command-diagram.json
│       │   │   │   ├── Decorator-diagram.json
│       │   │   │   ├── Facade-diagram.json
│       │   │   │   ├── Factory-diagram.json
│       │   │   │   ├── Generate_Mermaid_Diagram.json
│       │   │   │   ├── generated_diagram.json
│       │   │   │   ├── integration.json
│       │   │   │   ├── Iterator-diagram.json
│       │   │   │   ├── Mediator-diagram.json
│       │   │   │   ├── Memento-diagram.json
│       │   │   │   ├── Mermaid_Analysis_Expert.json
│       │   │   │   ├── Mermaid_Class_Diagram_Generator.json
│       │   │   │   ├── Mermaid_Diagram_Generator.json
│       │   │   │   ├── Mermaid_Diagram_Modifier.json
│       │   │   │   ├── Modify_Mermaid_Diagram.json
│       │   │   │   ├── Observer-diagram.json
│       │   │   │   ├── Prototype-diagram.json
│       │   │   │   ├── Proxy-diagram.json
│       │   │   │   ├── README.json
│       │   │   │   ├── Singleton-diagram.json
│       │   │   │   ├── State-diagram.json
│       │   │   │   ├── Strategy-diagram.json
│       │   │   │   ├── TemplateMethod-diagram.json
│       │   │   │   ├── theme_dark.json
│       │   │   │   ├── theme_default.json
│       │   │   │   ├── theme_pastel.json
│       │   │   │   ├── theme_vibrant.json
│       │   │   │   └── Visitor-diagram.json
│       │   │   ├── Generate_Mermaid_Diagram.json
│       │   │   ├── generated_diagram.json
│       │   │   ├── index.json
│       │   │   ├── integration.json
│       │   │   ├── Iterator-diagram.json
│       │   │   ├── Mediator-diagram.json
│       │   │   ├── Memento-diagram.json
│       │   │   ├── Mermaid_Analysis_Expert.json
│       │   │   ├── Mermaid_Class_Diagram_Generator.json
│       │   │   ├── Mermaid_Diagram_Generator.json
│       │   │   ├── Mermaid_Diagram_Modifier.json
│       │   │   ├── Modify_Mermaid_Diagram.json
│       │   │   ├── Observer-diagram.json
│       │   │   ├── Prototype-diagram.json
│       │   │   ├── Proxy-diagram.json
│       │   │   ├── README.json
│       │   │   ├── Singleton-diagram.json
│       │   │   ├── State-diagram.json
│       │   │   ├── Strategy-diagram.json
│       │   │   ├── TemplateMethod-diagram.json
│       │   │   ├── theme_dark.json
│       │   │   ├── theme_default.json
│       │   │   ├── theme_pastel.json
│       │   │   ├── theme_vibrant.json
│       │   │   └── Visitor-diagram.json
│       │   └── types.py
│       ├── project_orchestration.py
│       ├── prompt_manager
│       │   ├── __init__.py
│       │   ├── loader.py
│       │   ├── manager.py
│       │   └── template.py
│       ├── prompts
│       │   ├── __dirname.json
│       │   ├── __image_1___describe_the_icon_in_one_sen___.json
│       │   ├── __init__.py
│       │   ├── __type.json
│       │   ├── _.json
│       │   ├── _DEFAULT_OPEN_DELIMITER.json
│       │   ├── _emojiRegex.json
│       │   ├── _UUID_CHARS.json
│       │   ├── a.json
│       │   ├── A.json
│       │   ├── Aa.json
│       │   ├── aAnnotationPadding.json
│       │   ├── absoluteThresholdGroup.json
│       │   ├── add.json
│       │   ├── ADDITIONAL_PROPERTY_FLAG.json
│       │   ├── Advanced_Multi-Server_Integration_Template.json
│       │   ├── allOptionsList.json
│       │   ├── analysis
│       │   │   ├── Data_Analysis_Template.json
│       │   │   ├── index.json
│       │   │   ├── Mermaid_Analysis_Expert.json
│       │   │   ├── Sequential_Data_Analysis_with_MCP_Integration.json
│       │   │   └── SOLID_Code_Analysis_Visualizer.json
│       │   ├── Analysis_Assistant.json
│       │   ├── Analyze_Mermaid_Diagram.json
│       │   ├── ANDROID_EVERGREEN_FIRST.json
│       │   ├── ANSI_ESCAPE_BELL.json
│       │   ├── architecture
│       │   │   ├── index.json
│       │   │   └── PromptCombiner_Interface.json
│       │   ├── Architecture_Design_Assistant.json
│       │   ├── argsTag.json
│       │   ├── ARROW.json
│       │   ├── assistant
│       │   │   ├── Analysis_Assistant.json
│       │   │   ├── Architecture_Design_Assistant.json
│       │   │   ├── Code_Refactoring_Assistant.json
│       │   │   ├── Code_Review_Assistant.json
│       │   │   ├── Database_Query_Assistant.json
│       │   │   ├── Debugging_Assistant.json
│       │   │   ├── Foresight_Assistant.json
│       │   │   ├── index.json
│       │   │   ├── MCP_Integration_Assistant.json
│       │   │   ├── Project_Analysis_Assistant.json
│       │   │   └── Research_Assistant.json
│       │   ├── astralRange.json
│       │   ├── at.json
│       │   ├── authorization_endpoint.json
│       │   ├── b.json
│       │   ├── BABELIGNORE_FILENAME.json
│       │   ├── BACKSLASH.json
│       │   ├── backupId.json
│       │   ├── BANG.json
│       │   ├── BASE64_MAP.json
│       │   ├── baseFlags.json
│       │   ├── Basic_Template.json
│       │   ├── bgModel.json
│       │   ├── bignum.json
│       │   ├── blockKeywordsStr.json
│       │   ├── BOMChar.json
│       │   ├── boundary.json
│       │   ├── brackets.json
│       │   ├── BROWSER_VAR.json
│       │   ├── bt.json
│       │   ├── BUILTIN.json
│       │   ├── BULLET.json
│       │   ├── c.json
│       │   ├── C.json
│       │   ├── CACHE_VERSION.json
│       │   ├── cacheControl.json
│       │   ├── cacheProp.json
│       │   ├── category.py
│       │   ├── CHANGE_EVENT.json
│       │   ├── CHAR_CODE_0.json
│       │   ├── chars.json
│       │   ├── cjsPattern.json
│       │   ├── cKeywords.json
│       │   ├── classForPercent.json
│       │   ├── classStr.json
│       │   ├── clientFirstMessageBare.json
│       │   ├── cmd.json
│       │   ├── Code_Diagram_Documentation_Creator.json
│       │   ├── Code_Refactoring_Assistant.json
│       │   ├── Code_Review_Assistant.json
│       │   ├── code.json
│       │   ├── coding
│       │   │   ├── __dirname.json
│       │   │   ├── _.json
│       │   │   ├── _DEFAULT_OPEN_DELIMITER.json
│       │   │   ├── _emojiRegex.json
│       │   │   ├── _UUID_CHARS.json
│       │   │   ├── a.json
│       │   │   ├── A.json
│       │   │   ├── aAnnotationPadding.json
│       │   │   ├── absoluteThresholdGroup.json
│       │   │   ├── add.json
│       │   │   ├── ADDITIONAL_PROPERTY_FLAG.json
│       │   │   ├── allOptionsList.json
│       │   │   ├── ANDROID_EVERGREEN_FIRST.json
│       │   │   ├── ANSI_ESCAPE_BELL.json
│       │   │   ├── argsTag.json
│       │   │   ├── ARROW.json
│       │   │   ├── astralRange.json
│       │   │   ├── at.json
│       │   │   ├── authorization_endpoint.json
│       │   │   ├── BABELIGNORE_FILENAME.json
│       │   │   ├── BACKSLASH.json
│       │   │   ├── BANG.json
│       │   │   ├── BASE64_MAP.json
│       │   │   ├── baseFlags.json
│       │   │   ├── bgModel.json
│       │   │   ├── bignum.json
│       │   │   ├── blockKeywordsStr.json
│       │   │   ├── BOMChar.json
│       │   │   ├── boundary.json
│       │   │   ├── brackets.json
│       │   │   ├── BROWSER_VAR.json
│       │   │   ├── bt.json
│       │   │   ├── BUILTIN.json
│       │   │   ├── BULLET.json
│       │   │   ├── c.json
│       │   │   ├── C.json
│       │   │   ├── CACHE_VERSION.json
│       │   │   ├── cacheControl.json
│       │   │   ├── cacheProp.json
│       │   │   ├── CHANGE_EVENT.json
│       │   │   ├── CHAR_CODE_0.json
│       │   │   ├── chars.json
│       │   │   ├── cjsPattern.json
│       │   │   ├── cKeywords.json
│       │   │   ├── classForPercent.json
│       │   │   ├── classStr.json
│       │   │   ├── clientFirstMessageBare.json
│       │   │   ├── cmd.json
│       │   │   ├── code.json
│       │   │   ├── colorCode.json
│       │   │   ├── comma.json
│       │   │   ├── command.json
│       │   │   ├── configJsContent.json
│       │   │   ├── connectionString.json
│       │   │   ├── cssClassStr.json
│       │   │   ├── currentBoundaryParse.json
│       │   │   ├── d.json
│       │   │   ├── data.json
│       │   │   ├── DATA.json
│       │   │   ├── dataWebpackPrefix.json
│       │   │   ├── debug.json
│       │   │   ├── decodeStateVectorV2.json
│       │   │   ├── DEFAULT_DELIMITER.json
│       │   │   ├── DEFAULT_DIAGRAM_DIRECTION.json
│       │   │   ├── DEFAULT_JS_PATTERN.json
│       │   │   ├── DEFAULT_LOG_TARGET.json
│       │   │   ├── defaultHelpOpt.json
│       │   │   ├── defaultHost.json
│       │   │   ├── deferY18nLookupPrefix.json
│       │   │   ├── DELIM.json
│       │   │   ├── delimiter.json
│       │   │   ├── DEPRECATION.json
│       │   │   ├── destMain.json
│       │   │   ├── DID_NOT_THROW.json
│       │   │   ├── direction.json
│       │   │   ├── displayValue.json
│       │   │   ├── DNS.json
│       │   │   ├── doc.json
│       │   │   ├── DOCUMENTATION_NOTE.json
│       │   │   ├── DOT.json
│       │   │   ├── DOTS.json
│       │   │   ├── dummyCompoundId.json
│       │   │   ├── e.json
│       │   │   ├── E.json
│       │   │   ├── earlyHintsLink.json
│       │   │   ├── elide.json
│       │   │   ├── EMPTY.json
│       │   │   ├── end.json
│       │   │   ├── endpoint.json
│       │   │   ├── environment.json
│       │   │   ├── ERR_CODE.json
│       │   │   ├── errMessage.json
│       │   │   ├── errMsg.json
│       │   │   ├── ERROR_MESSAGE.json
│       │   │   ├── error.json
│       │   │   ├── ERROR.json
│       │   │   ├── ERRORCLASS.json
│       │   │   ├── errorMessage.json
│       │   │   ├── es6Default.json
│       │   │   ├── ESC.json
│       │   │   ├── Escapable.json
│       │   │   ├── escapedChar.json
│       │   │   ├── escapeFuncStr.json
│       │   │   ├── escSlash.json
│       │   │   ├── ev.json
│       │   │   ├── event.json
│       │   │   ├── execaMessage.json
│       │   │   ├── EXPECTED_LABEL.json
│       │   │   ├── expected.json
│       │   │   ├── expectedString.json
│       │   │   ├── expression1.json
│       │   │   ├── EXTENSION.json
│       │   │   ├── f.json
│       │   │   ├── FAIL_TEXT.json
│       │   │   ├── FILE_BROWSER_FACTORY.json
│       │   │   ├── fill.json
│       │   │   ├── findPackageJson.json
│       │   │   ├── fnKey.json
│       │   │   ├── FORMAT.json
│       │   │   ├── formatted.json
│       │   │   ├── from.json
│       │   │   ├── fullpaths.json
│       │   │   ├── FUNC_ERROR_TEXT.json
│       │   │   ├── GenStateSuspendedStart.json
│       │   │   ├── GENSYNC_EXPECTED_START.json
│       │   │   ├── gutter.json
│       │   │   ├── h.json
│       │   │   ├── handlerFuncName.json
│       │   │   ├── HASH_UNDEFINED.json
│       │   │   ├── head.json
│       │   │   ├── helpMessage.json
│       │   │   ├── HINT_ARG.json
│       │   │   ├── HOOK_RETURNED_NOTHING_ERROR_MESSAGE.json
│       │   │   ├── i.json
│       │   │   ├── id.json
│       │   │   ├── identifier.json
│       │   │   ├── Identifier.json
│       │   │   ├── INDENT.json
│       │   │   ├── indentation.json
│       │   │   ├── index.json
│       │   │   ├── INDIRECTION_FRAGMENT.json
│       │   │   ├── input.json
│       │   │   ├── inputText.json
│       │   │   ├── insert.json
│       │   │   ├── insertPromptQuery.json
│       │   │   ├── INSPECT_MAX_BYTES.json
│       │   │   ├── intToCharMap.json
│       │   │   ├── IS_ITERABLE_SENTINEL.json
│       │   │   ├── IS_KEYED_SENTINEL.json
│       │   │   ├── isConfigType.json
│       │   │   ├── isoSentinel.json
│       │   │   ├── isSourceNode.json
│       │   │   ├── j.json
│       │   │   ├── JAKE_CMD.json
│       │   │   ├── JEST_GLOBAL_NAME.json
│       │   │   ├── JEST_GLOBALS_MODULE_NAME.json
│       │   │   ├── JSON_SYNTAX_CHAR.json
│       │   │   ├── json.json
│       │   │   ├── jsonType.json
│       │   │   ├── jupyter_namespaceObject.json
│       │   │   ├── JUPYTERLAB_DOCMANAGER_PLUGIN_ID.json
│       │   │   ├── k.json
│       │   │   ├── KERNEL_STATUS_ERROR_CLASS.json
│       │   │   ├── key.json
│       │   │   ├── l.json
│       │   │   ├── labelId.json
│       │   │   ├── LATEST_PROTOCOL_VERSION.json
│       │   │   ├── LETTERDASHNUMBER.json
│       │   │   ├── LF.json
│       │   │   ├── LIMIT_REPLACE_NODE.json
│       │   │   ├── logTime.json
│       │   │   ├── lstatkey.json
│       │   │   ├── lt.json
│       │   │   ├── m.json
│       │   │   ├── maliciousPayload.json
│       │   │   ├── mask.json
│       │   │   ├── match.json
│       │   │   ├── matchingDelim.json
│       │   │   ├── MAXIMUM_MESSAGE_SIZE.json
│       │   │   ├── mdcContent.json
│       │   │   ├── MERMAID_DOM_ID_PREFIX.json
│       │   │   ├── message.json
│       │   │   ├── messages.json
│       │   │   ├── meth.json
│       │   │   ├── minimatch.json
│       │   │   ├── MOCK_CONSTRUCTOR_NAME.json
│       │   │   ├── MOCKS_PATTERN.json
│       │   │   ├── moduleDirectory.json
│       │   │   ├── msg.json
│       │   │   ├── mtr.json
│       │   │   ├── multipartType.json
│       │   │   ├── n.json
│       │   │   ├── N.json
│       │   │   ├── name.json
│       │   │   ├── NATIVE_PLATFORM.json
│       │   │   ├── newUrl.json
│       │   │   ├── NM.json
│       │   │   ├── NO_ARGUMENTS.json
│       │   │   ├── NO_DIFF_MESSAGE.json
│       │   │   ├── NODE_MODULES.json
│       │   │   ├── nodeInternalPrefix.json
│       │   │   ├── nonASCIIidentifierStartChars.json
│       │   │   ├── nonKey.json
│       │   │   ├── NOT_A_DOT.json
│       │   │   ├── notCharacterOrDash.json
│       │   │   ├── notebookURL.json
│       │   │   ├── notSelector.json
│       │   │   ├── nullTag.json
│       │   │   ├── num.json
│       │   │   ├── NUMBER.json
│       │   │   ├── o.json
│       │   │   ├── O.json
│       │   │   ├── octChar.json
│       │   │   ├── octetStreamType.json
│       │   │   ├── operators.json
│       │   │   ├── out.json
│       │   │   ├── OUTSIDE_JEST_VM_PROTOCOL.json
│       │   │   ├── override.json
│       │   │   ├── p.json
│       │   │   ├── PACKAGE_FILENAME.json
│       │   │   ├── PACKAGE_JSON.json
│       │   │   ├── packageVersion.json
│       │   │   ├── paddedNumber.json
│       │   │   ├── page.json
│       │   │   ├── parseClass.json
│       │   │   ├── path.json
│       │   │   ├── pathExt.json
│       │   │   ├── pattern.json
│       │   │   ├── PatternBoolean.json
│       │   │   ├── pBuiltins.json
│       │   │   ├── pFloatForm.json
│       │   │   ├── pkg.json
│       │   │   ├── PLUGIN_ID_DOC_MANAGER.json
│       │   │   ├── plusChar.json
│       │   │   ├── PN_CHARS.json
│       │   │   ├── point.json
│       │   │   ├── prefix.json
│       │   │   ├── PRETTY_PLACEHOLDER.json
│       │   │   ├── property_prefix.json
│       │   │   ├── pubkey256.json
│       │   │   ├── Q.json
│       │   │   ├── qmark.json
│       │   │   ├── QO.json
│       │   │   ├── query.json
│       │   │   ├── querystringType.json
│       │   │   ├── queryText.json
│       │   │   ├── r.json
│       │   │   ├── R.json
│       │   │   ├── rangeStart.json
│       │   │   ├── re.json
│       │   │   ├── reI.json
│       │   │   ├── REQUIRED_FIELD_SYMBOL.json
│       │   │   ├── reserve.json
│       │   │   ├── resolvedDestination.json
│       │   │   ├── resolverDir.json
│       │   │   ├── responseType.json
│       │   │   ├── result.json
│       │   │   ├── ROOT_DESCRIBE_BLOCK_NAME.json
│       │   │   ├── ROOT_NAMESPACE_NAME.json
│       │   │   ├── ROOT_TASK_NAME.json
│       │   │   ├── route.json
│       │   │   ├── RUNNING_TEXT.json
│       │   │   ├── s.json
│       │   │   ├── SCHEMA_PATH.json
│       │   │   ├── se.json
│       │   │   ├── SEARCHABLE_CLASS.json
│       │   │   ├── secret.json
│       │   │   ├── selector.json
│       │   │   ├── SEMVER_SPEC_VERSION.json
│       │   │   ├── sensitiveHeaders.json
│       │   │   ├── sep.json
│       │   │   ├── separator.json
│       │   │   ├── SHAPE_STATE.json
│       │   │   ├── shape.json
│       │   │   ├── SHARED.json
│       │   │   ├── short.json
│       │   │   ├── side.json
│       │   │   ├── SNAPSHOT_VERSION.json
│       │   │   ├── SOURCE_MAPPING_PREFIX.json
│       │   │   ├── source.json
│       │   │   ├── sourceMapContent.json
│       │   │   ├── SPACE_SYMBOL.json
│       │   │   ├── SPACE.json
│       │   │   ├── sqlKeywords.json
│       │   │   ├── sranges.json
│       │   │   ├── st.json
│       │   │   ├── ST.json
│       │   │   ├── stack.json
│       │   │   ├── START_HIDING.json
│       │   │   ├── START_OF_LINE.json
│       │   │   ├── startNoTraversal.json
│       │   │   ├── STATES.json
│       │   │   ├── stats.json
│       │   │   ├── statSync.json
│       │   │   ├── storageStatus.json
│       │   │   ├── storageType.json
│       │   │   ├── str.json
│       │   │   ├── stringifiedObject.json
│       │   │   ├── stringPath.json
│       │   │   ├── stringResult.json
│       │   │   ├── stringTag.json
│       │   │   ├── strValue.json
│       │   │   ├── style.json
│       │   │   ├── SUB_NAME.json
│       │   │   ├── subkey.json
│       │   │   ├── SUBPROTOCOL.json
│       │   │   ├── SUITE_NAME.json
│       │   │   ├── symbolPattern.json
│       │   │   ├── symbolTag.json
│       │   │   ├── t.json
│       │   │   ├── T.json
│       │   │   ├── templateDir.json
│       │   │   ├── tempName.json
│       │   │   ├── text.json
│       │   │   ├── time.json
│       │   │   ├── titleSeparator.json
│       │   │   ├── tmpl.json
│       │   │   ├── tn.json
│       │   │   ├── toValue.json
│       │   │   ├── transform.json
│       │   │   ├── trustProxyDefaultSymbol.json
│       │   │   ├── typeArgumentsKey.json
│       │   │   ├── typeKey.json
│       │   │   ├── typeMessage.json
│       │   │   ├── typesRegistryPackageName.json
│       │   │   ├── u.json
│       │   │   ├── UNDEFINED.json
│       │   │   ├── unit.json
│       │   │   ├── UNMATCHED_SURROGATE_PAIR_REPLACE.json
│       │   │   ├── ur.json
│       │   │   ├── USAGE.json
│       │   │   ├── value.json
│       │   │   ├── Vr.json
│       │   │   ├── watchmanURL.json
│       │   │   ├── webkit.json
│       │   │   ├── xhtml.json
│       │   │   ├── XP_DEFAULT_PATHEXT.json
│       │   │   └── y.json
│       │   ├── Collaborative_Development_with_MCP_Integration.json
│       │   ├── colorCode.json
│       │   ├── comma.json
│       │   ├── command.json
│       │   ├── completionShTemplate.json
│       │   ├── configJsContent.json
│       │   ├── connectionString.json
│       │   ├── Consolidated_TypeScript_Interfaces_Template.json
│       │   ├── Could_you_interpret_the_assumed_applicat___.json
│       │   ├── cssClassStr.json
│       │   ├── currentBoundaryParse.json
│       │   ├── d.json
│       │   ├── Data_Analysis_Template.json
│       │   ├── data.json
│       │   ├── DATA.json
│       │   ├── Database_Query_Assistant.json
│       │   ├── dataWebpackPrefix.json
│       │   ├── debug.json
│       │   ├── Debugging_Assistant.json
│       │   ├── decodeStateVectorV2.json
│       │   ├── DEFAULT_DELIMITER.json
│       │   ├── DEFAULT_DIAGRAM_DIRECTION.json
│       │   ├── DEFAULT_INDENT.json
│       │   ├── DEFAULT_JS_PATTERN.json
│       │   ├── DEFAULT_LOG_TARGET.json
│       │   ├── defaultHelpOpt.json
│       │   ├── defaultHost.json
│       │   ├── deferY18nLookupPrefix.json
│       │   ├── DELIM.json
│       │   ├── delimiter.json
│       │   ├── DEPRECATION.json
│       │   ├── DESCENDING.json
│       │   ├── destMain.json
│       │   ├── development
│       │   │   ├── Collaborative_Development_with_MCP_Integration.json
│       │   │   ├── Consolidated_TypeScript_Interfaces_Template.json
│       │   │   ├── Development_Workflow.json
│       │   │   ├── index.json
│       │   │   ├── MCP_Server_Development_Prompt_Combiner.json
│       │   │   └── Monorepo_Migration_and_Code_Organization_Guide.json
│       │   ├── Development_System_Prompt.json
│       │   ├── Development_Workflow.json
│       │   ├── devops
│       │   │   ├── Docker_Compose_Prompt_Combiner.json
│       │   │   ├── Docker_Containerization_Guide.json
│       │   │   └── index.json
│       │   ├── DID_NOT_THROW.json
│       │   ├── direction.json
│       │   ├── displayValue.json
│       │   ├── DNS.json
│       │   ├── doc.json
│       │   ├── Docker_Compose_Prompt_Combiner.json
│       │   ├── Docker_Containerization_Guide.json
│       │   ├── Docker_MCP_Servers_Orchestration_Guide.json
│       │   ├── DOCUMENTATION_NOTE.json
│       │   ├── DOT.json
│       │   ├── DOTS.json
│       │   ├── dummyCompoundId.json
│       │   ├── e.json
│       │   ├── E.json
│       │   ├── earlyHintsLink.json
│       │   ├── elide.json
│       │   ├── EMPTY.json
│       │   ├── encoded.json
│       │   ├── end.json
│       │   ├── endpoint.json
│       │   ├── environment.json
│       │   ├── ERR_CODE.json
│       │   ├── errMessage.json
│       │   ├── errMsg.json
│       │   ├── ERROR_MESSAGE.json
│       │   ├── error.json
│       │   ├── ERROR.json
│       │   ├── ERRORCLASS.json
│       │   ├── errorMessage.json
│       │   ├── es6Default.json
│       │   ├── ESC.json
│       │   ├── Escapable.json
│       │   ├── escapedChar.json
│       │   ├── escapeFuncStr.json
│       │   ├── escSlash.json
│       │   ├── ev.json
│       │   ├── event.json
│       │   ├── execaMessage.json
│       │   ├── EXPECTED_LABEL.json
│       │   ├── expected.json
│       │   ├── expectedString.json
│       │   ├── expression1.json
│       │   ├── EXTENSION.json
│       │   ├── f.json
│       │   ├── FAIL_TEXT.json
│       │   ├── FILE_BROWSER_FACTORY.json
│       │   ├── fill.json
│       │   ├── findPackageJson.json
│       │   ├── fnKey.json
│       │   ├── Foresight_Assistant.json
│       │   ├── FORMAT.json
│       │   ├── formatted.json
│       │   ├── from.json
│       │   ├── fullpaths.json
│       │   ├── FUNC_ERROR_TEXT.json
│       │   ├── general
│       │   │   └── index.json
│       │   ├── Generate_different_types_of_questions_ab___.json
│       │   ├── Generate_Mermaid_Diagram.json
│       │   ├── GenStateSuspendedStart.json
│       │   ├── GENSYNC_EXPECTED_START.json
│       │   ├── GitHub_Repository_Explorer.json
│       │   ├── gutter.json
│       │   ├── h.json
│       │   ├── handlerFuncName.json
│       │   ├── HASH_UNDEFINED.json
│       │   ├── head.json
│       │   ├── helpMessage.json
│       │   ├── HINT_ARG.json
│       │   ├── HOOK_RETURNED_NOTHING_ERROR_MESSAGE.json
│       │   ├── i.json
│       │   ├── id.json
│       │   ├── identifier.json
│       │   ├── Identifier.json
│       │   ├── INDENT.json
│       │   ├── indentation.json
│       │   ├── index.json
│       │   ├── INDIRECTION_FRAGMENT.json
│       │   ├── Initialize_project_setup_for_a_new_micro___.json
│       │   ├── input.json
│       │   ├── inputText.json
│       │   ├── insert.json
│       │   ├── insertPromptQuery.json
│       │   ├── INSPECT_MAX_BYTES.json
│       │   ├── install_dependencies__build__run__test____.json
│       │   ├── intToCharMap.json
│       │   ├── IS_ITERABLE_SENTINEL.json
│       │   ├── IS_KEYED_SENTINEL.json
│       │   ├── isConfigType.json
│       │   ├── isoSentinel.json
│       │   ├── isSourceNode.json
│       │   ├── j.json
│       │   ├── J.json
│       │   ├── JAKE_CMD.json
│       │   ├── JEST_GLOBAL_NAME.json
│       │   ├── JEST_GLOBALS_MODULE_NAME.json
│       │   ├── JSON_SYNTAX_CHAR.json
│       │   ├── json.json
│       │   ├── jsonType.json
│       │   ├── jupyter_namespaceObject.json
│       │   ├── JUPYTERLAB_DOCMANAGER_PLUGIN_ID.json
│       │   ├── k.json
│       │   ├── KERNEL_STATUS_ERROR_CLASS.json
│       │   ├── key.json
│       │   ├── l.json
│       │   ├── labelId.json
│       │   ├── LATEST_PROTOCOL_VERSION.json
│       │   ├── LETTERDASHNUMBER.json
│       │   ├── LF.json
│       │   ├── LIMIT_REPLACE_NODE.json
│       │   ├── LINE_FEED.json
│       │   ├── logTime.json
│       │   ├── lstatkey.json
│       │   ├── lt.json
│       │   ├── m.json
│       │   ├── maliciousPayload.json
│       │   ├── manager.py
│       │   ├── marker.json
│       │   ├── mask.json
│       │   ├── match.json
│       │   ├── matchingDelim.json
│       │   ├── MAXIMUM_MESSAGE_SIZE.json
│       │   ├── MCP_Integration_Assistant.json
│       │   ├── MCP_Resources_Explorer.json
│       │   ├── MCP_Resources_Integration_Guide.json
│       │   ├── MCP_Server_Development_Prompt_Combiner.json
│       │   ├── MCP_Server_Integration_Guide.json
│       │   ├── mcp-code-generator.json
│       │   ├── mdcContent.json
│       │   ├── Mermaid_Analysis_Expert.json
│       │   ├── Mermaid_Class_Diagram_Generator.json
│       │   ├── Mermaid_Diagram_Generator.json
│       │   ├── Mermaid_Diagram_Modifier.json
│       │   ├── MERMAID_DOM_ID_PREFIX.json
│       │   ├── message.json
│       │   ├── messages.json
│       │   ├── meth.json
│       │   ├── minimatch.json
│       │   ├── MOBILE_QUERY.json
│       │   ├── MOCK_CONSTRUCTOR_NAME.json
│       │   ├── MOCKS_PATTERN.json
│       │   ├── Modify_Mermaid_Diagram.json
│       │   ├── moduleDirectory.json
│       │   ├── Monorepo_Migration_and_Code_Organization_Guide.json
│       │   ├── msg.json
│       │   ├── mtr.json
│       │   ├── Multi-Resource_Context_Assistant.json
│       │   ├── multipartType.json
│       │   ├── n.json
│       │   ├── N.json
│       │   ├── name.json
│       │   ├── NATIVE_PLATFORM.json
│       │   ├── newUrl.json
│       │   ├── NM.json
│       │   ├── NO_ARGUMENTS.json
│       │   ├── NO_DIFF_MESSAGE.json
│       │   ├── NODE_MODULES.json
│       │   ├── nodeInternalPrefix.json
│       │   ├── nonASCIIidentifierStartChars.json
│       │   ├── nonKey.json
│       │   ├── NOT_A_DOT.json
│       │   ├── notCharacterOrDash.json
│       │   ├── notebookURL.json
│       │   ├── notSelector.json
│       │   ├── nullTag.json
│       │   ├── num.json
│       │   ├── NUMBER.json
│       │   ├── o.json
│       │   ├── O.json
│       │   ├── octChar.json
│       │   ├── octetStreamType.json
│       │   ├── operators.json
│       │   ├── other
│       │   │   ├── __image_1___describe_the_icon_in_one_sen___.json
│       │   │   ├── __type.json
│       │   │   ├── Advanced_Multi-Server_Integration_Template.json
│       │   │   ├── Analyze_Mermaid_Diagram.json
│       │   │   ├── Basic_Template.json
│       │   │   ├── Code_Diagram_Documentation_Creator.json
│       │   │   ├── Collaborative_Development_with_MCP_Integration.json
│       │   │   ├── completionShTemplate.json
│       │   │   ├── Could_you_interpret_the_assumed_applicat___.json
│       │   │   ├── DEFAULT_INDENT.json
│       │   │   ├── Docker_MCP_Servers_Orchestration_Guide.json
│       │   │   ├── Generate_different_types_of_questions_ab___.json
│       │   │   ├── Generate_Mermaid_Diagram.json
│       │   │   ├── GitHub_Repository_Explorer.json
│       │   │   ├── index.json
│       │   │   ├── Initialize_project_setup_for_a_new_micro___.json
│       │   │   ├── install_dependencies__build__run__test____.json
│       │   │   ├── LINE_FEED.json
│       │   │   ├── MCP_Resources_Explorer.json
│       │   │   ├── MCP_Resources_Integration_Guide.json
│       │   │   ├── MCP_Server_Integration_Guide.json
│       │   │   ├── mcp-code-generator.json
│       │   │   ├── Mermaid_Class_Diagram_Generator.json
│       │   │   ├── Mermaid_Diagram_Generator.json
│       │   │   ├── Mermaid_Diagram_Modifier.json
│       │   │   ├── Modify_Mermaid_Diagram.json
│       │   │   ├── Multi-Resource_Context_Assistant.json
│       │   │   ├── output.json
│       │   │   ├── sseUrl.json
│       │   │   ├── string.json
│       │   │   ├── Task_List_Helper.json
│       │   │   ├── Template-Based_MCP_Integration.json
│       │   │   ├── Test_Prompt.json
│       │   │   ├── type.json
│       │   │   ├── VERSION.json
│       │   │   ├── WIN_SLASH.json
│       │   │   └── You_are_limited_to_respond_Yes_or_No_onl___.json
│       │   ├── out.json
│       │   ├── output.json
│       │   ├── OUTSIDE_JEST_VM_PROTOCOL.json
│       │   ├── override.json
│       │   ├── p.json
│       │   ├── PACKAGE_FILENAME.json
│       │   ├── PACKAGE_JSON.json
│       │   ├── packageVersion.json
│       │   ├── paddedNumber.json
│       │   ├── page.json
│       │   ├── parseClass.json
│       │   ├── PATH_NODE_MODULES.json
│       │   ├── path.json
│       │   ├── pathExt.json
│       │   ├── pattern.json
│       │   ├── PatternBoolean.json
│       │   ├── pBuiltins.json
│       │   ├── pFloatForm.json
│       │   ├── pkg.json
│       │   ├── PLUGIN_ID_DOC_MANAGER.json
│       │   ├── plusChar.json
│       │   ├── PN_CHARS.json
│       │   ├── point.json
│       │   ├── prefix.json
│       │   ├── PRETTY_PLACEHOLDER.json
│       │   ├── Project_Analysis_Assistant.json
│       │   ├── ProjectsUpdatedInBackgroundEvent.json
│       │   ├── PromptCombiner_Interface.json
│       │   ├── promptId.json
│       │   ├── property_prefix.json
│       │   ├── pubkey256.json
│       │   ├── Q.json
│       │   ├── qmark.json
│       │   ├── QO.json
│       │   ├── query.json
│       │   ├── querystringType.json
│       │   ├── queryText.json
│       │   ├── r.json
│       │   ├── R.json
│       │   ├── rangeStart.json
│       │   ├── re.json
│       │   ├── reI.json
│       │   ├── REQUIRED_FIELD_SYMBOL.json
│       │   ├── Research_Assistant.json
│       │   ├── reserve.json
│       │   ├── resolvedDestination.json
│       │   ├── resolverDir.json
│       │   ├── responseType.json
│       │   ├── result.json
│       │   ├── ROOT_DESCRIBE_BLOCK_NAME.json
│       │   ├── ROOT_NAMESPACE_NAME.json
│       │   ├── ROOT_TASK_NAME.json
│       │   ├── route.json
│       │   ├── RUNNING_TEXT.json
│       │   ├── RXstyle.json
│       │   ├── s.json
│       │   ├── SCHEMA_PATH.json
│       │   ├── schemaQuery.json
│       │   ├── se.json
│       │   ├── SEARCHABLE_CLASS.json
│       │   ├── secret.json
│       │   ├── selector.json
│       │   ├── SEMVER_SPEC_VERSION.json
│       │   ├── sensitiveHeaders.json
│       │   ├── sep.json
│       │   ├── separator.json
│       │   ├── Sequential_Data_Analysis_with_MCP_Integration.json
│       │   ├── SHAPE_STATE.json
│       │   ├── shape.json
│       │   ├── SHARED.json
│       │   ├── short.json
│       │   ├── side.json
│       │   ├── SNAPSHOT_VERSION.json
│       │   ├── SOLID_Code_Analysis_Visualizer.json
│       │   ├── SOURCE_MAPPING_PREFIX.json
│       │   ├── source.json
│       │   ├── sourceMapContent.json
│       │   ├── SPACE_SYMBOL.json
│       │   ├── SPACE.json
│       │   ├── sqlKeywords.json
│       │   ├── sranges.json
│       │   ├── sseUrl.json
│       │   ├── st.json
│       │   ├── ST.json
│       │   ├── stack.json
│       │   ├── START_HIDING.json
│       │   ├── START_OF_LINE.json
│       │   ├── startNoTraversal.json
│       │   ├── STATES.json
│       │   ├── stats.json
│       │   ├── statSync.json
│       │   ├── status.json
│       │   ├── storageStatus.json
│       │   ├── storageType.json
│       │   ├── str.json
│       │   ├── string.json
│       │   ├── stringifiedObject.json
│       │   ├── stringPath.json
│       │   ├── stringResult.json
│       │   ├── stringTag.json
│       │   ├── strValue.json
│       │   ├── style.json
│       │   ├── SUB_NAME.json
│       │   ├── subkey.json
│       │   ├── SUBPROTOCOL.json
│       │   ├── SUITE_NAME.json
│       │   ├── symbolPattern.json
│       │   ├── symbolTag.json
│       │   ├── system
│       │   │   ├── Aa.json
│       │   │   ├── b.json
│       │   │   ├── Development_System_Prompt.json
│       │   │   ├── index.json
│       │   │   ├── marker.json
│       │   │   ├── PATH_NODE_MODULES.json
│       │   │   ├── ProjectsUpdatedInBackgroundEvent.json
│       │   │   ├── RXstyle.json
│       │   │   ├── status.json
│       │   │   └── versionMajorMinor.json
│       │   ├── t.json
│       │   ├── T.json
│       │   ├── Task_List_Helper.json
│       │   ├── Template-Based_MCP_Integration.json
│       │   ├── template.py
│       │   ├── templateDir.json
│       │   ├── tempName.json
│       │   ├── Test_Prompt.json
│       │   ├── text.json
│       │   ├── time.json
│       │   ├── titleSeparator.json
│       │   ├── tmpl.json
│       │   ├── tn.json
│       │   ├── TOPBAR_FACTORY.json
│       │   ├── toValue.json
│       │   ├── transform.json
│       │   ├── trustProxyDefaultSymbol.json
│       │   ├── txt.json
│       │   ├── type.json
│       │   ├── typeArgumentsKey.json
│       │   ├── typeKey.json
│       │   ├── typeMessage.json
│       │   ├── typesRegistryPackageName.json
│       │   ├── u.json
│       │   ├── UNDEFINED.json
│       │   ├── unit.json
│       │   ├── UNMATCHED_SURROGATE_PAIR_REPLACE.json
│       │   ├── ur.json
│       │   ├── usage.json
│       │   ├── USAGE.json
│       │   ├── user
│       │   │   ├── backupId.json
│       │   │   ├── DESCENDING.json
│       │   │   ├── encoded.json
│       │   │   ├── index.json
│       │   │   ├── J.json
│       │   │   ├── MOBILE_QUERY.json
│       │   │   ├── promptId.json
│       │   │   ├── schemaQuery.json
│       │   │   ├── TOPBAR_FACTORY.json
│       │   │   ├── txt.json
│       │   │   └── usage.json
│       │   ├── value.json
│       │   ├── VERSION.json
│       │   ├── version.py
│       │   ├── versionMajorMinor.json
│       │   ├── Vr.json
│       │   ├── watchmanURL.json
│       │   ├── webkit.json
│       │   ├── WIN_SLASH.json
│       │   ├── xhtml.json
│       │   ├── XP_DEFAULT_PATHEXT.json
│       │   ├── y.json
│       │   └── You_are_limited_to_respond_Yes_or_No_onl___.json
│       ├── resources
│       │   ├── __init__.py
│       │   ├── code_examples
│       │   │   └── index.json
│       │   ├── config
│       │   │   └── index.json
│       │   ├── documentation
│       │   │   └── index.json
│       │   ├── images
│       │   │   └── index.json
│       │   ├── index.json
│       │   └── other
│       │       └── index.json
│       ├── server.py
│       ├── templates
│       │   ├── __init__.py
│       │   ├── AbstractFactory.json
│       │   ├── Adapter.json
│       │   ├── base.py
│       │   ├── Builder.json
│       │   ├── Chain.json
│       │   ├── Command.json
│       │   ├── component
│       │   │   ├── AbstractFactory.json
│       │   │   ├── Adapter.json
│       │   │   ├── Builder.json
│       │   │   ├── Chain.json
│       │   │   ├── Command.json
│       │   │   ├── Decorator.json
│       │   │   ├── Facade.json
│       │   │   ├── Factory.json
│       │   │   ├── Iterator.json
│       │   │   ├── Mediator.json
│       │   │   ├── Memento.json
│       │   │   ├── Observer.json
│       │   │   ├── Prototype.json
│       │   │   ├── Proxy.json
│       │   │   ├── Singleton.json
│       │   │   ├── State.json
│       │   │   ├── Strategy.json
│       │   │   ├── TemplateMethod.json
│       │   │   └── Visitor.json
│       │   ├── component.py
│       │   ├── Decorator.json
│       │   ├── Facade.json
│       │   ├── Factory.json
│       │   ├── index.json
│       │   ├── Iterator.json
│       │   ├── manager.py
│       │   ├── Mediator.json
│       │   ├── Memento.json
│       │   ├── Observer.json
│       │   ├── project.py
│       │   ├── Prototype.json
│       │   ├── Proxy.json
│       │   ├── renderer.py
│       │   ├── Singleton.json
│       │   ├── State.json
│       │   ├── Strategy.json
│       │   ├── template_manager.py
│       │   ├── TemplateMethod.json
│       │   ├── types.py
│       │   └── Visitor.json
│       └── utils
│           └── __init__.py
├── SUMMARY.md
├── TASK_COMPLETION_SUMMARY.md
├── templates
│   └── openssl
│       ├── files
│       │   ├── CMakeLists.txt.jinja2
│       │   ├── conanfile.py.jinja2
│       │   ├── main.cpp.jinja2
│       │   └── README.md.jinja2
│       ├── openssl-consumer.json
│       └── template.json
├── test_openssl_integration.sh
├── test_package
│   └── conanfile.py
└── tests
    ├── __init__.py
    ├── conftest.py
    ├── integration
    │   ├── test_core_integration.py
    │   ├── test_mermaid_integration.py
    │   ├── test_prompt_manager_integration.py
    │   └── test_server_integration.py
    ├── test_aws_mcp.py
    ├── test_base_classes.py
    ├── test_config.py
    ├── test_exceptions.py
    ├── test_mermaid.py
    ├── test_prompts.py
    └── test_templates.py
```

# Files

--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/fan_out_orchestrator.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Cross-Repository Release Fan-Out Orchestrator
  3 | 
  4 | Manages coordinated releases across the OpenSSL ecosystem by triggering
  5 | workflows in dependent repositories based on dependency relationships.
  6 | """
  7 | 
import asyncio
import json
import logging
import os
import re
from dataclasses import dataclass
from datetime import datetime, timezone
from enum import Enum
from typing import Any, Dict, List, Optional, Set

import httpx
from github import Github
from github.Repository import Repository
 19 | 
 20 | logger = logging.getLogger(__name__)
 21 | 
 22 | 
class ReleaseType(Enum):
    """Ecosystem layer a release originates from.

    The layer determines which dependent repositories a release fans out
    to (see FanOutOrchestrator and ReleaseCoordinator below).
    """
    FOUNDATION = "foundation"  # openssl-conan-base, openssl-fips-policy
    TOOLING = "tooling"       # openssl-tools
    DOMAIN = "domain"         # openssl
    ORCHESTRATION = "orchestration"  # mcp-project-orchestrator
 29 | 
 30 | 
@dataclass
class RepositoryInfo:
    """Information about a repository in the ecosystem."""
    # Repository name without the owner, used as the registry key.
    name: str
    # Fully qualified "owner/name" used for GitHub API lookups.
    full_name: str
    # Ecosystem layer this repository releases from.
    release_type: ReleaseType
    # Registry names of repositories this one depends on.
    dependencies: List[str]
    # Registry names of repositories that depend on this one.
    dependents: List[str]
    # Path of a file holding the repository version, if the repo has one.
    version_file: Optional[str] = None
    # Path of the Conan recipe within the repository.
    conanfile_path: str = "conanfile.py"
 41 | 
 42 | 
@dataclass
class ReleaseTrigger:
    """Represents a release trigger for fan-out."""
    # Registry name of the repository whose release was triggered.
    source_repo: str
    # Version string propagated through the cascade.
    version: str
    # Ecosystem layer of the triggered release.
    release_type: ReleaseType
    # Timestamp recorded when the trigger was created.
    triggered_at: datetime
    # Registry names of the dependencies whose release caused this
    # trigger (empty for the cascade's source repository).
    dependencies_updated: List[str]
 51 | 
 52 | 
 53 | class FanOutOrchestrator:
 54 |     """Orchestrates cross-repository releases and dependency updates."""
 55 |     
 56 |     def __init__(self, github_token: str):
 57 |         self.github = Github(github_token)
 58 |         self.repositories = self._initialize_repositories()
 59 |         self.dependency_graph = self._build_dependency_graph()
 60 |         
 61 |     def _initialize_repositories(self) -> Dict[str, RepositoryInfo]:
 62 |         """Initialize repository information."""
 63 |         return {
 64 |             "openssl-conan-base": RepositoryInfo(
 65 |                 name="openssl-conan-base",
 66 |                 full_name="sparesparrow/openssl-conan-base",
 67 |                 release_type=ReleaseType.FOUNDATION,
 68 |                 dependencies=[],
 69 |                 dependents=["openssl-tools"],
 70 |                 version_file=None,
 71 |                 conanfile_path="conanfile.py"
 72 |             ),
 73 |             "openssl-fips-policy": RepositoryInfo(
 74 |                 name="openssl-fips-policy",
 75 |                 full_name="sparesparrow/openssl-fips-policy",
 76 |                 release_type=ReleaseType.FOUNDATION,
 77 |                 dependencies=[],
 78 |                 dependents=["openssl-tools"],
 79 |                 version_file=None,
 80 |                 conanfile_path="conanfile.py"
 81 |             ),
 82 |             "openssl-tools": RepositoryInfo(
 83 |                 name="openssl-tools",
 84 |                 full_name="sparesparrow/openssl-tools",
 85 |                 release_type=ReleaseType.TOOLING,
 86 |                 dependencies=["openssl-conan-base", "openssl-fips-policy"],
 87 |                 dependents=["openssl"],
 88 |                 version_file=None,
 89 |                 conanfile_path="conanfile.py"
 90 |             ),
 91 |             "openssl": RepositoryInfo(
 92 |                 name="openssl",
 93 |                 full_name="sparesparrow/openssl",
 94 |                 release_type=ReleaseType.DOMAIN,
 95 |                 dependencies=["openssl-tools"],
 96 |                 dependents=[],
 97 |                 version_file="VERSION.dat",
 98 |                 conanfile_path="conanfile.py"
 99 |             ),
100 |             "mcp-project-orchestrator": RepositoryInfo(
101 |                 name="mcp-project-orchestrator",
102 |                 full_name="sparesparrow/mcp-project-orchestrator",
103 |                 release_type=ReleaseType.ORCHESTRATION,
104 |                 dependencies=[],
105 |                 dependents=[],
106 |                 version_file=None,
107 |                 conanfile_path="conanfile.py"
108 |             )
109 |         }
110 |     
111 |     def _build_dependency_graph(self) -> Dict[str, Set[str]]:
112 |         """Build dependency graph for release ordering."""
113 |         graph = {}
114 |         for repo_name, repo_info in self.repositories.items():
115 |             graph[repo_name] = set(repo_info.dependencies)
116 |         return graph
117 |     
118 |     async def trigger_release_cascade(self, source_repo: str, version: str, 
119 |                                     release_type: ReleaseType) -> List[ReleaseTrigger]:
120 |         """Trigger a release cascade starting from a source repository."""
121 |         triggers = []
122 |         
123 |         # Create initial trigger
124 |         initial_trigger = ReleaseTrigger(
125 |             source_repo=source_repo,
126 |             version=version,
127 |             release_type=release_type,
128 |             triggered_at=datetime.utcnow(),
129 |             dependencies_updated=[]
130 |         )
131 |         triggers.append(initial_trigger)
132 |         
133 |         # Find all dependent repositories
134 |         dependents = self._get_all_dependents(source_repo)
135 |         
136 |         # Trigger releases in dependency order
137 |         for dependent in dependents:
138 |             try:
139 |                 success = await self._trigger_dependent_release(
140 |                     dependent, source_repo, version
141 |                 )
142 |                 if success:
143 |                     trigger = ReleaseTrigger(
144 |                         source_repo=dependent,
145 |                         version=version,
146 |                         release_type=self.repositories[dependent].release_type,
147 |                         triggered_at=datetime.utcnow(),
148 |                         dependencies_updated=[source_repo]
149 |                     )
150 |                     triggers.append(trigger)
151 |                     logger.info(f"Successfully triggered release in {dependent}")
152 |                 else:
153 |                     logger.error(f"Failed to trigger release in {dependent}")
154 |                     
155 |             except Exception as e:
156 |                 logger.error(f"Error triggering release in {dependent}: {e}")
157 |         
158 |         return triggers
159 |     
160 |     def _get_all_dependents(self, repo_name: str) -> List[str]:
161 |         """Get all repositories that depend on the given repository."""
162 |         dependents = set()
163 |         to_process = [repo_name]
164 |         
165 |         while to_process:
166 |             current = to_process.pop(0)
167 |             for repo_name_check, repo_info in self.repositories.items():
168 |                 if current in repo_info.dependencies and repo_name_check not in dependents:
169 |                     dependents.add(repo_name_check)
170 |                     to_process.append(repo_name_check)
171 |         
172 |         return list(dependents)
173 |     
174 |     async def _trigger_dependent_release(self, dependent_repo: str, 
175 |                                        source_repo: str, version: str) -> bool:
176 |         """Trigger a release in a dependent repository."""
177 |         try:
178 |             repo = self.github.get_repo(self.repositories[dependent_repo].full_name)
179 |             
180 |             # Trigger workflow_dispatch event
181 |             workflow_dispatch_inputs = {
182 |                 "source_repository": source_repo,
183 |                 "source_version": version,
184 |                 "dependency_update": "true",
185 |                 "triggered_by": "fan-out-orchestrator"
186 |             }
187 |             
188 |             # Find the appropriate workflow to trigger
189 |             workflows = repo.get_workflows()
190 |             target_workflow = None
191 |             
192 |             for workflow in workflows:
193 |                 if workflow.name.lower() in ["release", "build", "ci"]:
194 |                     target_workflow = workflow
195 |                     break
196 |             
197 |             if not target_workflow:
198 |                 logger.warning(f"No suitable workflow found in {dependent_repo}")
199 |                 return False
200 |             
201 |             # Dispatch the workflow
202 |             target_workflow.create_dispatch(
203 |                 ref="main",  # or get default branch
204 |                 inputs=workflow_dispatch_inputs
205 |             )
206 |             
207 |             logger.info(f"Dispatched workflow in {dependent_repo}")
208 |             return True
209 |             
210 |         except Exception as e:
211 |             logger.error(f"Error dispatching workflow in {dependent_repo}: {e}")
212 |             return False
213 |     
214 |     async def update_dependency_versions(self, repo_name: str, 
215 |                                        dependency_updates: Dict[str, str]) -> bool:
216 |         """Update dependency versions in a repository's conanfile.py."""
217 |         try:
218 |             repo = self.github.get_repo(self.repositories[repo_name].full_name)
219 |             
220 |             # Get the conanfile.py content
221 |             conanfile_content = repo.get_contents(
222 |                 self.repositories[repo_name].conanfile_path
223 |             ).decoded_content.decode('utf-8')
224 |             
225 |             # Update dependency versions
226 |             updated_content = conanfile_content
227 |             for dep_name, new_version in dependency_updates.items():
228 |                 # Simple string replacement - in production, use proper parsing
229 |                 old_pattern = f'"{dep_name}/'
230 |                 new_pattern = f'"{dep_name}/{new_version}@'
231 |                 updated_content = updated_content.replace(old_pattern, new_pattern)
232 |             
233 |             # Create a new branch and commit
234 |             branch_name = f"update-dependencies-{datetime.utcnow().strftime('%Y%m%d-%H%M%S')}"
235 |             
236 |             # Get default branch
237 |             default_branch = repo.default_branch
238 |             
239 |             # Create new branch
240 |             ref = repo.get_git_ref(f"heads/{default_branch}")
241 |             repo.create_git_ref(f"refs/heads/{branch_name}", ref.object.sha)
242 |             
243 |             # Update file
244 |             repo.update_file(
245 |                 path=self.repositories[repo_name].conanfile_path,
246 |                 message=f"Update dependencies: {', '.join(f'{k}={v}' for k, v in dependency_updates.items())}",
247 |                 content=updated_content,
248 |                 sha=repo.get_contents(
249 |                     self.repositories[repo_name].conanfile_path
250 |                 ).sha,
251 |                 branch=branch_name
252 |             )
253 |             
254 |             # Create pull request
255 |             pr = repo.create_pull(
256 |                 title=f"Update dependencies from {list(dependency_updates.keys())[0]}",
257 |                 body=f"""
258 | ## Dependency Updates
259 | 
260 | This PR updates the following dependencies:
261 | 
262 | {chr(10).join(f'- **{dep}**: {version}' for dep, version in dependency_updates.items())}
263 | 
264 | ### Changes
265 | - Updated `conanfile.py` with new dependency versions
266 | - Triggered by fan-out orchestrator from {list(dependency_updates.keys())[0]}
267 | 
268 | ### Testing
269 | - [ ] Verify builds pass with new dependencies
270 | - [ ] Run integration tests
271 | - [ ] Check for any breaking changes
272 | 
273 | ---
274 | *This PR was automatically created by the Fan-Out Orchestrator*
275 |                 """,
276 |                 head=branch_name,
277 |                 base=default_branch
278 |             )
279 |             
280 |             logger.info(f"Created PR #{pr.number} in {repo_name} for dependency updates")
281 |             return True
282 |             
283 |         except Exception as e:
284 |             logger.error(f"Error updating dependencies in {repo_name}: {e}")
285 |             return False
286 |     
287 |     async def get_release_status(self, triggers: List[ReleaseTrigger]) -> Dict[str, str]:
288 |         """Get the status of release triggers."""
289 |         status = {}
290 |         
291 |         for trigger in triggers:
292 |             try:
293 |                 repo = self.github.get_repo(self.repositories[trigger.source_repo].full_name)
294 |                 workflows = repo.get_workflow_runs(
295 |                     head_sha=trigger.triggered_at.isoformat(),
296 |                     per_page=10
297 |                 )
298 |                 
299 |                 # Find the most recent workflow run
300 |                 latest_run = None
301 |                 for run in workflows:
302 |                     if run.created_at >= trigger.triggered_at:
303 |                         latest_run = run
304 |                         break
305 |                 
306 |                 if latest_run:
307 |                     status[trigger.source_repo] = latest_run.conclusion or latest_run.status
308 |                 else:
309 |                     status[trigger.source_repo] = "not_found"
310 |                     
311 |             except Exception as e:
312 |                 logger.error(f"Error getting status for {trigger.source_repo}: {e}")
313 |                 status[trigger.source_repo] = "error"
314 |         
315 |         return status
316 | 
317 | 
class ReleaseCoordinator:
    """Coordinates releases across the OpenSSL ecosystem.

    Thin layer-aware facade over FanOutOrchestrator: each coordinate_*
    method kicks off the cascade appropriate for the released layer.

    Note: the return annotations previously used the builtin function
    ``any`` as a type; they now use ``typing.Any``.
    """

    def __init__(self, github_token: str):
        """Create a coordinator with its own FanOutOrchestrator.

        Args:
            github_token: GitHub token forwarded to the orchestrator.
        """
        self.orchestrator = FanOutOrchestrator(github_token)

    async def coordinate_foundation_release(self, version: str) -> Dict[str, Any]:
        """Coordinate a foundation layer release (openssl-conan-base or openssl-fips-policy).

        Args:
            version: Version of the foundation release being propagated.

        Returns:
            Summary dict with the version, the tooling-layer triggers,
            and a "coordinated" status.
        """
        logger.info(f"Coordinating foundation release: {version}")

        # Trigger tooling layer updates
        tooling_triggers = await self.orchestrator.trigger_release_cascade(
            "openssl-tools", version, ReleaseType.TOOLING
        )

        return {
            "foundation_version": version,
            "tooling_triggers": tooling_triggers,
            "status": "coordinated"
        }

    async def coordinate_tooling_release(self, version: str) -> Dict[str, Any]:
        """Coordinate a tooling layer release (openssl-tools).

        Args:
            version: Version of the tooling release being propagated.

        Returns:
            Summary dict with the version, the domain-layer triggers,
            and a "coordinated" status.
        """
        logger.info(f"Coordinating tooling release: {version}")

        # Trigger domain layer updates
        domain_triggers = await self.orchestrator.trigger_release_cascade(
            "openssl", version, ReleaseType.DOMAIN
        )

        return {
            "tooling_version": version,
            "domain_triggers": domain_triggers,
            "status": "coordinated"
        }

    async def coordinate_domain_release(self, version: str) -> Dict[str, Any]:
        """Coordinate a domain layer release (openssl).

        Args:
            version: Version of the domain release.

        Returns:
            Summary dict with the version, the orchestration-layer
            triggers, and a "coordinated" status.
        """
        logger.info(f"Coordinating domain release: {version}")

        # Domain releases don't trigger other releases
        # but we can update orchestration layer if needed
        orchestration_triggers = await self.orchestrator.trigger_release_cascade(
            "mcp-project-orchestrator", version, ReleaseType.ORCHESTRATION
        )

        return {
            "domain_version": version,
            "orchestration_triggers": orchestration_triggers,
            "status": "coordinated"
        }
369 | 
370 | 
async def main():
    """Example entry point: coordinate a tooling release.

    Reads the GitHub token from the GITHUB_TOKEN environment variable
    instead of a hard-coded placeholder, so no secret lives in source
    control and the script is actually runnable.

    Raises:
        RuntimeError: If GITHUB_TOKEN is not set.
    """
    github_token = os.environ.get("GITHUB_TOKEN")
    if not github_token:
        raise RuntimeError("GITHUB_TOKEN environment variable is not set")

    # Initialize coordinator
    coordinator = ReleaseCoordinator(github_token)

    # Example: Coordinate a tooling release
    result = await coordinator.coordinate_tooling_release("1.2.5")
    logger.info(f"Release coordination result: {result}")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())
```

--------------------------------------------------------------------------------
/mcp-project-orchestrator/openssl/tests/test_cursor_deployer.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Tests for Cursor configuration deployer.
  3 | 
  4 | This module contains tests for the CursorConfigDeployer class and related functionality.
  5 | """
  6 | 
  7 | import pytest
  8 | import tempfile
  9 | import shutil
 10 | from pathlib import Path
 11 | from unittest.mock import Mock, patch
 12 | 
 13 | from mcp_orchestrator.cursor_deployer import CursorConfigDeployer
 14 | from mcp_orchestrator.platform_detector import PlatformDetector
 15 | from mcp_orchestrator.cursor_config import CursorConfig
 16 | 
 17 | 
 18 | class TestCursorConfigDeployer:
 19 |     """Test cases for CursorConfigDeployer."""
 20 |     
 21 |     def setup_method(self):
 22 |         """Set up test fixtures."""
 23 |         self.temp_dir = tempfile.mkdtemp()
 24 |         self.repo_root = Path(self.temp_dir) / "test_repo"
 25 |         self.package_root = Path(self.temp_dir) / "test_package"
 26 |         
 27 |         # Create test repository
 28 |         self.repo_root.mkdir(parents=True)
 29 |         
 30 |         # Create test package structure
 31 |         self.package_root.mkdir(parents=True)
 32 |         (self.package_root / "cursor-rules" / "rules").mkdir(parents=True)
 33 |         (self.package_root / "cursor-rules" / "prompts").mkdir(parents=True)
 34 |         
 35 |         # Create test templates
 36 |         self._create_test_templates()
 37 |         
 38 |         # Create deployer
 39 |         self.deployer = CursorConfigDeployer(self.repo_root, self.package_root)
 40 |     
 41 |     def teardown_method(self):
 42 |         """Clean up test fixtures."""
 43 |         shutil.rmtree(self.temp_dir)
 44 |     
 45 |     def _create_test_templates(self):
 46 |         """Create test template files."""
 47 |         # Create shared rule template
 48 |         shared_template = self.package_root / "cursor-rules" / "rules" / "shared.mdc.jinja2"
 49 |         shared_template.write_text("""---
 50 | title: Shared Rules
 51 | description: Common rules for all platforms
 52 | created: {{ timestamp }}
 53 | platform: shared
 54 | user: {{ user }}
 55 | ---
 56 | 
 57 | # Shared Rules
 58 | 
 59 | This is a test shared rule template.
 60 | Platform: {{ os }}
 61 | User: {{ user }}
 62 | """)
 63 |         
 64 |         # Create Linux rule template
 65 |         linux_template = self.package_root / "cursor-rules" / "rules" / "linux-dev.mdc.jinja2"
 66 |         linux_template.write_text("""---
 67 | title: Linux Development Rules
 68 | description: Linux-specific development rules
 69 | created: {{ timestamp }}
 70 | platform: linux
 71 | user: {{ user }}
 72 | ---
 73 | 
 74 | # Linux Development Rules
 75 | 
 76 | This is a test Linux rule template.
 77 | OS: {{ os }}
 78 | Architecture: {{ architecture }}
 79 | """)
 80 |         
 81 |         # Create prompt template
 82 |         prompt_template = self.package_root / "cursor-rules" / "prompts" / "test-prompt.md.jinja2"
 83 |         prompt_template.write_text("""# Test Prompt
 84 | 
 85 | This is a test prompt template.
 86 | Platform: {{ os }}
 87 | User: {{ user }}
 88 | """)
 89 |         
 90 |         # Create MCP config template
 91 |         mcp_template = self.package_root / "cursor-rules" / "mcp.json.jinja2"
 92 |         mcp_template.write_text("""{
 93 |   "mcpServers": {
 94 |     "test-server": {
 95 |       "command": "{{ platform_detector.get_mcp_command() }}",
 96 |       "args": ["-y", "@test/mcp-server"],
 97 |       "env": {
 98 |         "PLATFORM": "{{ os }}",
 99 |         "USER": "{{ user }}"
100 |       }
101 |     }
102 |   }
103 | }
104 | """)
105 |     
106 |     def test_initialization(self):
107 |         """Test deployer initialization."""
108 |         assert self.deployer.repo_root == self.repo_root
109 |         assert self.deployer.package_root == self.package_root
110 |         assert self.deployer.cursor_dir == self.repo_root / ".cursor"
111 |         assert self.deployer.templates_dir == self.package_root / "cursor-rules"
112 |     
113 |     def test_detect_platform(self):
114 |         """Test platform detection."""
115 |         platform_info = self.deployer.detect_platform()
116 |         
117 |         assert "os" in platform_info
118 |         assert "architecture" in platform_info
119 |         assert "python_version" in platform_info
120 |         assert "user" in platform_info
121 |         assert "home" in platform_info
122 |         assert "is_ci" in platform_info
123 |     
124 |     def test_deploy_basic(self):
125 |         """Test basic deployment."""
126 |         self.deployer.deploy()
127 |         
128 |         # Check that .cursor directory was created
129 |         assert self.deployer.cursor_dir.exists()
130 |         assert (self.deployer.cursor_dir / "rules").exists()
131 |         assert (self.deployer.cursor_dir / "prompts").exists()
132 |         
133 |         # Check that shared rule was deployed
134 |         shared_rule = self.deployer.cursor_dir / "rules" / "shared.mdc"
135 |         assert shared_rule.exists()
136 |         
137 |         # Check that platform-specific rule was deployed
138 |         platform_info = self.deployer.detect_platform()
139 |         os_name = platform_info["os"]
140 |         platform_rule = self.deployer.cursor_dir / "rules" / f"{os_name}-dev.mdc"
141 |         assert platform_rule.exists()
142 |         
143 |         # Check that prompt was deployed
144 |         prompt_file = self.deployer.cursor_dir / "prompts" / "test-prompt.md"
145 |         assert prompt_file.exists()
146 |         
147 |         # Check that MCP config was deployed
148 |         mcp_config = self.deployer.cursor_dir / "mcp.json"
149 |         assert mcp_config.exists()
150 |     
151 |     def test_deploy_with_custom_rules(self):
152 |         """Test deployment with custom rules."""
153 |         # Create custom rule file
154 |         custom_rule = Path(self.temp_dir) / "custom-rule.mdc"
155 |         custom_rule.write_text("""---
156 | title: Custom Rule
157 | description: A custom rule for testing
158 | ---
159 | 
160 | # Custom Rule
161 | 
162 | This is a custom rule.
163 | """)
164 |         
165 |         # Deploy with custom rules
166 |         self.deployer.deploy(custom_rules=[custom_rule])
167 |         
168 |         # Check that custom rule was imported
169 |         custom_dir = self.deployer.cursor_dir / "rules" / "custom"
170 |         assert custom_dir.exists()
171 |         
172 |         imported_rule = custom_dir / "custom-rule.mdc"
173 |         assert imported_rule.exists()
174 |         assert imported_rule.read_text() == custom_rule.read_text()
175 |     
176 |     def test_deploy_opt_out(self):
177 |         """Test deployment opt-out."""
178 |         # Deploy with opt-out
179 |         self.deployer.deploy(opt_out=True)
180 |         
181 |         # Check that .cursor directory was not created
182 |         assert not self.deployer.cursor_dir.exists()
183 |     
184 |     def test_deploy_force(self):
185 |         """Test deployment with force flag."""
186 |         # Deploy once
187 |         self.deployer.deploy()
188 |         assert self.deployer.cursor_dir.exists()
189 |         
190 |         # Deploy again with force
191 |         self.deployer.deploy(force=True)
192 |         assert self.deployer.cursor_dir.exists()
193 |     
194 |     def test_deploy_existing_without_force(self):
195 |         """Test deployment when .cursor already exists without force."""
196 |         # Deploy once
197 |         self.deployer.deploy()
198 |         assert self.deployer.cursor_dir.exists()
199 |         
200 |         # Try to deploy again without force (should not overwrite)
201 |         with patch('builtins.print') as mock_print:
202 |             self.deployer.deploy(force=False)
203 |             mock_print.assert_called_with("ℹ️  .cursor/ already exists. Use --force to overwrite.")
204 |     
205 |     def test_show_status(self):
206 |         """Test status display."""
207 |         # Deploy configuration
208 |         self.deployer.deploy()
209 |         
210 |         # Show status
211 |         with patch('builtins.print') as mock_print:
212 |             self.deployer.show_status()
213 |             # Check that status was printed
214 |             assert mock_print.call_count > 0
215 |     
216 |     def test_dry_run(self):
217 |         """Test dry run mode."""
218 |         with patch('builtins.print') as mock_print:
219 |             self.deployer.dry_run()
220 |             # Check that dry run information was printed
221 |             assert mock_print.call_count > 0
222 |     
223 |     def test_render_template(self):
224 |         """Test template rendering."""
225 |         # Create a simple template
226 |         template_path = self.package_root / "cursor-rules" / "test-template.jinja2"
227 |         template_path.write_text("Hello {{ user }} from {{ os }}!")
228 |         
229 |         # Render template
230 |         output_path = self.deployer.cursor_dir / "test-output.txt"
231 |         self.deployer.cursor_dir.mkdir(parents=True)
232 |         
233 |         platform_info = self.deployer.detect_platform()
234 |         self.deployer._render_template(
235 |             "test-template.jinja2",
236 |             output_path,
237 |             platform_info
238 |         )
239 |         
240 |         # Check output
241 |         assert output_path.exists()
242 |         content = output_path.read_text()
243 |         assert "Hello" in content
244 |         assert platform_info["user"] in content
245 |         assert platform_info["os"] in content
246 | 
247 | 
248 | class TestPlatformDetector:
249 |     """Test cases for PlatformDetector."""
250 |     
251 |     def test_detect_platform(self):
252 |         """Test platform detection."""
253 |         detector = PlatformDetector()
254 |         platform_info = detector.detect_platform()
255 |         
256 |         assert "os" in platform_info
257 |         assert "architecture" in platform_info
258 |         assert "python_version" in platform_info
259 |         assert "user" in platform_info
260 |         assert "home" in platform_info
261 |         assert "is_ci" in platform_info
262 |     
263 |     def test_get_rule_template_name(self):
264 |         """Test rule template name selection."""
265 |         detector = PlatformDetector()
266 |         template_name = detector.get_rule_template_name()
267 |         
268 |         assert template_name in ["linux-dev", "macos-dev", "windows-dev", "ci-linux"]
269 |     
270 |     def test_get_mcp_command(self):
271 |         """Test MCP command selection."""
272 |         detector = PlatformDetector()
273 |         command = detector.get_mcp_command()
274 |         
275 |         assert command in ["npx", "npx.cmd"]
276 |     
277 |     def test_is_development_environment(self):
278 |         """Test development environment detection."""
279 |         detector = PlatformDetector()
280 |         is_dev = detector.is_development_environment()
281 |         
282 |         assert isinstance(is_dev, bool)
283 |     
284 |     def test_get_conan_home(self):
285 |         """Test Conan home directory detection."""
286 |         detector = PlatformDetector()
287 |         conan_home = detector.get_conan_home()
288 |         
289 |         assert isinstance(conan_home, str)
290 |         assert len(conan_home) > 0
291 | 
292 | 
293 | class TestCursorConfig:
294 |     """Test cases for CursorConfig."""
295 |     
296 |     def setup_method(self):
297 |         """Set up test fixtures."""
298 |         self.temp_dir = tempfile.mkdtemp()
299 |         self.cursor_dir = Path(self.temp_dir) / ".cursor"
300 |         self.cursor_config = CursorConfig(self.cursor_dir)
301 |     
302 |     def teardown_method(self):
303 |         """Clean up test fixtures."""
304 |         shutil.rmtree(self.temp_dir)
305 |     
306 |     def test_create_directory_structure(self):
307 |         """Test directory structure creation."""
308 |         self.cursor_config.create_directory_structure()
309 |         
310 |         assert self.cursor_dir.exists()
311 |         assert (self.cursor_dir / "rules").exists()
312 |         assert (self.cursor_dir / "prompts").exists()
313 |         assert (self.cursor_dir / "rules" / "custom").exists()
314 |     
315 |     def test_write_rule(self):
316 |         """Test rule writing."""
317 |         from mcp_orchestrator.cursor_config import CursorRule
318 |         
319 |         rule = CursorRule(
320 |             title="Test Rule",
321 |             description="A test rule",
322 |             platform="test",
323 |             content="# Test Rule\n\nThis is a test rule.",
324 |             created="2024-01-01T00:00:00",
325 |             user="testuser"
326 |         )
327 |         
328 |         self.cursor_config.create_directory_structure()
329 |         self.cursor_config.write_rule(rule, "test-rule")
330 |         
331 |         rule_file = self.cursor_dir / "rules" / "test-rule.mdc"
332 |         assert rule_file.exists()
333 |         
334 |         content = rule_file.read_text()
335 |         assert "Test Rule" in content
336 |         assert "testuser" in content
337 |     
338 |     def test_write_prompt(self):
339 |         """Test prompt writing."""
340 |         self.cursor_config.create_directory_structure()
341 |         self.cursor_config.write_prompt("Test Prompt", "This is a test prompt.", "test-prompt")
342 |         
343 |         prompt_file = self.cursor_dir / "prompts" / "test-prompt.md"
344 |         assert prompt_file.exists()
345 |         
346 |         content = prompt_file.read_text()
347 |         assert "# Test Prompt" in content
348 |         assert "This is a test prompt." in content
349 |     
350 |     def test_write_mcp_config(self):
351 |         """Test MCP configuration writing."""
352 |         from mcp_orchestrator.cursor_config import MCPServerConfig
353 |         
354 |         servers = [
355 |             MCPServerConfig(
356 |                 name="test-server",
357 |                 command="npx",
358 |                 args=["-y", "@test/server"],
359 |                 env={"PLATFORM": "test"},
360 |                 disabled=False
361 |             )
362 |         ]
363 |         
364 |         self.cursor_config.create_directory_structure()
365 |         self.cursor_config.write_mcp_config(servers)
366 |         
367 |         mcp_file = self.cursor_dir / "mcp.json"
368 |         assert mcp_file.exists()
369 |         
370 |         import json
371 |         config = json.loads(mcp_file.read_text())
372 |         assert "mcpServers" in config
373 |         assert "test-server" in config["mcpServers"]
374 |     
375 |     def test_create_gitignore(self):
376 |         """Test .gitignore creation."""
377 |         self.cursor_config.create_directory_structure()
378 |         self.cursor_config.create_gitignore()
379 |         
380 |         gitignore_file = self.cursor_dir / ".gitignore"
381 |         assert gitignore_file.exists()
382 |         
383 |         content = gitignore_file.read_text()
384 |         assert "rules/custom/" in content
385 |         assert "*.log" in content
386 |     
387 |     def test_get_existing_rules(self):
388 |         """Test getting existing rules."""
389 |         self.cursor_config.create_directory_structure()
390 |         
391 |         # Create test rule files
392 |         (self.cursor_dir / "rules" / "rule1.mdc").write_text("Rule 1")
393 |         (self.cursor_dir / "rules" / "rule2.mdc").write_text("Rule 2")
394 |         
395 |         rules = self.cursor_config.get_existing_rules()
396 |         assert "rule1" in rules
397 |         assert "rule2" in rules
398 |     
399 |     def test_get_existing_prompts(self):
400 |         """Test getting existing prompts."""
401 |         self.cursor_config.create_directory_structure()
402 |         
403 |         # Create test prompt files
404 |         (self.cursor_dir / "prompts" / "prompt1.md").write_text("Prompt 1")
405 |         (self.cursor_dir / "prompts" / "prompt2.md").write_text("Prompt 2")
406 |         
407 |         prompts = self.cursor_config.get_existing_prompts()
408 |         assert "prompt1" in prompts
409 |         assert "prompt2" in prompts
410 |     
411 |     def test_has_mcp_config(self):
412 |         """Test MCP configuration detection."""
413 |         self.cursor_config.create_directory_structure()
414 |         
415 |         # Initially no MCP config
416 |         assert not self.cursor_config.has_mcp_config()
417 |         
418 |         # Create MCP config
419 |         (self.cursor_dir / "mcp.json").write_text('{"test": "config"}')
420 |         assert self.cursor_config.has_mcp_config()
421 |     
422 |     def test_is_configured(self):
423 |         """Test configuration detection."""
424 |         # Initially not configured
425 |         assert not self.cursor_config.is_configured()
426 |         
427 |         # Create directory structure
428 |         self.cursor_config.create_directory_structure()
429 |         
430 |         # Still not configured (no rules or prompts)
431 |         assert not self.cursor_config.is_configured()
432 |         
433 |         # Add a rule
434 |         (self.cursor_dir / "rules" / "test.mdc").write_text("Test rule")
435 |         assert self.cursor_config.is_configured()
```

--------------------------------------------------------------------------------
/REFACTORING_RECOMMENDATIONS.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Refactoring Recommendations
  2 | 
  3 | ## Executive Summary
  4 | 
  5 | This document outlines recommended refactoring and improvement opportunities for the MCP Project Orchestrator. The codebase is functional with all tests passing, but these improvements will enhance maintainability, performance, and extensibility.
  6 | 
  7 | ## 🎯 Priority Matrix
  8 | 
  9 | | Priority | Impact | Effort | Recommendation |
 10 | |----------|---------|--------|----------------|
 11 | | P0 | High | Low | Config naming consolidation |
 12 | | P0 | High | Medium | Test coverage increase |
 13 | | P1 | High | Medium | Manager abstraction |
 14 | | P1 | Medium | Low | Error handling improvements |
 15 | | P2 | Medium | Medium | Plugin system |
 16 | | P2 | Low | Low | Code documentation |
 17 | | P3 | Medium | High | Performance optimization |
 18 | 
 19 | ## 🔧 Critical Refactorings (P0)
 20 | 
 21 | ### 1. Configuration Naming Consolidation
 22 | 
 23 | **Problem**: Inconsistent naming between `Config` and `MCPConfig`
 24 | - Tests import `MCPConfig`
 25 | - Some modules expect `Config`
 26 | - Creates confusion and maintenance burden
 27 | 
 28 | **Solution**:
 29 | ```python
 30 | # Standardize on MCPConfig everywhere
 31 | # Update core/__init__.py
 32 | from .config import MCPConfig as Config, MCPConfig
 33 | 
 34 | # Update all imports to use MCPConfig consistently
 35 | # OR rename MCPConfig to Config in config.py
 36 | ```
 37 | 
 38 | **Benefits**:
 39 | - Single source of truth
 40 | - Clearer imports
 41 | - Easier to understand
 42 | 
 43 | **Estimated Effort**: 2 hours
 44 | **Breaking Changes**: Minimal (alias preserves backward compatibility)
 45 | 
 46 | ### 2. Test Coverage Improvement
 47 | 
 48 | **Current**: 27% overall coverage
 49 | **Target**: 80%+ coverage
 50 | 
 51 | **Focus Areas**:
 52 | ```
 53 | Priority modules to test:
 54 | 1. prompt_manager/manager.py (32% → 80%)
 55 | 2. mermaid/generator.py (24% → 80%)
 56 | 3. mermaid/renderer.py (43% → 80%)
 57 | 4. core/config.py (61% → 85%)
 58 | ```
 59 | 
 60 | **Approach**:
 61 | ```python
 62 | # Add tests for:
 63 | # 1. Manager async methods
 64 | # 2. Edge cases and error conditions
 65 | # 3. Integration scenarios
 66 | # 4. Complex diagram generation
 67 | 
 68 | # Example new test:
 69 | def test_prompt_manager_async_operations(prompt_manager):
 70 |     """Test async prompt loading and caching."""
 71 |     # Test async load
 72 |     # Test cache behavior
 73 |     # Test concurrent access
 74 | ```
 75 | 
 76 | **Estimated Effort**: 1-2 weeks
 77 | **Benefits**: Better reliability, easier refactoring, confidence in changes
 78 | 
 79 | ## 🏗️ Structural Improvements (P1)
 80 | 
 81 | ### 3. Abstract Manager Base Class
 82 | 
 83 | **Problem**: PromptManager and TemplateManager have duplicate patterns
 84 | 
 85 | **Current Structure**:
 86 | ```python
 87 | # templates/__init__.py
 88 | class TemplateManager:
 89 |     def __init__(self, templates_dir): ...
 90 |     def discover_templates(self): ...
 91 |     def list_templates(self, filter): ...
 92 |     def get_template(self, name): ...
 93 | 
 94 | # prompt_manager/manager.py
 95 | class PromptManager:
 96 |     def __init__(self, config): ...
 97 |     def discover_prompts(self): ...
 98 |     def list_prompts(self, category): ...
 99 |     def get_prompt(self, name): ...
100 | ```
101 | 
102 | **Proposed Solution**:
103 | ```python
104 | # core/managers.py
105 | from abc import ABC, abstractmethod
107 | from typing import Generic, TypeVar, List, Optional, Dict
107 | 
108 | T = TypeVar('T')
109 | 
110 | class BaseResourceManager(ABC, Generic[T]):
111 |     """Abstract base class for resource managers."""
112 |     
113 |     def __init__(self, base_dir: Path):
114 |         self.base_dir = base_dir
115 |         self._resources: Dict[str, T] = {}
116 |     
117 |     @abstractmethod
118 |     def discover_resources(self) -> None:
119 |         """Discover and load resources from base directory."""
120 |         pass
121 |     
122 |     @abstractmethod
123 |     def validate_resource(self, resource: T) -> bool:
124 |         """Validate a resource."""
125 |         pass
126 |     
127 |     def list_resources(self, **filters) -> List[str]:
128 |         """List resources matching filters."""
129 |         return list(self._resources.keys())
130 |     
131 |     def get_resource(self, name: str) -> Optional[T]:
132 |         """Get a resource by name."""
133 |         return self._resources.get(name)
134 |     
135 |     def save_resource(self, name: str, resource: T) -> None:
136 |         """Save a resource."""
137 |         if not self.validate_resource(resource):
138 |             raise ValueError(f"Invalid resource: {name}")
139 |         self._resources[name] = resource
140 | 
141 | # Usage:
142 | class TemplateManager(BaseResourceManager[BaseTemplate]):
143 |     def discover_resources(self):
144 |         # Template-specific discovery
145 |         pass
146 |     
147 |     def validate_resource(self, resource):
148 |         return resource.validate()
149 | ```
150 | 
151 | **Benefits**:
152 | - DRY principle
153 | - Consistent API
154 | - Easier to add new managers
155 | - Shared testing utilities
156 | 
157 | **Estimated Effort**: 1 week
158 | **Breaking Changes**: Minimal (preserve existing public APIs)
159 | 
160 | ### 4. Enhanced Error Handling
161 | 
162 | **Current Issues**:
163 | - Generic exceptions lose context
164 | - No error codes for programmatic handling
165 | - Limited debugging information
166 | 
167 | **Proposed Solution**:
168 | ```python
169 | # core/exceptions.py
170 | from enum import Enum
171 | from typing import Optional, Dict, Any
172 | 
173 | class ErrorCode(Enum):
174 |     """Standard error codes for MCP operations."""
175 |     TEMPLATE_NOT_FOUND = "E001"
176 |     TEMPLATE_INVALID = "E002"
177 |     VARIABLE_MISSING = "E003"
178 |     DIAGRAM_INVALID = "E004"
179 |     IO_ERROR = "E005"
180 |     CONFIG_ERROR = "E006"
181 | 
182 | class MCPError(Exception):
183 |     """Enhanced MCP exception with context."""
184 |     
185 |     def __init__(
186 |         self,
187 |         message: str,
188 |         code: ErrorCode,
189 |         details: Optional[Dict[str, Any]] = None,
190 |         cause: Optional[Exception] = None
191 |     ):
192 |         super().__init__(message)
193 |         self.code = code
194 |         self.details = details or {}
195 |         self.cause = cause
196 |     
197 |     def to_dict(self) -> Dict[str, Any]:
198 |         """Convert to dictionary for serialization."""
199 |         return {
200 |             "message": str(self),
201 |             "code": self.code.value,
202 |             "details": self.details,
203 |             "cause": str(self.cause) if self.cause else None
204 |         }
205 | 
206 | class TemplateNotFoundError(MCPError):
207 |     """Template not found."""
208 |     def __init__(self, name: str):
209 |         super().__init__(
210 |             f"Template not found: {name}",
211 |             ErrorCode.TEMPLATE_NOT_FOUND,
212 |             {"template_name": name}
213 |         )
214 | 
215 | # Usage:
216 | def get_template(self, name: str) -> Template:
217 |     if name not in self._templates:
218 |         raise TemplateNotFoundError(name)
219 |     return self._templates[name]
220 | ```
221 | 
222 | **Benefits**:
223 | - Better debugging
224 | - Programmatic error handling
225 | - Detailed error reports
226 | - Error tracking/monitoring
227 | 
228 | **Estimated Effort**: 3-4 days
229 | 
230 | ## 🚀 Feature Enhancements (P2)
231 | 
232 | ### 5. Plugin System
233 | 
234 | **Goal**: Allow external plugins for templates, diagrams, and tools
235 | 
236 | **Architecture**:
237 | ```python
238 | # core/plugins.py
239 | from typing import Protocol, List
240 | from abc import abstractmethod
241 | 
242 | class TemplateProvider(Protocol):
243 |     """Protocol for template providers."""
244 |     
245 |     @abstractmethod
246 |     def list_templates(self) -> List[str]:
247 |         """List available templates."""
248 |         ...
249 |     
250 |     @abstractmethod
251 |     def get_template(self, name: str) -> BaseTemplate:
252 |         """Get a template by name."""
253 |         ...
254 | 
255 | class PluginRegistry:
256 |     """Central plugin registry."""
257 |     
258 |     def __init__(self):
259 |         self._template_providers: List[TemplateProvider] = []
260 |         self._diagram_renderers: List[DiagramRenderer] = []
261 |     
262 |     def register_template_provider(self, provider: TemplateProvider):
263 |         """Register a template provider plugin."""
264 |         self._template_providers.append(provider)
265 |     
266 |     def discover_plugins(self):
267 |         """Discover plugins using entry points."""
268 |         import importlib.metadata
269 |         
270 |         for entry_point in importlib.metadata.entry_points().select(
271 |             group='mcp_orchestrator.plugins'
272 |         ):
273 |             plugin = entry_point.load()
274 |             plugin.register(self)
275 | 
276 | # pyproject.toml
277 | [project.entry-points."mcp_orchestrator.plugins"]
278 | my_plugin = "my_package.plugin:register"
279 | ```
280 | 
281 | **Benefits**:
282 | - Extensibility without core changes
283 | - Third-party integrations
284 | - Community contributions
285 | - Isolated plugin failures
286 | 
287 | **Estimated Effort**: 2 weeks
288 | 
289 | ### 6. Event System
290 | 
291 | **Goal**: Decouple components with event-driven architecture
292 | 
293 | **Implementation**:
294 | ```python
295 | # core/events.py
296 | from dataclasses import dataclass
297 | from typing import Callable, Dict, List, Any
298 | from enum import Enum
299 | 
300 | class EventType(Enum):
301 |     TEMPLATE_APPLIED = "template.applied"
302 |     PROJECT_CREATED = "project.created"
303 |     DIAGRAM_GENERATED = "diagram.generated"
304 |     PROMPT_RENDERED = "prompt.rendered"
305 | 
306 | @dataclass
307 | class Event:
308 |     """Base event class."""
309 |     type: EventType
310 |     data: Any
311 |     source: str
312 | 
313 | class EventBus:
314 |     """Simple event bus for pub/sub."""
315 |     
316 |     def __init__(self):
317 |         self._listeners: Dict[EventType, List[Callable]] = {}
318 |     
319 |     def subscribe(self, event_type: EventType, handler: Callable):
320 |         """Subscribe to an event."""
321 |         if event_type not in self._listeners:
322 |             self._listeners[event_type] = []
323 |         self._listeners[event_type].append(handler)
324 |     
325 |     def publish(self, event: Event):
326 |         """Publish an event."""
327 |         for handler in self._listeners.get(event.type, []):
328 |             try:
329 |                 handler(event)
330 |             except Exception as e:
331 |                 # Log but don't fail
332 |                 logger.error(f"Event handler failed: {e}")
333 | 
334 | # Usage:
335 | event_bus = EventBus()
336 | 
337 | # Subscribe
338 | def on_template_applied(event: Event):
339 |     logger.info(f"Template applied: {event.data['name']}")
340 | 
341 | event_bus.subscribe(EventType.TEMPLATE_APPLIED, on_template_applied)
342 | 
343 | # Publish
344 | event_bus.publish(Event(
345 |     type=EventType.TEMPLATE_APPLIED,
346 |     data={"name": "fastapi-project"},
347 |     source="TemplateManager"
348 | ))
349 | ```
350 | 
351 | **Benefits**:
352 | - Loose coupling
353 | - Extensible workflows
354 | - Audit logging
355 | - Monitoring hooks
356 | 
357 | **Estimated Effort**: 1 week
358 | 
359 | ## 📊 Performance Optimizations (P3)
360 | 
361 | ### 7. Caching Strategy
362 | 
363 | **Current**: Minimal caching, repeated file I/O
364 | 
365 | **Proposed**:
366 | ```python
367 | # core/cache.py
368 | from functools import wraps
369 | from typing import Callable, Any
370 | import hashlib
371 | import pickle
372 | from pathlib import Path
373 | 
374 | class FileCache:
375 |     """File-backed cache for expensive operations."""
376 |     
377 |     def __init__(self, cache_dir: Path):
378 |         self.cache_dir = cache_dir
379 |         self.cache_dir.mkdir(parents=True, exist_ok=True)
380 |     
381 |     def get(self, key: str) -> Any:
382 |         """Get cached value."""
383 |         cache_file = self.cache_dir / self._hash_key(key)
384 |         if cache_file.exists():
385 |             with open(cache_file, 'rb') as f:
386 |                 return pickle.load(f)
387 |         return None
388 |     
389 |     def set(self, key: str, value: Any):
390 |         """Set cached value."""
391 |         cache_file = self.cache_dir / self._hash_key(key)
392 |         with open(cache_file, 'wb') as f:
393 |             pickle.dump(value, f)
394 |     
395 |     def _hash_key(self, key: str) -> str:
396 |         """Hash key for filename."""
397 |         return hashlib.sha256(key.encode()).hexdigest()
398 | 
399 | def cached_property(func: Callable) -> property:
400 |     """Cached property decorator."""
401 |     attr_name = f'_cached_{func.__name__}'
402 |     
403 |     @wraps(func)
404 |     def wrapper(self):
405 |         if not hasattr(self, attr_name):
406 |             setattr(self, attr_name, func(self))
407 |         return getattr(self, attr_name)
408 |     
409 |     return property(wrapper)
410 | 
411 | # Usage:
412 | class TemplateManager:
413 |     @cached_property
414 |     def available_templates(self) -> List[str]:
415 |         """Cached list of templates."""
416 |         return self._discover_templates()
417 | ```
418 | 
419 | **Estimated Effort**: 3-4 days
420 | 
421 | ### 8. Async Operations
422 | 
423 | **Current**: Synchronous I/O blocks
424 | 
425 | **Proposed**:
426 | ```python
427 | # Use async for I/O-bound operations
428 | import asyncio
429 | import aiofiles
430 | 
431 | class AsyncTemplateManager:
432 |     async def load_template(self, name: str) -> Template:
433 |         """Load template asynchronously."""
434 |         path = self.templates_dir / f"{name}.json"
435 |         async with aiofiles.open(path) as f:
436 |             content = await f.read()
437 |             return Template.from_json(content)
438 |     
439 |     async def load_all_templates(self) -> List[Template]:
440 |         """Load all templates concurrently."""
441 |         template_files = list(self.templates_dir.glob("*.json"))
442 |         tasks = [self.load_template(f.stem) for f in template_files]
443 |         return await asyncio.gather(*tasks)
444 | ```
445 | 
446 | **Estimated Effort**: 1 week
447 | 
448 | ## 🧹 Code Quality Improvements
449 | 
450 | ### 9. Type Hints Enhancement
451 | 
452 | **Current**: Basic type hints
453 | **Target**: Comprehensive type coverage
454 | 
455 | ```python
456 | # Use Protocol for duck typing
457 | from typing import Protocol, Dict, Any
458 | 
459 | class Renderable(Protocol):
460 |     """Protocol for renderable objects."""
461 |     def render(self, context: Dict[str, Any]) -> str: ...
462 | 
463 | def render_template(template: Renderable, context: Dict[str, Any]) -> str:
464 |     return template.render(context)
465 | 
466 | # Use Generic types
467 | from typing import Generic, TypeVar, Dict, Optional
468 | 
469 | T = TypeVar('T', bound=BaseTemplate)
470 | 
471 | class TemplateRegistry(Generic[T]):
472 |     def __init__(self):
473 |         self._templates: Dict[str, T] = {}
474 |     
475 |     def register(self, name: str, template: T) -> None:
476 |         self._templates[name] = template
477 |     
478 |     def get(self, name: str) -> Optional[T]:
479 |         return self._templates.get(name)
480 | ```
481 | 
482 | ### 10. Documentation Generation
483 | 
484 | **Setup Sphinx for API docs**:
485 | 
486 | ```bash
487 | # Install Sphinx
488 | pip install sphinx sphinx-rtd-theme sphinx-autodoc-typehints
489 | 
490 | # Generate docs
491 | sphinx-quickstart docs
492 | sphinx-apidoc -o docs/api src/mcp_project_orchestrator
493 | sphinx-build -b html docs docs/_build
494 | ```
495 | 
496 | **Configuration**:
497 | ```python
498 | # docs/conf.py
499 | import os
500 | import sys
501 | sys.path.insert(0, os.path.abspath('../src'))
502 | 
503 | extensions = [
504 |     'sphinx.ext.autodoc',
505 |     'sphinx.ext.napoleon',
506 |     'sphinx.ext.viewcode',
507 |     'sphinx_autodoc_typehints',
508 | ]
509 | 
510 | html_theme = 'sphinx_rtd_theme'
511 | ```
512 | 
513 | ## 📋 Implementation Roadmap
514 | 
515 | ### Phase 1: Foundation (Week 1-2)
516 | - [ ] Config naming consolidation
517 | - [ ] Test coverage to 50%
518 | - [ ] Enhanced error handling
519 | 
520 | ### Phase 2: Structure (Week 3-4)
521 | - [ ] Abstract manager base class
522 | - [ ] Test coverage to 65%
523 | - [ ] Event system basics
524 | 
525 | ### Phase 3: Features (Week 5-7)
526 | - [ ] Plugin system
527 | - [ ] Test coverage to 80%
528 | - [ ] Caching implementation
529 | 
530 | ### Phase 4: Polish (Week 8)
531 | - [ ] Documentation generation
532 | - [ ] Performance profiling
533 | - [ ] Final testing and cleanup
534 | 
535 | ## 🎯 Success Metrics
536 | 
537 | ### Before Refactoring
538 | - Test Coverage: 27%
539 | - Modules: 36
540 | - Code Smells: Medium
541 | - Maintainability: Good
542 | 
543 | ### After Refactoring (Target)
544 | - Test Coverage: 80%+
545 | - Modules: ~40 (well-organized)
546 | - Code Smells: Low
547 | - Maintainability: Excellent
548 | - Performance: 2x faster template operations
549 | - Plugin Ecosystem: 3+ community plugins
550 | 
551 | ## 🔍 Code Review Checklist
552 | 
553 | For each refactoring:
554 | - [ ] Tests updated and passing
555 | - [ ] Documentation updated
556 | - [ ] Type hints complete
557 | - [ ] No breaking changes (or documented)
558 | - [ ] Performance not regressed
559 | - [ ] Security considered
560 | - [ ] Accessibility maintained
561 | - [ ] CI/CD passing
562 | 
563 | ## 📚 References
564 | 
565 | - [Python Design Patterns](https://refactoring.guru/design-patterns/python)
566 | - [Effective Python](https://effectivepython.com/)
567 | - [Python Type Hints PEP 484](https://peps.python.org/pep-0484/)
568 | - [Python Application Layouts](https://realpython.com/python-application-layouts/)
569 | 
570 | ---
571 | 
572 | **Last Updated**: 2025-10-01  
573 | **Maintainer**: MCP Project Orchestrator Team
574 | 
```

--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/ecosystem_monitor.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | OpenSSL Ecosystem Workflow Monitor
  3 | 
  4 | Monitors workflow runs across all OpenSSL repositories and provides
  5 | AI-assisted failure analysis and automated issue creation.
  6 | """
  7 | 
import asyncio
import json
import logging
import os
from dataclasses import dataclass
from datetime import datetime, timedelta, timezone
from enum import Enum
from typing import Dict, List, Optional, Tuple

import httpx
from github import Github
from github.WorkflowRun import WorkflowRun
from github.Repository import Repository
 20 | 
 21 | logger = logging.getLogger(__name__)
 22 | 
 23 | 
 24 | class FailureType(Enum):
 25 |     """Types of workflow failures for classification."""
 26 |     BUILD_ERROR = "build_error"
 27 |     TEST_FAILURE = "test_failure"
 28 |     SECURITY_SCAN = "security_scan"
 29 |     DEPENDENCY_ISSUE = "dependency_issue"
 30 |     TIMEOUT = "timeout"
 31 |     PERMISSION_DENIED = "permission_denied"
 32 |     CACHE_ISSUE = "cache_issue"
 33 |     UNKNOWN = "unknown"
 34 | 
 35 | 
 36 | @dataclass
 37 | class WorkflowFailure:
 38 |     """Represents a workflow failure with context."""
 39 |     repository: str
 40 |     workflow_name: str
 41 |     run_id: int
 42 |     failure_type: FailureType
 43 |     error_message: str
 44 |     failed_at: datetime
 45 |     duration: int
 46 |     platform: Optional[str] = None
 47 |     branch: Optional[str] = None
 48 |     commit_sha: Optional[str] = None
 49 |     actor: Optional[str] = None
 50 | 
 51 | 
 52 | @dataclass
 53 | class FailurePattern:
 54 |     """Represents a recurring failure pattern."""
 55 |     failure_type: FailureType
 56 |     repositories: List[str]
 57 |     frequency: int
 58 |     first_seen: datetime
 59 |     last_seen: datetime
 60 |     common_error: str
 61 |     suggested_fix: str
 62 | 
 63 | 
 64 | class EcosystemMonitor:
 65 |     """Monitors OpenSSL ecosystem workflows and analyzes failures."""
 66 |     
 67 |     def __init__(self, github_token: str, repositories: List[str]):
 68 |         self.github = Github(github_token)
 69 |         self.repositories = repositories
 70 |         self.failure_patterns: Dict[str, FailurePattern] = {}
 71 |         
 72 |     async def monitor_workflows(self, hours_back: int = 4) -> List[WorkflowFailure]:
 73 |         """Monitor workflows across all repositories for failures."""
 74 |         failures = []
 75 |         cutoff_time = datetime.utcnow() - timedelta(hours=hours_back)
 76 |         
 77 |         for repo_name in self.repositories:
 78 |             try:
 79 |                 repo = self.github.get_repo(repo_name)
 80 |                 workflow_runs = repo.get_workflow_runs(
 81 |                     status="completed",
 82 |                     created=f">={cutoff_time.isoformat()}"
 83 |                 )
 84 |                 
 85 |                 for run in workflow_runs:
 86 |                     if run.conclusion == "failure":
 87 |                         failure = await self._analyze_failure(run, repo_name)
 88 |                         if failure:
 89 |                             failures.append(failure)
 90 |                             
 91 |             except Exception as e:
 92 |                 logger.error(f"Error monitoring {repo_name}: {e}")
 93 |                 
 94 |         return failures
 95 |     
 96 |     async def _analyze_failure(self, run: WorkflowRun, repo_name: str) -> Optional[WorkflowFailure]:
 97 |         """Analyze a failed workflow run and classify the failure."""
 98 |         try:
 99 |             # Get workflow run details
100 |             run_details = run.get_workflow_run()
101 |             jobs = run_details.get_jobs()
102 |             
103 |             # Find the failed job
104 |             failed_job = None
105 |             for job in jobs:
106 |                 if job.conclusion == "failure":
107 |                     failed_job = job
108 |                     break
109 |             
110 |             if not failed_job:
111 |                 return None
112 |                 
113 |             # Get job logs
114 |             logs = failed_job.get_logs()
115 |             error_message = self._extract_error_message(logs)
116 |             
117 |             # Classify failure type
118 |             failure_type = self._classify_failure(error_message, failed_job.name)
119 |             
120 |             return WorkflowFailure(
121 |                 repository=repo_name,
122 |                 workflow_name=run.name,
123 |                 run_id=run.id,
124 |                 failure_type=failure_type,
125 |                 error_message=error_message,
126 |                 failed_at=run.created_at,
127 |                 duration=run.run_duration_ms or 0,
128 |                 platform=self._extract_platform(failed_job.name),
129 |                 branch=run.head_branch,
130 |                 commit_sha=run.head_sha,
131 |                 actor=run.actor.login if run.actor else None
132 |             )
133 |             
134 |         except Exception as e:
135 |             logger.error(f"Error analyzing failure in {repo_name} run {run.id}: {e}")
136 |             return None
137 |     
138 |     def _extract_error_message(self, logs: str) -> str:
139 |         """Extract the most relevant error message from job logs."""
140 |         lines = logs.split('\n')
141 |         
142 |         # Look for common error patterns
143 |         error_indicators = [
144 |             "ERROR:", "FAILED:", "Exception:", "Error:", 
145 |             "fatal:", "error:", "failed:", "FAIL:"
146 |         ]
147 |         
148 |         for line in reversed(lines):
149 |             if any(indicator in line for indicator in error_indicators):
150 |                 return line.strip()
151 |                 
152 |         # Fallback to last few lines
153 |         return '\n'.join(lines[-5:]).strip()
154 |     
155 |     def _classify_failure(self, error_message: str, job_name: str) -> FailureType:
156 |         """Classify the type of failure based on error message and job name."""
157 |         error_lower = error_message.lower()
158 |         job_lower = job_name.lower()
159 |         
160 |         if "timeout" in error_lower or "timed out" in error_lower:
161 |             return FailureType.TIMEOUT
162 |         elif "permission denied" in error_lower or "unauthorized" in error_lower:
163 |             return FailureType.PERMISSION_DENIED
164 |         elif "cache" in error_lower and ("miss" in error_lower or "invalid" in error_lower):
165 |             return FailureType.CACHE_ISSUE
166 |         elif "dependency" in error_lower or "package not found" in error_lower:
167 |             return FailureType.DEPENDENCY_ISSUE
168 |         elif "security" in job_lower or "scan" in job_lower:
169 |             return FailureType.SECURITY_SCAN
170 |         elif "test" in job_lower or "pytest" in error_lower:
171 |             return FailureType.TEST_FAILURE
172 |         elif "build" in job_lower or "cmake" in error_lower or "conan" in error_lower:
173 |             return FailureType.BUILD_ERROR
174 |         else:
175 |             return FailureType.UNKNOWN
176 |     
177 |     def _extract_platform(self, job_name: str) -> Optional[str]:
178 |         """Extract platform information from job name."""
179 |         platforms = ["linux", "windows", "macos", "ubuntu", "centos", "alpine"]
180 |         for platform in platforms:
181 |             if platform in job_name.lower():
182 |                 return platform
183 |         return None
184 |     
185 |     def analyze_patterns(self, failures: List[WorkflowFailure]) -> List[FailurePattern]:
186 |         """Analyze failures to identify recurring patterns."""
187 |         patterns = {}
188 |         
189 |         # Group failures by type and error message
190 |         for failure in failures:
191 |             key = f"{failure.failure_type.value}:{failure.error_message[:100]}"
192 |             
193 |             if key not in patterns:
194 |                 patterns[key] = {
195 |                     'failures': [],
196 |                     'repositories': set(),
197 |                     'first_seen': failure.failed_at,
198 |                     'last_seen': failure.failed_at
199 |                 }
200 |             
201 |             patterns[key]['failures'].append(failure)
202 |             patterns[key]['repositories'].add(failure.repository)
203 |             patterns[key]['last_seen'] = max(patterns[key]['last_seen'], failure.failed_at)
204 |             patterns[key]['first_seen'] = min(patterns[key]['first_seen'], failure.failed_at)
205 |         
206 |         # Convert to FailurePattern objects
207 |         failure_patterns = []
208 |         for key, data in patterns.items():
209 |             if len(data['failures']) >= 2:  # Only patterns with 2+ occurrences
210 |                 failure_type = data['failures'][0].failure_type
211 |                 common_error = data['failures'][0].error_message
212 |                 
213 |                 pattern = FailurePattern(
214 |                     failure_type=failure_type,
215 |                     repositories=list(data['repositories']),
216 |                     frequency=len(data['failures']),
217 |                     first_seen=data['first_seen'],
218 |                     last_seen=data['last_seen'],
219 |                     common_error=common_error,
220 |                     suggested_fix=self._generate_fix_suggestion(failure_type, common_error)
221 |                 )
222 |                 failure_patterns.append(pattern)
223 |         
224 |         return failure_patterns
225 |     
226 |     def _generate_fix_suggestion(self, failure_type: FailureType, error_message: str) -> str:
227 |         """Generate AI-assisted fix suggestions based on failure type."""
228 |         suggestions = {
229 |             FailureType.BUILD_ERROR: """
230 | **Build Error Fix Suggestions:**
231 | 1. Check Conan profile compatibility
232 | 2. Verify dependency versions in conanfile.py
233 | 3. Clear Conan cache: `conan cache clean`
234 | 4. Update build tools and compilers
235 | 5. Check for missing system dependencies
236 |             """,
237 |             FailureType.TEST_FAILURE: """
238 | **Test Failure Fix Suggestions:**
239 | 1. Review test logs for specific assertion failures
240 | 2. Check test data and fixtures
241 | 3. Verify test environment setup
242 | 4. Update test expectations if behavior changed
243 | 5. Check for flaky tests and add retries
244 |             """,
245 |             FailureType.SECURITY_SCAN: """
246 | **Security Scan Fix Suggestions:**
247 | 1. Update vulnerable dependencies
248 | 2. Review security scan reports in GitHub Security tab
249 | 3. Address CRITICAL and HIGH severity issues first
250 | 4. Update SBOM generation if needed
251 | 5. Check for hardcoded secrets or credentials
252 |             """,
253 |             FailureType.DEPENDENCY_ISSUE: """
254 | **Dependency Issue Fix Suggestions:**
255 | 1. Verify Conan remote configuration
256 | 2. Check package availability in remote
257 | 3. Update package versions in conanfile.py
258 | 4. Clear and rebuild Conan cache
259 | 5. Check network connectivity to package registry
260 |             """,
261 |             FailureType.TIMEOUT: """
262 | **Timeout Fix Suggestions:**
263 | 1. Increase workflow timeout limits
264 | 2. Optimize build process (parallel builds, caching)
265 | 3. Check for resource constraints
266 | 4. Review long-running operations
267 | 5. Consider splitting large jobs into smaller ones
268 |             """,
269 |             FailureType.PERMISSION_DENIED: """
270 | **Permission Denied Fix Suggestions:**
271 | 1. Check OIDC configuration and permissions
272 | 2. Verify repository secrets and environment variables
273 | 3. Review GitHub Actions permissions
274 | 4. Check Cloudsmith API key or OIDC setup
275 | 5. Verify workflow file permissions
276 |             """,
277 |             FailureType.CACHE_ISSUE: """
278 | **Cache Issue Fix Suggestions:**
279 | 1. Clear GitHub Actions cache
280 | 2. Update cache keys to include relevant changes
281 | 3. Check cache size limits
282 | 4. Verify cache restoration logic
283 | 5. Consider cache invalidation strategy
284 |             """
285 |         }
286 |         
287 |         base_suggestion = suggestions.get(failure_type, "Review error logs and check common failure causes.")
288 |         
289 |         # Add specific error context if available
290 |         if "conan" in error_message.lower():
291 |             base_suggestion += "\n\n**Conan-specific:** Check Conan configuration, remotes, and package availability."
292 |         elif "cmake" in error_message.lower():
293 |             base_suggestion += "\n\n**CMake-specific:** Verify CMakeLists.txt configuration and build settings."
294 |         elif "docker" in error_message.lower():
295 |             base_suggestion += "\n\n**Docker-specific:** Check Dockerfile syntax and base image availability."
296 |             
297 |         return base_suggestion.strip()
298 | 
299 | 
class IssueCreator:
    """Creates GitHub issues for recurring failure patterns."""

    def __init__(self, github_token: str):
        """Authenticate a GitHub client used to file issues."""
        self.github = Github(github_token)

    async def create_failure_issue(self, pattern: FailurePattern, target_repo: str) -> Optional[int]:
        """File an issue describing a failure pattern in the given repository.

        Returns the new issue number, or None when issue creation fails.
        """
        try:
            repo = self.github.get_repo(target_repo)

            # Human-readable form of the failure category, e.g. "Build Error".
            pretty_type = pattern.failure_type.value.replace('_', ' ').title()
            title = f"🚨 Recurring {pretty_type} Pattern Detected"

            body = f"""
## Failure Pattern Analysis

**Pattern Type:** {pretty_type}
**Frequency:** {pattern.frequency} occurrences
**Repositories Affected:** {', '.join(pattern.repositories)}
**First Seen:** {pattern.first_seen.isoformat()}
**Last Seen:** {pattern.last_seen.isoformat()}

### Common Error
```
{pattern.common_error}
```

### Suggested Fix
{pattern.suggested_fix}

### Next Steps
1. Review the error pattern and suggested fixes
2. Implement the recommended solution
3. Monitor for recurrence
4. Close this issue once resolved

---
*This issue was automatically created by the OpenSSL Ecosystem Monitor*
            """

            # Base labels plus one category-specific label, when applicable.
            labels = ["bug", "automated", "ecosystem-monitor"]
            extra_labels = {
                FailureType.SECURITY_SCAN: "security",
                FailureType.BUILD_ERROR: "build",
                FailureType.TEST_FAILURE: "tests",
            }
            extra = extra_labels.get(pattern.failure_type)
            if extra is not None:
                labels.append(extra)

            issue = repo.create_issue(title=title, body=body, labels=labels)

            logger.info(f"Created issue #{issue.number} in {target_repo} for {pattern.failure_type.value}")
            return issue.number

        except Exception as e:
            logger.error(f"Error creating issue in {target_repo}: {e}")
            return None
360 | 
361 | 
async def main():
    """Main monitoring function.

    Scans recent workflow runs across the configured repositories, groups
    failures into recurring patterns, and opens a GitHub issue for each
    pattern that recurs frequently enough to be actionable.

    Raises:
        RuntimeError: If the GITHUB_TOKEN environment variable is not set.
    """
    # Bug fix: read the token from the environment instead of shipping a
    # hardcoded placeholder string.
    github_token = os.environ.get("GITHUB_TOKEN")
    if not github_token:
        raise RuntimeError("GITHUB_TOKEN environment variable is not set")

    repositories = [
        "sparesparrow/openssl",
        "sparesparrow/openssl-conan-base",
        "sparesparrow/openssl-fips-policy",
        "sparesparrow/openssl-tools",
        "sparesparrow/mcp-project-orchestrator",
    ]

    # Initialize monitor and issue creator with the same credentials.
    monitor = EcosystemMonitor(github_token, repositories)
    issue_creator = IssueCreator(github_token)

    # Monitor workflows over the most recent window.
    logger.info("Starting ecosystem monitoring...")
    failures = await monitor.monitor_workflows(hours_back=4)

    if not failures:
        logger.info("No failures detected in the last 4 hours")
        return

    logger.info(f"Found {len(failures)} failures across {len(set(f.repository for f in failures))} repositories")

    # Analyze failures for recurring patterns.
    patterns = monitor.analyze_patterns(failures)
    logger.info(f"Identified {len(patterns)} recurring failure patterns")

    # Create issues for significant patterns only.
    for pattern in patterns:
        if pattern.frequency >= 3:  # Only create issues for patterns with 3+ occurrences
            # File the issue in the repository with the most occurrences.
            main_repo = max(pattern.repositories, key=lambda r: sum(1 for f in failures if f.repository == r))
            issue_number = await issue_creator.create_failure_issue(pattern, main_repo)

            if issue_number:
                logger.info(f"Created issue #{issue_number} for pattern: {pattern.failure_type.value}")
401 | 
402 | 
if __name__ == "__main__":
    # Script entry point: configure root logging and run one monitoring pass.
    logging.basicConfig(level=logging.INFO)
    asyncio.run(main())
```

--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/mcp-py/CustomBashTool.py:
--------------------------------------------------------------------------------

```python
import anthropic

# Anthropic API client used for all message requests below.
# NOTE(review): a placeholder key is hardcoded here; prefer omitting api_key
# entirely so the SDK falls back to the ANTHROPIC_API_KEY environment variable.
client = anthropic.Anthropic(
    # defaults to os.environ.get("ANTHROPIC_API_KEY")
    api_key="my_api_key",
)
 7 | 
 8 | # Replace placeholders like {{USER_REQUEST}} with real values,
 9 | # because the SDK does not support variables.
10 | message = client.messages.create(
11 |     model="claude-3-5-sonnet-20241022",
12 |     max_tokens=1000,
13 |     temperature=0,
14 |     system="You are an AI assistant specialized in translating natural language requests into bash commands for Ubuntu Studio Linux with KDE desktop environment. Your task is to interpret user requests and provide the corresponding bash commands along with brief explanations.\n\nHere's the user's request:\n\n<user_request>\n{{USER_REQUEST}}\n</user_request>\n\nPlease follow these steps to process the request and generate a response:\n\n1. Analyze the request and identify the key components:\n   - Action to be performed\n   - Specific tools or services mentioned\n   - Parameters or conditions specified\n\n2. Formulate an appropriate bash script that fulfills the user's request, considering:\n   - Common Linux command-line tools and utilities\n   - Specific tools mentioned in the request (prioritize these)\n   - Necessary options or flags to achieve the desired outcome\n   - Combining multiple steps using appropriate bash syntax (e.g., functions, loops, or conditionals) if needed\n   - Including echo commands to communicate analysis content to the user\n   - Adding interactive elements where appropriate (e.g., user prompts, menus)\n\n3. Evaluate the safety of the script:\n   - Ensure the commands don't pose potential risks to the system\n   - If a request seems unsafe, prepare a warning and suggest a safer alternative\n\n4. Generate a JSON response with the following structure:\n   {\n     \"name\": \"descriptive_filename.sh\",\n     \"description\": \"Brief explanation of what the script does\",\n     \"script\": \"The actual bash script\"\n   }\n\n   Note: The \"name\" field should be a descriptive filename based on the script's function, ending with .sh\n\nBefore providing your final response, wrap your thought process inside <thinking> tags. This analysis should include:\na. A clear list of the key components identified in the user's request\nb. Consideration of multiple command options, explaining the reasoning for your final choice\nc. 
An evaluation of potential security risks and explanation of any mitigation strategies\nd. Your approach to making the script interactive and informative\ne. Consideration of potential edge cases or variations in the user's request\nf. Explicit consideration of the KDE desktop environment and Ubuntu Studio-specific tools\ng. A step-by-step breakdown of how you'll construct the bash script\n\nRemember to always prioritize security and avoid potentially harmful commands. If the user's request is unclear or too complex to be translated into a single bash script, explain the limitations in your analysis process and suggest breaking down the request into smaller, more manageable tasks.\n\nAfter your analysis, provide the JSON response as described above.",
15 |     messages=[
16 |         {
17 |             "role": "user",
18 |             "content": [
19 |                 {
20 |                     "type": "text",
21 |                     "text": "<examples>\n<example>\n<USER_REQUEST>\nMonitor MIDI input from a USB MIDI controller\n</USER_REQUEST>\n<ideal_output>\n<thinking>\na. Key components identified:\n   - Action: Monitor MIDI input\n   - Specific tool: USB MIDI controller\n   - Parameters: None specified\n\nb. Command options:\n   1. aconnect -i: Lists MIDI input ports but doesn't monitor events.\n   2. amidi -d: Dumps raw MIDI data, not suitable for monitoring in a user-friendly format.\n   3. aseqdump: Provides real-time display of MIDI events in a human-readable format.\n\n   After considering these options, aseqdump seems most appropriate for monitoring MIDI input in a user-friendly way.\n\nc. Security evaluation:\n   aseqdump is safe to use as it only reads MIDI input without modifying system settings. No significant security risks are associated with this command.\n\nd. Interactivity and information:\n   The script will directly output MIDI events, providing real-time information without requiring additional interactivity.\n\ne. Edge cases:\n   - Multiple MIDI devices connected: We'll use the -p option to specify the device name.\n   - No MIDI device connected: We'll add an error check before running aseqdump.\n\nf. KDE and Ubuntu Studio consideration:\n   This command is not specific to KDE or Ubuntu Studio, but it's compatible with these environments.\n\ng. Script construction:\n   1. Check if aseqdump is installed\n   2. Verify MIDI device connectivity\n   3. Run aseqdump with appropriate options\n   4. Provide helpful output messages\n\nNow, I'll construct the JSON response based on this analysis.\n</thinking>\n\n{\n  \"name\": \"monitor_midi_input.sh\",\n  \"description\": \"Monitors MIDI input from a USB MIDI controller using aseqdump\",\n  \"script\": \"#!/bin/bash\\n\\n# Check if aseqdump is installed\\nif ! command -v aseqdump &> /dev/null; then\\n    echo \\\"Error: aseqdump is not installed. 
Please install it using 'sudo apt-get install alsa-utils'.\\\"\\n    exit 1\\nfi\\n\\n# Check if MIDI device is connected\\nif ! aconnect -i | grep -q 'USB MIDI Controller'; then\\n    echo \\\"Error: USB MIDI Controller not found. Please connect your device and try again.\\\"\\n    exit 1\\nfi\\n\\necho \\\"Monitoring MIDI input from USB MIDI Controller. Press Ctrl+C to stop.\\\"\\naseqdump -p \\\"USB MIDI Controller\\\"\\n\"\n}\n</ideal_output>\n</example>\n<example>\n<USER_REQUEST>\nCreate an SSH tunnel\n</USER_REQUEST>\n<ideal_output>\n<thinking>\na. Key components identified:\n   - Action: Create an SSH tunnel\n   - Specific tool: SSH\n   - Parameters: None specified (assumptions needed)\n\nb. Command options:\n   1. ssh -D: Creates a SOCKS proxy, not a typical SSH tunnel.\n   2. ssh -R: Creates a reverse tunnel, not commonly meant by \"SSH tunnel\".\n   3. ssh -L: Creates a local port forwarding tunnel, the most common type.\n\n   After consideration, ssh -L is most appropriate for creating a standard SSH tunnel.\n\nc. Security evaluation:\n   SSH tunnels encrypt traffic but have potential risks:\n   1. Trust in the remote host is crucial.\n   2. Local port conflicts should be avoided.\n   3. Users must understand this creates a potential entry point to their system.\n\n   To mitigate risks, we'll use a non-standard high port number and add checks.\n\nd. Interactivity and information:\n   We'll add prompts for user input to specify remote host, port, and local port.\n\ne. Edge cases:\n   - Local port already in use: We'll add a check and allow retry.\n   - SSH key authentication: We'll provide an option for this.\n\nf. KDE and Ubuntu Studio consideration:\n   This command is not specific to KDE or Ubuntu Studio but is fully compatible.\n\ng. Script construction:\n   1. Prompt for remote host, remote port, and local port\n   2. Check if local port is available\n   3. Offer SSH key authentication option\n   4. Construct and execute SSH command\n   5. 
Provide helpful output messages\n\nNow, I'll construct the JSON response based on this analysis.\n</thinking>\n\n{\n  \"name\": \"create_ssh_tunnel.sh\",\n  \"description\": \"Creates an SSH tunnel with user-specified parameters and security checks\",\n  \"script\": \"#!/bin/bash\\n\\necho \\\"SSH Tunnel Creator\\\"\\n\\nread -p \\\"Enter remote host: \\\" remote_host\\nread -p \\\"Enter remote port: \\\" remote_port\\nread -p \\\"Enter local port (default 8022): \\\" local_port\\nlocal_port=${local_port:-8022}\\n\\n# Check if local port is available\\nif ss -tln | grep -q :$local_port; then\\n    echo \\\"Error: Local port $local_port is already in use.\\\"\\n    exit 1\\nfi\\n\\nread -p \\\"Use SSH key authentication? (y/n): \\\" use_key\\n\\nif [ \\\"$use_key\\\" = \\\"y\\\" ]; then\\n    ssh_command=\\\"ssh -i ~/.ssh/id_rsa -L $local_port:localhost:$remote_port $remote_host\\\"\\nelse\\n    ssh_command=\\\"ssh -L $local_port:localhost:$remote_port $remote_host\\\"\\nfi\\n\\necho \\\"Creating SSH tunnel...\\\"\\necho \\\"Local port $local_port will be forwarded to port $remote_port on $remote_host\\\"\\necho \\\"Press Ctrl+C to stop the tunnel\\\"\\n\\n$ssh_command\\n\"\n}\n</ideal_output>\n</example>\n<example>\n<USER_REQUEST>\nCheck recent journalctl errors\n</USER_REQUEST>\n<ideal_output>\n<thinking>\na. Key components identified:\n   - Action: Check recent errors\n   - Specific tool: journalctl\n   - Parameters: Recent, errors\n\nb. Command options:\n   1. journalctl -xe: Shows journal end, not ideal for recent errors.\n   2. journalctl -f: Follows journal real-time, not suitable for checking recent errors.\n   3. journalctl -p err..emerg: Filters error to emergency priority levels.\n\n   After analysis, combining journalctl -p err..emerg with -n for recent entries is most appropriate.\n\nc. Security evaluation:\n   This command only reads from the system journal without modifications. 
However, output may contain sensitive information, so we'll add a warning.\n\nd. Interactivity and information:\n   We'll add an option for the user to specify the number of recent entries to display.\n\ne. Edge cases:\n   - No recent errors: We'll add a check and inform the user if no errors are found.\n   - Very large number of errors: We'll add a confirmation prompt for large outputs.\n\nf. KDE and Ubuntu Studio consideration:\n   While not specific to KDE or Ubuntu Studio, this command works in these environments and can help troubleshoot system issues.\n\ng. Script construction:\n   1. Prompt user for number of entries to display\n   2. Add confirmation for large number of entries\n   3. Execute journalctl command with appropriate options\n   4. Check for empty output and inform user if no errors found\n   5. Display a warning about potential sensitive information in output\n\nNow, I'll construct the JSON response based on this analysis.\n</thinking>\n\n{\n  \"name\": \"check_recent_journalctl_errors.sh\",\n  \"description\": \"Checks and displays recent journalctl errors with user-specified entry count\",\n  \"script\": \"#!/bin/bash\\n\\necho \\\"Recent Journalctl Errors Checker\\\"\\n\\nread -p \\\"Enter the number of recent entries to check (default 50): \\\" num_entries\\nnum_entries=${num_entries:-50}\\n\\nif [ $num_entries -gt 1000 ]; then\\n    read -p \\\"You've requested a large number of entries. This may take a while and produce a lot of output. Continue? 
(y/n): \\\" confirm\\n    if [ \\\"$confirm\\\" != \\\"y\\\" ]; then\\n        echo \\\"Operation cancelled.\\\"\\n        exit 0\\n    fi\\nfi\\n\\necho \\\"Checking the last $num_entries journalctl entries for errors...\\\"\\necho \\\"Warning: The output may contain sensitive system information.\\\"\\n\\noutput=$(journalctl -p err..emerg -n $num_entries --no-pager)\\n\\nif [ -z \\\"$output\\\" ]; then\\n    echo \\\"No errors found in the last $num_entries entries.\\\"\\nelse\\n    echo \\\"$output\\\"\\nfi\\n\"\n}\n</ideal_output>\n</example>\n<example>\n<USER_REQUEST>\nMonitor and replay input\n</USER_REQUEST>\n<ideal_output>\n<thinking>\na. Key components identified:\n   - Action 1: Monitor input\n   - Action 2: Replay input\n   - Specific tools: Not specified (need to choose appropriate tools)\n   - Parameters: None specified (assumptions needed)\n\nb. Command options:\n   For monitoring:\n   1. xinput: Provides detailed input device information and event monitoring.\n   2. evtest: Monitors input device events but requires root privileges.\n   3. xev: Monitors X events but limited to a specific window.\n\n   For replaying:\n   1. xdotool: Can simulate keyboard and mouse input events.\n   2. xte: Part of xautomation package, for simple event replay.\n   3. autokey: Complex scripting tool for input automation.\n\n   After analysis, xinput for monitoring and xdotool for replaying are most suitable.\n\nc. Security evaluation:\n   Potential security implications:\n   1. Captures all input, including sensitive data like passwords.\n   2. Replay script could execute harmful commands if malicious input is captured.\n\n   To mitigate risks:\n   1. Limit monitoring time.\n   2. Filter captured input to include only key and button press events.\n   3. Warn user to review replay script before execution.\n\nd. Interactivity and information:\n   We'll add prompts for monitoring duration and review options before replay.\n\ne. 
Edge cases:\n   - No input detected: Add a check and inform the user.\n   - Very long input sequence: Add a confirmation for long replay scripts.\n\nf. KDE and Ubuntu Studio consideration:\n   These tools work in KDE and Ubuntu Studio environments. We'll check for required packages.\n\ng. Script construction:\n   1. Check for required tools (xinput, xdotool)\n   2. Prompt for monitoring duration\n   3. Monitor input using xinput\n   4. Filter and process captured input\n   5. Generate replay script\n   6. Offer review and execution options\n\nNow, I'll construct the JSON response based on this analysis.\n</thinking>\n\n{\n  \"name\": \"monitor_and_replay_input.sh\",\n  \"description\": \"Monitors input events and creates a script to replay them, with security considerations\",\n  \"script\": \"#!/bin/bash\\n\\necho \\\"Input Monitor and Replay Tool\\\"\\n\\n# Check for required tools\\nfor tool in xinput xdotool; do\\n    if ! command -v $tool &> /dev/null; then\\n        echo \\\"Error: $tool is not installed. Please install it and try again.\\\"\\n        exit 1\\n    fi\\ndone\\n\\nread -p \\\"Enter monitoring duration in seconds (default 10): \\\" duration\\nduration=${duration:-10}\\n\\necho \\\"Monitoring input for $duration seconds...\\\"\\ntimeout ${duration}s xinput test-xi2 --root > input_log.txt\\n\\nif [ ! -s input_log.txt ]; then\\n    echo \\\"No input detected during the monitoring period.\\\"\\n    exit 0\\nfi\\n\\necho \\\"Creating replay script...\\\"\\necho '#!/bin/bash' > replay_script.sh\\necho \\\"xdotool type $(cat input_log.txt | grep -E 'key press|button press' | awk '{print $NF}')\\\" >> replay_script.sh\\nchmod +x replay_script.sh\\n\\necho \\\"Replay script created as replay_script.sh\\\"\\necho \\\"Warning: Review the script content before executing to ensure it's safe.\\\"\\n\\nread -p \\\"Do you want to review the replay script? 
(y/n): \\\" review\\nif [ \\\"$review\\\" = \\\"y\\\" ]; then\\n    less replay_script.sh\\nfi\\n\\nread -p \\\"Do you want to execute the replay script now? (y/n): \\\" execute\\nif [ \\\"$execute\\\" = \\\"y\\\" ]; then\\n    echo \\\"Executing replay script in 5 seconds. Focus on the desired window.\\\"\\n    sleep 5\\n    ./replay_script.sh\\nelse\\n    echo \\\"You can execute the script later by running: ./replay_script.sh\\\"\\nfi\\n\"\n}\n</ideal_output>\n</example>\n</examples>\n\n"
22 |                 },
23 |                 {
24 |                     "type": "text",
25 |                     "text": "one of docker containers provides VNC server. i need to connect to VNC trough SSH\nContainer Network Details\nNetwork: ha_bridge_network (172.21.0.0/16)\nContainer IP: 172.21.0.3\nExposed ports: 5900(VNC), 6080(noVNC), 8501(Streamlit), 22(SSH)\n"
26 |                 }
27 |             ]
28 |         }
29 |     ]
30 | )
31 | print(message.content)
32 | 
```

--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/prompts/development/MCP_Server_Development_Prompt_Combiner.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |   "name": "MCP Server Development Prompt Combiner",
 3 |   "description": "A specialized prompt combiner for MCP server development that integrates interface definitions, implementation patterns, and best practices",
 4 |   "type": "prompt",
 5 |   "category": "development",
 6 |   "content": "/**\n * MCPServerDevPromptCombiner for {{project_name}}\n * \n * A specialized implementation of the PromptCombiner interface\n * focused on combining prompts for MCP server development workflows.\n */\n\nimport { PromptCombiner, CombinerContext, CombinedPromptResult, PromptSuggestion, CombinationValidationResult, WorkflowConfig, SavedWorkflow } from './prompt-combiner-interface';\nimport { PromptService } from '../services/prompt-service';\nimport { Prompt } from '../core/types';\n\n/**\n * MCP Server Development specific context\n */\nexport interface MCPServerDevContext extends CombinerContext {\n  /** Server configuration */\n  serverConfig?: {\n    name: string;\n    version: string;\n    capabilities: string[];\n  };\n  \n  /** Core technologies being used */\n  technologies: {\n    language: string;\n    runtime: string;\n    frameworks: string[];\n  };\n  \n  /** MCP Server SDK version */\n  sdkVersion: string;\n  \n  /** Tools to be implemented */\n  tools?: {\n    name: string;\n    description: string;\n    parameters?: Record<string, any>;\n  }[];\n  \n  /** Resources to be implemented */\n  resources?: {\n    protocol: string;\n    description: string;\n  }[];\n  \n  /** Deployment target environment */\n  deploymentTarget?: 'docker' | 'kubernetes' | 'serverless' | 'standalone';\n  \n  /** Additional MCP-specific context */\n  {{additional_mcp_context}}\n}\n\n/**\n * Specialized result for MCP Server development combinations\n */\nexport interface MCPServerDevResult extends CombinedPromptResult {\n  /** Generated interface definitions */\n  interfaces?: string;\n  \n  /** Generated MCP tools implementation */\n  toolsImplementation?: string;\n  \n  /** Generated MCP resources implementation */\n  resourcesImplementation?: string;\n  \n  /** Server configuration */\n  serverConfiguration?: string;\n  \n  /** Client integration examples */\n  clientExamples?: string;\n  \n  /** Testing approach */\n  testingApproach?: string;\n  \n  
/** Dockerfile and Docker Compose configuration */\n  dockerConfiguration?: string;\n  \n  /** Additional MCP-specific results */\n  {{additional_mcp_results}}\n}\n\n/**\n * Implementation of MCPServerDevPromptCombiner\n */\nexport class MCPServerDevPromptCombiner implements PromptCombiner {\n  constructor(private promptService: PromptService) {}\n  \n  /**\n   * Combines MCP server development prompts\n   * @param promptIds Array of prompt IDs to combine\n   * @param context Optional MCP server development context\n   * @returns Combined MCP server development result\n   */\n  async combinePrompts(promptIds: string[], context?: MCPServerDevContext): Promise<MCPServerDevResult> {\n    // Implementation would include:\n    // 1. Validating the prompts are compatible for MCP development\n    // 2. Organizing prompts into logical sections (interfaces, tools, resources, etc.)\n    // 3. Resolving dependencies between prompts\n    // 4. Applying variables with MCP-specific knowledge\n    // 5. 
Generating a comprehensive server implementation guide\n    \n    // This is a template structure - in a real implementation, this would contain\n    // the actual logic for combining MCP server development prompts\n    \n    // For now, we'll outline the structure of how the implementation would work\n    \n    // Step 1: Load and categorize all prompts\n    const prompts = await Promise.all(promptIds.map(id => this.promptService.getPrompt(id)));\n    \n    const interfacePrompts = prompts.filter(p => p.tags?.includes('interfaces'));\n    const toolPrompts = prompts.filter(p => p.tags?.includes('tools'));\n    const resourcePrompts = prompts.filter(p => p.tags?.includes('resources'));\n    const configPrompts = prompts.filter(p => p.tags?.includes('configuration'));\n    const deploymentPrompts = prompts.filter(p => p.tags?.includes('deployment'));\n    \n    // Step 2: Apply variables to each prompt category\n    const variables = context?.variables || {};\n    \n    // Combine interface definitions\n    const interfaces = await this.combineCategory(interfacePrompts, variables);\n    \n    // Combine tool implementations\n    const toolsImplementation = await this.combineCategory(toolPrompts, variables);\n    \n    // Combine resource implementations\n    const resourcesImplementation = await this.combineCategory(resourcePrompts, variables);\n    \n    // Combine server configuration\n    const serverConfiguration = await this.combineCategory(configPrompts, variables);\n    \n    // Combine deployment configuration\n    const dockerConfiguration = await this.combineCategory(deploymentPrompts, variables);\n    \n    // Step 3: Create combined content with logical sections\n    const combinedContent = `\n# MCP Server Implementation for ${variables.project_name || 'Your Project'}\n\n## Overview\n\nThis guide provides a comprehensive implementation plan for an MCP server using ${variables.language || 'TypeScript'} and the MCP SDK version ${context?.sdkVersion || 
'latest'}.\n\n## Interface Definitions\n\n${interfaces.content}\n\n## Tools Implementation\n\n${toolsImplementation.content}\n\n## Resources Implementation\n\n${resourcesImplementation.content}\n\n## Server Configuration\n\n${serverConfiguration.content}\n\n## Deployment Configuration\n\n${dockerConfiguration.content}\n\n## Implementation Steps\n\n1. Set up the project structure\n2. Implement the interfaces\n3. Implement the MCP tools\n4. Implement the MCP resources\n5. Configure the server\n6. Set up deployment\n7. Implement tests\n8. Document the server\n    `;\n    \n    // Step 4: Return the comprehensive result\n    return {\n      content: combinedContent,\n      components: [\n        ...interfaces.components,\n        ...toolsImplementation.components,\n        ...resourcesImplementation.components,\n        ...serverConfiguration.components,\n        ...dockerConfiguration.components\n      ],\n      appliedVariables: variables,\n      interfaces: interfaces.content,\n      toolsImplementation: toolsImplementation.content,\n      resourcesImplementation: resourcesImplementation.content,\n      serverConfiguration: serverConfiguration.content,\n      dockerConfiguration: dockerConfiguration.content,\n      // Add suggestion for what to implement first\n      nextSteps: [\n        { action: 'implement_interfaces', description: 'Start by implementing the core interfaces' },\n        { action: 'implement_tools', description: 'Implement the MCP tools using the SDK' },\n        { action: 'implement_resources', description: 'Implement the MCP resources' },\n        { action: 'configure_server', description: 'Set up the server configuration' },\n        { action: 'setup_deployment', description: 'Configure Docker and deployment' }\n      ]\n    };\n  }\n  \n  /**\n   * Helper method to combine prompts in a specific category\n   * @param prompts Prompts in the category\n   * @param variables Variables to apply\n   * @returns Combined result for the category\n   
*/\n  private async combineCategory(prompts: Prompt[], variables: Record<string, any>): Promise<CombinedPromptResult> {\n    // Implementation would combine prompts within a category\n    // This is a simplified placeholder\n    let content = '';\n    const components: {id: string; name: string; contribution: string}[] = [];\n    \n    for (const prompt of prompts) {\n      const result = await this.promptService.applyTemplate(prompt.id, variables);\n      content += result.content + '\\n\\n';\n      components.push({\n        id: prompt.id,\n        name: prompt.name,\n        contribution: result.content\n      });\n    }\n    \n    return {\n      content: content.trim(),\n      components,\n      appliedVariables: variables\n    };\n  }\n  \n  /**\n   * Gets MCP server development prompt suggestions\n   * @param category Optional category to filter by\n   * @param context Current MCP context to inform suggestions\n   * @returns Array of prompt suggestions for MCP development\n   */\n  async getPromptSuggestions(category?: string, context?: MCPServerDevContext): Promise<PromptSuggestion[]> {\n    // Implementation would suggest prompts based on the current MCP development context\n    // For example, if building a tool-heavy server, suggest more tool-related prompts\n    // This is a placeholder for demonstration\n    \n    // In a real implementation, this would query the prompt service for relevant prompts\n    // based on the specific MCP development needs\n    \n    return [\n      {\n        id: 'consolidated-interfaces-template',\n        name: 'Consolidated TypeScript Interfaces',\n        relevance: 95,\n        compatibleWith: ['mcp-server-tools-implementation', 'docker-containerization-guide'],\n        reason: 'Provides the interface foundation for your MCP server'\n      },\n      {\n        id: 'mcp-server-tools-implementation',\n        name: 'MCP Server Tools Implementation',\n        relevance: 90,\n        compatibleWith: 
['consolidated-interfaces-template', 'mcp-server-resources-implementation'],\n        reason: `${context?.tools?.length || 0} tools need implementation in your server`\n      },\n      {\n        id: 'mcp-server-resources-implementation',\n        name: 'MCP Server Resources Implementation',\n        relevance: 85,\n        compatibleWith: ['consolidated-interfaces-template', 'mcp-server-tools-implementation'],\n        reason: `${context?.resources?.length || 0} resources need implementation in your server`\n      },\n      {\n        id: 'docker-containerization-guide',\n        name: 'Docker Containerization Guide',\n        relevance: context?.deploymentTarget === 'docker' ? 100 : 70,\n        compatibleWith: ['consolidated-interfaces-template'],\n        reason: 'Provides Docker deployment configuration for your MCP server'\n      },\n      {\n        id: 'development-system-prompt',\n        name: 'Development System Prompt',\n        relevance: 60,\n        compatibleWith: [],\n        reason: 'Helps with general development assistance for your MCP server'\n      }\n    ];\n  }\n  \n  /**\n   * Validates if the prompts can be combined for MCP server development\n   * @param promptIds Array of prompt IDs to validate\n   * @returns Validation result with any issues specific to MCP development\n   */\n  async validateCombination(promptIds: string[]): Promise<CombinationValidationResult> {\n    // Implementation would validate that the prompts make sense for MCP development\n    // For example, ensuring there are no conflicting tool definitions\n    // This is a placeholder for demonstration\n    \n    const prompts = await Promise.all(promptIds.map(id => this.promptService.getPrompt(id)));\n    \n    // Check for interface prompt\n    const hasInterface = prompts.some(p => p.tags?.includes('interfaces'));\n    if (!hasInterface) {\n      return {\n        isValid: false,\n        issues: [{\n          promptId: '',\n          issue: 'Missing interface 
definition prompt',\n          severity: 'error',\n          suggestion: 'Add a prompt with interface definitions, such as consolidated-interfaces-template'\n        }],\n        suggestions: [{\n          promptIds: [...promptIds, 'consolidated-interfaces-template'],\n          reason: 'Adding interface definitions is essential for MCP server development'\n        }]\n      };\n    }\n    \n    // In a real implementation, would do more validation specific to MCP development\n    \n    return {\n      isValid: true\n    };\n  }\n  \n  /**\n   * Creates a saved MCP server development workflow\n   * @param name Name for the new workflow\n   * @param promptIds Component prompt IDs\n   * @param config Configuration for the combination\n   * @returns The created MCP workflow\n   */\n  async saveWorkflow(name: string, promptIds: string[], config: WorkflowConfig): Promise<SavedWorkflow> {\n    // Implementation would save an MCP development workflow\n    // This is a placeholder for demonstration\n    \n    return {\n      id: `mcp-dev-workflow-${Date.now()}`,\n      name,\n      promptIds,\n      config,\n      createdAt: new Date().toISOString(),\n      updatedAt: new Date().toISOString(),\n      version: 1,\n      category: 'mcp-development',\n      tags: ['mcp', 'development', 'server']\n    };\n  }\n  \n  /**\n   * Loads a previously saved MCP server development workflow\n   * @param workflowId ID of the saved workflow\n   * @returns The loaded MCP workflow\n   */\n  async loadWorkflow(workflowId: string): Promise<SavedWorkflow> {\n    // Implementation would load an MCP development workflow\n    // This is a placeholder for demonstration\n    \n    throw new Error(`Workflow ${workflowId} not found or not implemented yet`);\n  }\n}\n\n/**\n * Usage Examples\n * \n * ```typescript\n * // Creating a combiner\n * const promptService = new PromptService(storageAdapter);\n * const mcpCombiner = new MCPServerDevPromptCombiner(promptService);\n * \n * // Getting prompt 
suggestions for MCP development\n * const suggestions = await mcpCombiner.getPromptSuggestions('tools', {\n *   technologies: {\n *     language: 'TypeScript',\n *     runtime: 'Node.js',\n *     frameworks: ['Express']\n *   },\n *   sdkVersion: '1.6.0',\n *   tools: [\n *     { name: 'get_document', description: 'Retrieve a document by ID' },\n *     { name: 'search_documents', description: 'Search for documents' }\n *   ],\n *   resources: [\n *     { protocol: 'document', description: 'Document resource protocol' }\n *   ],\n *   deploymentTarget: 'docker'\n * });\n * \n * // Combining prompts for MCP development\n * const result = await mcpCombiner.combinePrompts([\n *   'consolidated-interfaces-template',\n *   'mcp-server-tools-implementation',\n *   'docker-containerization-guide'\n * ], {\n *   variables: {\n *     project_name: 'Document Management MCP Server',\n *     language: 'TypeScript',\n *     primary_entity: 'Document',\n *     node_version: '20'\n *   },\n *   technologies: {\n *     language: 'TypeScript',\n *     runtime: 'Node.js',\n *     frameworks: ['Express']\n *   },\n *   sdkVersion: '1.6.0',\n *   deploymentTarget: 'docker'\n * });\n * \n * // Using the specialized result properties\n * console.log(result.interfaces); // Get just the interface definitions\n * console.log(result.toolsImplementation); // Get just the tools implementation\n * console.log(result.dockerConfiguration); // Get just the Docker configuration\n * ```\n */\n\n// ============================\n// Extension Guidelines\n// ============================\n\n/**\n * When extending MCPServerDevPromptCombiner, consider:\n * \n * 1. Adding support for specific MCP server types (e.g., FileSystem, GitHub, Memory)\n * 2. Enhancing the context with more MCP-specific properties\n * 3. Improving suggestion logic based on the development context\n * 4. Adding template validation specific to MCP compatibility\n * 5. {{additional_extension_guidelines}}\n */",
 7 |   "variables": [
 8 |     "project_name",
 9 |     "additional_mcp_context",
10 |     "additional_mcp_results",
11 |     "additional_extension_guidelines"
12 |   ],
13 |   "metadata": {
14 |     "source": "/home/sparrow/projects/mcp-prompts/prompts/mcp-server-dev-prompt-combiner.json",
15 |     "imported": true
16 |   }
17 | }
```

--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/prompts/MCP_Server_Development_Prompt_Combiner.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |   "name": "MCP Server Development Prompt Combiner",
 3 |   "description": "A specialized prompt combiner for MCP server development that integrates interface definitions, implementation patterns, and best practices",
 4 |   "type": "prompt",
 5 |   "category": "development",
 6 |   "content": "/**\n * MCPServerDevPromptCombiner for {{project_name}}\n * \n * A specialized implementation of the PromptCombiner interface\n * focused on combining prompts for MCP server development workflows.\n */\n\nimport { PromptCombiner, CombinerContext, CombinedPromptResult, PromptSuggestion, CombinationValidationResult, WorkflowConfig, SavedWorkflow } from './prompt-combiner-interface';\nimport { PromptService } from '../services/prompt-service';\nimport { Prompt } from '../core/types';\n\n/**\n * MCP Server Development specific context\n */\nexport interface MCPServerDevContext extends CombinerContext {\n  /** Server configuration */\n  serverConfig?: {\n    name: string;\n    version: string;\n    capabilities: string[];\n  };\n  \n  /** Core technologies being used */\n  technologies: {\n    language: string;\n    runtime: string;\n    frameworks: string[];\n  };\n  \n  /** MCP Server SDK version */\n  sdkVersion: string;\n  \n  /** Tools to be implemented */\n  tools?: {\n    name: string;\n    description: string;\n    parameters?: Record<string, any>;\n  }[];\n  \n  /** Resources to be implemented */\n  resources?: {\n    protocol: string;\n    description: string;\n  }[];\n  \n  /** Deployment target environment */\n  deploymentTarget?: 'docker' | 'kubernetes' | 'serverless' | 'standalone';\n  \n  /** Additional MCP-specific context */\n  {{additional_mcp_context}}\n}\n\n/**\n * Specialized result for MCP Server development combinations\n */\nexport interface MCPServerDevResult extends CombinedPromptResult {\n  /** Generated interface definitions */\n  interfaces?: string;\n  \n  /** Generated MCP tools implementation */\n  toolsImplementation?: string;\n  \n  /** Generated MCP resources implementation */\n  resourcesImplementation?: string;\n  \n  /** Server configuration */\n  serverConfiguration?: string;\n  \n  /** Client integration examples */\n  clientExamples?: string;\n  \n  /** Testing approach */\n  testingApproach?: string;\n  \n  
/** Dockerfile and Docker Compose configuration */\n  dockerConfiguration?: string;\n  \n  /** Additional MCP-specific results */\n  {{additional_mcp_results}}\n}\n\n/**\n * Implementation of MCPServerDevPromptCombiner\n */\nexport class MCPServerDevPromptCombiner implements PromptCombiner {\n  constructor(private promptService: PromptService) {}\n  \n  /**\n   * Combines MCP server development prompts\n   * @param promptIds Array of prompt IDs to combine\n   * @param context Optional MCP server development context\n   * @returns Combined MCP server development result\n   */\n  async combinePrompts(promptIds: string[], context?: MCPServerDevContext): Promise<MCPServerDevResult> {\n    // Implementation would include:\n    // 1. Validating the prompts are compatible for MCP development\n    // 2. Organizing prompts into logical sections (interfaces, tools, resources, etc.)\n    // 3. Resolving dependencies between prompts\n    // 4. Applying variables with MCP-specific knowledge\n    // 5. 
Generating a comprehensive server implementation guide\n    \n    // This is a template structure - in a real implementation, this would contain\n    // the actual logic for combining MCP server development prompts\n    \n    // For now, we'll outline the structure of how the implementation would work\n    \n    // Step 1: Load and categorize all prompts\n    const prompts = await Promise.all(promptIds.map(id => this.promptService.getPrompt(id)));\n    \n    const interfacePrompts = prompts.filter(p => p.tags?.includes('interfaces'));\n    const toolPrompts = prompts.filter(p => p.tags?.includes('tools'));\n    const resourcePrompts = prompts.filter(p => p.tags?.includes('resources'));\n    const configPrompts = prompts.filter(p => p.tags?.includes('configuration'));\n    const deploymentPrompts = prompts.filter(p => p.tags?.includes('deployment'));\n    \n    // Step 2: Apply variables to each prompt category\n    const variables = context?.variables || {};\n    \n    // Combine interface definitions\n    const interfaces = await this.combineCategory(interfacePrompts, variables);\n    \n    // Combine tool implementations\n    const toolsImplementation = await this.combineCategory(toolPrompts, variables);\n    \n    // Combine resource implementations\n    const resourcesImplementation = await this.combineCategory(resourcePrompts, variables);\n    \n    // Combine server configuration\n    const serverConfiguration = await this.combineCategory(configPrompts, variables);\n    \n    // Combine deployment configuration\n    const dockerConfiguration = await this.combineCategory(deploymentPrompts, variables);\n    \n    // Step 3: Create combined content with logical sections\n    const combinedContent = `\n# MCP Server Implementation for ${variables.project_name || 'Your Project'}\n\n## Overview\n\nThis guide provides a comprehensive implementation plan for an MCP server using ${variables.language || 'TypeScript'} and the MCP SDK version ${context?.sdkVersion || 
'latest'}.\n\n## Interface Definitions\n\n${interfaces.content}\n\n## Tools Implementation\n\n${toolsImplementation.content}\n\n## Resources Implementation\n\n${resourcesImplementation.content}\n\n## Server Configuration\n\n${serverConfiguration.content}\n\n## Deployment Configuration\n\n${dockerConfiguration.content}\n\n## Implementation Steps\n\n1. Set up the project structure\n2. Implement the interfaces\n3. Implement the MCP tools\n4. Implement the MCP resources\n5. Configure the server\n6. Set up deployment\n7. Implement tests\n8. Document the server\n    `;\n    \n    // Step 4: Return the comprehensive result\n    return {\n      content: combinedContent,\n      components: [\n        ...interfaces.components,\n        ...toolsImplementation.components,\n        ...resourcesImplementation.components,\n        ...serverConfiguration.components,\n        ...dockerConfiguration.components\n      ],\n      appliedVariables: variables,\n      interfaces: interfaces.content,\n      toolsImplementation: toolsImplementation.content,\n      resourcesImplementation: resourcesImplementation.content,\n      serverConfiguration: serverConfiguration.content,\n      dockerConfiguration: dockerConfiguration.content,\n      // Add suggestion for what to implement first\n      nextSteps: [\n        { action: 'implement_interfaces', description: 'Start by implementing the core interfaces' },\n        { action: 'implement_tools', description: 'Implement the MCP tools using the SDK' },\n        { action: 'implement_resources', description: 'Implement the MCP resources' },\n        { action: 'configure_server', description: 'Set up the server configuration' },\n        { action: 'setup_deployment', description: 'Configure Docker and deployment' }\n      ]\n    };\n  }\n  \n  /**\n   * Helper method to combine prompts in a specific category\n   * @param prompts Prompts in the category\n   * @param variables Variables to apply\n   * @returns Combined result for the category\n   
*/\n  private async combineCategory(prompts: Prompt[], variables: Record<string, any>): Promise<CombinedPromptResult> {\n    // Implementation would combine prompts within a category\n    // This is a simplified placeholder\n    let content = '';\n    const components: {id: string; name: string; contribution: string}[] = [];\n    \n    for (const prompt of prompts) {\n      const result = await this.promptService.applyTemplate(prompt.id, variables);\n      content += result.content + '\\n\\n';\n      components.push({\n        id: prompt.id,\n        name: prompt.name,\n        contribution: result.content\n      });\n    }\n    \n    return {\n      content: content.trim(),\n      components,\n      appliedVariables: variables\n    };\n  }\n  \n  /**\n   * Gets MCP server development prompt suggestions\n   * @param category Optional category to filter by\n   * @param context Current MCP context to inform suggestions\n   * @returns Array of prompt suggestions for MCP development\n   */\n  async getPromptSuggestions(category?: string, context?: MCPServerDevContext): Promise<PromptSuggestion[]> {\n    // Implementation would suggest prompts based on the current MCP development context\n    // For example, if building a tool-heavy server, suggest more tool-related prompts\n    // This is a placeholder for demonstration\n    \n    // In a real implementation, this would query the prompt service for relevant prompts\n    // based on the specific MCP development needs\n    \n    return [\n      {\n        id: 'consolidated-interfaces-template',\n        name: 'Consolidated TypeScript Interfaces',\n        relevance: 95,\n        compatibleWith: ['mcp-server-tools-implementation', 'docker-containerization-guide'],\n        reason: 'Provides the interface foundation for your MCP server'\n      },\n      {\n        id: 'mcp-server-tools-implementation',\n        name: 'MCP Server Tools Implementation',\n        relevance: 90,\n        compatibleWith: 
['consolidated-interfaces-template', 'mcp-server-resources-implementation'],\n        reason: `${context?.tools?.length || 0} tools need implementation in your server`\n      },\n      {\n        id: 'mcp-server-resources-implementation',\n        name: 'MCP Server Resources Implementation',\n        relevance: 85,\n        compatibleWith: ['consolidated-interfaces-template', 'mcp-server-tools-implementation'],\n        reason: `${context?.resources?.length || 0} resources need implementation in your server`\n      },\n      {\n        id: 'docker-containerization-guide',\n        name: 'Docker Containerization Guide',\n        relevance: context?.deploymentTarget === 'docker' ? 100 : 70,\n        compatibleWith: ['consolidated-interfaces-template'],\n        reason: 'Provides Docker deployment configuration for your MCP server'\n      },\n      {\n        id: 'development-system-prompt',\n        name: 'Development System Prompt',\n        relevance: 60,\n        compatibleWith: [],\n        reason: 'Helps with general development assistance for your MCP server'\n      }\n    ];\n  }\n  \n  /**\n   * Validates if the prompts can be combined for MCP server development\n   * @param promptIds Array of prompt IDs to validate\n   * @returns Validation result with any issues specific to MCP development\n   */\n  async validateCombination(promptIds: string[]): Promise<CombinationValidationResult> {\n    // Implementation would validate that the prompts make sense for MCP development\n    // For example, ensuring there are no conflicting tool definitions\n    // This is a placeholder for demonstration\n    \n    const prompts = await Promise.all(promptIds.map(id => this.promptService.getPrompt(id)));\n    \n    // Check for interface prompt\n    const hasInterface = prompts.some(p => p.tags?.includes('interfaces'));\n    if (!hasInterface) {\n      return {\n        isValid: false,\n        issues: [{\n          promptId: '',\n          issue: 'Missing interface 
definition prompt',\n          severity: 'error',\n          suggestion: 'Add a prompt with interface definitions, such as consolidated-interfaces-template'\n        }],\n        suggestions: [{\n          promptIds: [...promptIds, 'consolidated-interfaces-template'],\n          reason: 'Adding interface definitions is essential for MCP server development'\n        }]\n      };\n    }\n    \n    // In a real implementation, would do more validation specific to MCP development\n    \n    return {\n      isValid: true\n    };\n  }\n  \n  /**\n   * Creates a saved MCP server development workflow\n   * @param name Name for the new workflow\n   * @param promptIds Component prompt IDs\n   * @param config Configuration for the combination\n   * @returns The created MCP workflow\n   */\n  async saveWorkflow(name: string, promptIds: string[], config: WorkflowConfig): Promise<SavedWorkflow> {\n    // Implementation would save an MCP development workflow\n    // This is a placeholder for demonstration\n    \n    return {\n      id: `mcp-dev-workflow-${Date.now()}`,\n      name,\n      promptIds,\n      config,\n      createdAt: new Date().toISOString(),\n      updatedAt: new Date().toISOString(),\n      version: 1,\n      category: 'mcp-development',\n      tags: ['mcp', 'development', 'server']\n    };\n  }\n  \n  /**\n   * Loads a previously saved MCP server development workflow\n   * @param workflowId ID of the saved workflow\n   * @returns The loaded MCP workflow\n   */\n  async loadWorkflow(workflowId: string): Promise<SavedWorkflow> {\n    // Implementation would load an MCP development workflow\n    // This is a placeholder for demonstration\n    \n    throw new Error(`Workflow ${workflowId} not found or not implemented yet`);\n  }\n}\n\n/**\n * Usage Examples\n * \n * ```typescript\n * // Creating a combiner\n * const promptService = new PromptService(storageAdapter);\n * const mcpCombiner = new MCPServerDevPromptCombiner(promptService);\n * \n * // Getting prompt 
suggestions for MCP development\n * const suggestions = await mcpCombiner.getPromptSuggestions('tools', {\n *   technologies: {\n *     language: 'TypeScript',\n *     runtime: 'Node.js',\n *     frameworks: ['Express']\n *   },\n *   sdkVersion: '1.6.0',\n *   tools: [\n *     { name: 'get_document', description: 'Retrieve a document by ID' },\n *     { name: 'search_documents', description: 'Search for documents' }\n *   ],\n *   resources: [\n *     { protocol: 'document', description: 'Document resource protocol' }\n *   ],\n *   deploymentTarget: 'docker'\n * });\n * \n * // Combining prompts for MCP development\n * const result = await mcpCombiner.combinePrompts([\n *   'consolidated-interfaces-template',\n *   'mcp-server-tools-implementation',\n *   'docker-containerization-guide'\n * ], {\n *   variables: {\n *     project_name: 'Document Management MCP Server',\n *     language: 'TypeScript',\n *     primary_entity: 'Document',\n *     node_version: '20'\n *   },\n *   technologies: {\n *     language: 'TypeScript',\n *     runtime: 'Node.js',\n *     frameworks: ['Express']\n *   },\n *   sdkVersion: '1.6.0',\n *   deploymentTarget: 'docker'\n * });\n * \n * // Using the specialized result properties\n * console.log(result.interfaces); // Get just the interface definitions\n * console.log(result.toolsImplementation); // Get just the tools implementation\n * console.log(result.dockerConfiguration); // Get just the Docker configuration\n * ```\n */\n\n// ============================\n// Extension Guidelines\n// ============================\n\n/**\n * When extending MCPServerDevPromptCombiner, consider:\n * \n * 1. Adding support for specific MCP server types (e.g., FileSystem, GitHub, Memory)\n * 2. Enhancing the context with more MCP-specific properties\n * 3. Improving suggestion logic based on the development context\n * 4. Adding template validation specific to MCP compatibility\n * 5. {{additional_extension_guidelines}}\n */",
 7 |   "variables": [
 8 |     "project_name",
 9 |     "additional_mcp_context",
10 |     "additional_mcp_results",
11 |     "additional_extension_guidelines"
12 |   ],
13 |   "metadata": {
14 |     "source": "/home/sparrow/projects/mcp-prompts/prompts/mcp-server-dev-prompt-combiner.json",
15 |     "imported": true
16 |   }
17 | }
```

--------------------------------------------------------------------------------
/data/prompts/templates/mcp-server-dev-prompt-combiner.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |   "id": "mcp-server-dev-prompt-combiner",
 3 |   "name": "MCP Server Development Prompt Combiner",
 4 |   "description": "A specialized prompt combiner for MCP server development that integrates interface definitions, implementation patterns, and best practices",
 5 |   "content": "/**\n * MCPServerDevPromptCombiner for {{project_name}}\n * \n * A specialized implementation of the PromptCombiner interface\n * focused on combining prompts for MCP server development workflows.\n */\n\nimport { PromptCombiner, CombinerContext, CombinedPromptResult, PromptSuggestion, CombinationValidationResult, WorkflowConfig, SavedWorkflow } from './prompt-combiner-interface';\nimport { PromptService } from '../services/prompt-service';\nimport { Prompt } from '../core/types';\n\n/**\n * MCP Server Development specific context\n */\nexport interface MCPServerDevContext extends CombinerContext {\n  /** Server configuration */\n  serverConfig?: {\n    name: string;\n    version: string;\n    capabilities: string[];\n  };\n  \n  /** Core technologies being used */\n  technologies: {\n    language: string;\n    runtime: string;\n    frameworks: string[];\n  };\n  \n  /** MCP Server SDK version */\n  sdkVersion: string;\n  \n  /** Tools to be implemented */\n  tools?: {\n    name: string;\n    description: string;\n    parameters?: Record<string, any>;\n  }[];\n  \n  /** Resources to be implemented */\n  resources?: {\n    protocol: string;\n    description: string;\n  }[];\n  \n  /** Deployment target environment */\n  deploymentTarget?: 'docker' | 'kubernetes' | 'serverless' | 'standalone';\n  \n  /** Additional MCP-specific context */\n  {{additional_mcp_context}}\n}\n\n/**\n * Specialized result for MCP Server development combinations\n */\nexport interface MCPServerDevResult extends CombinedPromptResult {\n  /** Generated interface definitions */\n  interfaces?: string;\n  \n  /** Generated MCP tools implementation */\n  toolsImplementation?: string;\n  \n  /** Generated MCP resources implementation */\n  resourcesImplementation?: string;\n  \n  /** Server configuration */\n  serverConfiguration?: string;\n  \n  /** Client integration examples */\n  clientExamples?: string;\n  \n  /** Testing approach */\n  testingApproach?: string;\n  \n  
/** Dockerfile and Docker Compose configuration */\n  dockerConfiguration?: string;\n  \n  /** Additional MCP-specific results */\n  {{additional_mcp_results}}\n}\n\n/**\n * Implementation of MCPServerDevPromptCombiner\n */\nexport class MCPServerDevPromptCombiner implements PromptCombiner {\n  constructor(private promptService: PromptService) {}\n  \n  /**\n   * Combines MCP server development prompts\n   * @param promptIds Array of prompt IDs to combine\n   * @param context Optional MCP server development context\n   * @returns Combined MCP server development result\n   */\n  async combinePrompts(promptIds: string[], context?: MCPServerDevContext): Promise<MCPServerDevResult> {\n    // Implementation would include:\n    // 1. Validating the prompts are compatible for MCP development\n    // 2. Organizing prompts into logical sections (interfaces, tools, resources, etc.)\n    // 3. Resolving dependencies between prompts\n    // 4. Applying variables with MCP-specific knowledge\n    // 5. 
Generating a comprehensive server implementation guide\n    \n    // This is a template structure - in a real implementation, this would contain\n    // the actual logic for combining MCP server development prompts\n    \n    // For now, we'll outline the structure of how the implementation would work\n    \n    // Step 1: Load and categorize all prompts\n    const prompts = await Promise.all(promptIds.map(id => this.promptService.getPrompt(id)));\n    \n    const interfacePrompts = prompts.filter(p => p.tags?.includes('interfaces'));\n    const toolPrompts = prompts.filter(p => p.tags?.includes('tools'));\n    const resourcePrompts = prompts.filter(p => p.tags?.includes('resources'));\n    const configPrompts = prompts.filter(p => p.tags?.includes('configuration'));\n    const deploymentPrompts = prompts.filter(p => p.tags?.includes('deployment'));\n    \n    // Step 2: Apply variables to each prompt category\n    const variables = context?.variables || {};\n    \n    // Combine interface definitions\n    const interfaces = await this.combineCategory(interfacePrompts, variables);\n    \n    // Combine tool implementations\n    const toolsImplementation = await this.combineCategory(toolPrompts, variables);\n    \n    // Combine resource implementations\n    const resourcesImplementation = await this.combineCategory(resourcePrompts, variables);\n    \n    // Combine server configuration\n    const serverConfiguration = await this.combineCategory(configPrompts, variables);\n    \n    // Combine deployment configuration\n    const dockerConfiguration = await this.combineCategory(deploymentPrompts, variables);\n    \n    // Step 3: Create combined content with logical sections\n    const combinedContent = `\n# MCP Server Implementation for ${variables.project_name || 'Your Project'}\n\n## Overview\n\nThis guide provides a comprehensive implementation plan for an MCP server using ${variables.language || 'TypeScript'} and the MCP SDK version ${context?.sdkVersion || 
'latest'}.\n\n## Interface Definitions\n\n${interfaces.content}\n\n## Tools Implementation\n\n${toolsImplementation.content}\n\n## Resources Implementation\n\n${resourcesImplementation.content}\n\n## Server Configuration\n\n${serverConfiguration.content}\n\n## Deployment Configuration\n\n${dockerConfiguration.content}\n\n## Implementation Steps\n\n1. Set up the project structure\n2. Implement the interfaces\n3. Implement the MCP tools\n4. Implement the MCP resources\n5. Configure the server\n6. Set up deployment\n7. Implement tests\n8. Document the server\n    `;\n    \n    // Step 4: Return the comprehensive result\n    return {\n      content: combinedContent,\n      components: [\n        ...interfaces.components,\n        ...toolsImplementation.components,\n        ...resourcesImplementation.components,\n        ...serverConfiguration.components,\n        ...dockerConfiguration.components\n      ],\n      appliedVariables: variables,\n      interfaces: interfaces.content,\n      toolsImplementation: toolsImplementation.content,\n      resourcesImplementation: resourcesImplementation.content,\n      serverConfiguration: serverConfiguration.content,\n      dockerConfiguration: dockerConfiguration.content,\n      // Add suggestion for what to implement first\n      nextSteps: [\n        { action: 'implement_interfaces', description: 'Start by implementing the core interfaces' },\n        { action: 'implement_tools', description: 'Implement the MCP tools using the SDK' },\n        { action: 'implement_resources', description: 'Implement the MCP resources' },\n        { action: 'configure_server', description: 'Set up the server configuration' },\n        { action: 'setup_deployment', description: 'Configure Docker and deployment' }\n      ]\n    };\n  }\n  \n  /**\n   * Helper method to combine prompts in a specific category\n   * @param prompts Prompts in the category\n   * @param variables Variables to apply\n   * @returns Combined result for the category\n   
*/\n  private async combineCategory(prompts: Prompt[], variables: Record<string, any>): Promise<CombinedPromptResult> {\n    // Implementation would combine prompts within a category\n    // This is a simplified placeholder\n    let content = '';\n    const components: {id: string; name: string; contribution: string}[] = [];\n    \n    for (const prompt of prompts) {\n      const result = await this.promptService.applyTemplate(prompt.id, variables);\n      content += result.content + '\\n\\n';\n      components.push({\n        id: prompt.id,\n        name: prompt.name,\n        contribution: result.content\n      });\n    }\n    \n    return {\n      content: content.trim(),\n      components,\n      appliedVariables: variables\n    };\n  }\n  \n  /**\n   * Gets MCP server development prompt suggestions\n   * @param category Optional category to filter by\n   * @param context Current MCP context to inform suggestions\n   * @returns Array of prompt suggestions for MCP development\n   */\n  async getPromptSuggestions(category?: string, context?: MCPServerDevContext): Promise<PromptSuggestion[]> {\n    // Implementation would suggest prompts based on the current MCP development context\n    // For example, if building a tool-heavy server, suggest more tool-related prompts\n    // This is a placeholder for demonstration\n    \n    // In a real implementation, this would query the prompt service for relevant prompts\n    // based on the specific MCP development needs\n    \n    return [\n      {\n        id: 'consolidated-interfaces-template',\n        name: 'Consolidated TypeScript Interfaces',\n        relevance: 95,\n        compatibleWith: ['mcp-server-tools-implementation', 'docker-containerization-guide'],\n        reason: 'Provides the interface foundation for your MCP server'\n      },\n      {\n        id: 'mcp-server-tools-implementation',\n        name: 'MCP Server Tools Implementation',\n        relevance: 90,\n        compatibleWith: 
['consolidated-interfaces-template', 'mcp-server-resources-implementation'],\n        reason: `${context?.tools?.length || 0} tools need implementation in your server`\n      },\n      {\n        id: 'mcp-server-resources-implementation',\n        name: 'MCP Server Resources Implementation',\n        relevance: 85,\n        compatibleWith: ['consolidated-interfaces-template', 'mcp-server-tools-implementation'],\n        reason: `${context?.resources?.length || 0} resources need implementation in your server`\n      },\n      {\n        id: 'docker-containerization-guide',\n        name: 'Docker Containerization Guide',\n        relevance: context?.deploymentTarget === 'docker' ? 100 : 70,\n        compatibleWith: ['consolidated-interfaces-template'],\n        reason: 'Provides Docker deployment configuration for your MCP server'\n      },\n      {\n        id: 'development-system-prompt',\n        name: 'Development System Prompt',\n        relevance: 60,\n        compatibleWith: [],\n        reason: 'Helps with general development assistance for your MCP server'\n      }\n    ];\n  }\n  \n  /**\n   * Validates if the prompts can be combined for MCP server development\n   * @param promptIds Array of prompt IDs to validate\n   * @returns Validation result with any issues specific to MCP development\n   */\n  async validateCombination(promptIds: string[]): Promise<CombinationValidationResult> {\n    // Implementation would validate that the prompts make sense for MCP development\n    // For example, ensuring there are no conflicting tool definitions\n    // This is a placeholder for demonstration\n    \n    const prompts = await Promise.all(promptIds.map(id => this.promptService.getPrompt(id)));\n    \n    // Check for interface prompt\n    const hasInterface = prompts.some(p => p.tags?.includes('interfaces'));\n    if (!hasInterface) {\n      return {\n        isValid: false,\n        issues: [{\n          promptId: '',\n          issue: 'Missing interface 
definition prompt',\n          severity: 'error',\n          suggestion: 'Add a prompt with interface definitions, such as consolidated-interfaces-template'\n        }],\n        suggestions: [{\n          promptIds: [...promptIds, 'consolidated-interfaces-template'],\n          reason: 'Adding interface definitions is essential for MCP server development'\n        }]\n      };\n    }\n    \n    // In a real implementation, would do more validation specific to MCP development\n    \n    return {\n      isValid: true\n    };\n  }\n  \n  /**\n   * Creates a saved MCP server development workflow\n   * @param name Name for the new workflow\n   * @param promptIds Component prompt IDs\n   * @param config Configuration for the combination\n   * @returns The created MCP workflow\n   */\n  async saveWorkflow(name: string, promptIds: string[], config: WorkflowConfig): Promise<SavedWorkflow> {\n    // Implementation would save an MCP development workflow\n    // This is a placeholder for demonstration\n    \n    return {\n      id: `mcp-dev-workflow-${Date.now()}`,\n      name,\n      promptIds,\n      config,\n      createdAt: new Date().toISOString(),\n      updatedAt: new Date().toISOString(),\n      version: 1,\n      category: 'mcp-development',\n      tags: ['mcp', 'development', 'server']\n    };\n  }\n  \n  /**\n   * Loads a previously saved MCP server development workflow\n   * @param workflowId ID of the saved workflow\n   * @returns The loaded MCP workflow\n   */\n  async loadWorkflow(workflowId: string): Promise<SavedWorkflow> {\n    // Implementation would load an MCP development workflow\n    // This is a placeholder for demonstration\n    \n    throw new Error(`Workflow ${workflowId} not found or not implemented yet`);\n  }\n}\n\n/**\n * Usage Examples\n * \n * ```typescript\n * // Creating a combiner\n * const promptService = new PromptService(storageAdapter);\n * const mcpCombiner = new MCPServerDevPromptCombiner(promptService);\n * \n * // Getting prompt 
suggestions for MCP development\n * const suggestions = await mcpCombiner.getPromptSuggestions('tools', {\n *   technologies: {\n *     language: 'TypeScript',\n *     runtime: 'Node.js',\n *     frameworks: ['Express']\n *   },\n *   sdkVersion: '1.6.0',\n *   tools: [\n *     { name: 'get_document', description: 'Retrieve a document by ID' },\n *     { name: 'search_documents', description: 'Search for documents' }\n *   ],\n *   resources: [\n *     { protocol: 'document', description: 'Document resource protocol' }\n *   ],\n *   deploymentTarget: 'docker'\n * });\n * \n * // Combining prompts for MCP development\n * const result = await mcpCombiner.combinePrompts([\n *   'consolidated-interfaces-template',\n *   'mcp-server-tools-implementation',\n *   'docker-containerization-guide'\n * ], {\n *   variables: {\n *     project_name: 'Document Management MCP Server',\n *     language: 'TypeScript',\n *     primary_entity: 'Document',\n *     node_version: '20'\n *   },\n *   technologies: {\n *     language: 'TypeScript',\n *     runtime: 'Node.js',\n *     frameworks: ['Express']\n *   },\n *   sdkVersion: '1.6.0',\n *   deploymentTarget: 'docker'\n * });\n * \n * // Using the specialized result properties\n * console.log(result.interfaces); // Get just the interface definitions\n * console.log(result.toolsImplementation); // Get just the tools implementation\n * console.log(result.dockerConfiguration); // Get just the Docker configuration\n * ```\n */\n\n// ============================\n// Extension Guidelines\n// ============================\n\n/**\n * When extending MCPServerDevPromptCombiner, consider:\n * \n * 1. Adding support for specific MCP server types (e.g., FileSystem, GitHub, Memory)\n * 2. Enhancing the context with more MCP-specific properties\n * 3. Improving suggestion logic based on the development context\n * 4. Adding template validation specific to MCP compatibility\n * 5. {{additional_extension_guidelines}}\n */",
 6 |   "isTemplate": true,
 7 |   "variables": [
 8 |     "project_name",
 9 |     "additional_mcp_context",
10 |     "additional_mcp_results",
11 |     "additional_extension_guidelines"
12 |   ],
13 |   "tags": [
14 |     "development",
15 |     "mcp",
16 |     "server",
17 |     "prompt-engineering",
18 |     "integration"
19 |   ],
20 |   "category": "development",
21 |   "createdAt": "2024-08-08T17:15:00.000Z",
22 |   "updatedAt": "2024-08-08T17:15:00.000Z",
23 |   "version": 1
24 | } 
```

--------------------------------------------------------------------------------
/printcast-agent/src/integrations/asterisk.py:
--------------------------------------------------------------------------------

```python
  1 | """
  2 | Asterisk SIP integration for PrintCast Agent.
  3 | 
  4 | Handles telephony operations including:
  5 | - Call routing and management
  6 | - IVR interactions
  7 | - DTMF processing
  8 | - Call recording
  9 | """
 10 | 
 11 | import asyncio
 12 | import logging
 13 | from typing import Any, Callable, Dict, List, Optional
 14 | from datetime import datetime
 15 | 
 16 | import structlog
 17 | from panoramisk import Manager
 18 | from pydantic import BaseModel, Field
 19 | 
 20 | logger = structlog.get_logger(__name__)
 21 | 
 22 | 
class CallInfo(BaseModel):
    """Information about an active call."""
    
    # Asterisk channel name for the call leg.
    channel: str
    # Caller ID number presented by the remote party; updated on
    # NewCallerid events.
    caller_id: str
    # Extension/number that was dialed (AMI "Exten" header).
    called_number: str
    # When the channel was first observed by this manager.
    start_time: datetime
    # Current call state (e.g. "ringing" when the channel is created).
    state: str
    # Asterisk unique identifier ("Uniqueid") used as the tracking key.
    unique_id: str
    # Free-form per-call data, e.g. the accumulated "dtmf_buffer".
    metadata: Dict[str, Any] = Field(default_factory=dict)
 33 | 
 34 | 
 35 | class AsteriskManager:
 36 |     """
 37 |     Manages Asterisk SIP server integration.
 38 |     
 39 |     Provides high-level interface for:
 40 |     - AMI (Asterisk Manager Interface) operations
 41 |     - Call control and routing
 42 |     - IVR menu handling
 43 |     - DTMF input processing
 44 |     """
 45 |     
 46 |     def __init__(self, config: Dict[str, Any]):
 47 |         """
 48 |         Initialize Asterisk manager.
 49 |         
 50 |         Args:
 51 |             config: Asterisk configuration including:
 52 |                 - host: Asterisk server hostname
 53 |                 - port: AMI port (default 5038)
 54 |                 - username: AMI username
 55 |                 - password: AMI password
 56 |                 - context: Default dialplan context
 57 |         """
 58 |         self.config = config
 59 |         self.ami: Optional[Manager] = None
 60 |         self.connected = False
 61 |         self.active_calls: Dict[str, CallInfo] = {}
 62 |         self.event_handlers: Dict[str, List[Callable]] = {}
 63 |         
 64 |         # Configuration
 65 |         self.host = config.get("host", "localhost")
 66 |         self.port = config.get("port", 5038)
 67 |         self.username = config.get("username", "admin")
 68 |         self.password = config.get("password", "")
 69 |         self.context = config.get("context", "printcast-ivr")
 70 |         
 71 |         logger.info(
 72 |             "Asterisk manager initialized",
 73 |             host=self.host,
 74 |             port=self.port,
 75 |             context=self.context
 76 |         )
 77 |     
 78 |     async def connect(self) -> bool:
 79 |         """
 80 |         Connect to Asterisk AMI.
 81 |         
 82 |         Returns:
 83 |             True if connection successful
 84 |         """
 85 |         try:
 86 |             self.ami = Manager(
 87 |                 host=self.host,
 88 |                 port=self.port,
 89 |                 username=self.username,
 90 |                 secret=self.password
 91 |             )
 92 |             
 93 |             # Connect to AMI
 94 |             await self.ami.connect()
 95 |             
 96 |             # Register event handlers
 97 |             self.ami.register_event("*", self._handle_ami_event)
 98 |             
 99 |             self.connected = True
100 |             logger.info("Connected to Asterisk AMI", host=self.host)
101 |             
102 |             return True
103 |             
104 |         except Exception as e:
105 |             logger.error("Failed to connect to Asterisk", error=str(e))
106 |             self.connected = False
107 |             return False
108 |     
109 |     async def disconnect(self):
110 |         """Disconnect from Asterisk AMI."""
111 |         if self.ami and self.connected:
112 |             try:
113 |                 await self.ami.logoff()
114 |                 await self.ami.close()
115 |                 self.connected = False
116 |                 logger.info("Disconnected from Asterisk")
117 |             except Exception as e:
118 |                 logger.error("Error disconnecting from Asterisk", error=str(e))
119 |     
120 |     def is_connected(self) -> bool:
121 |         """Check if connected to Asterisk."""
122 |         return self.connected
123 |     
124 |     async def _handle_ami_event(self, event: Dict[str, Any]):
125 |         """
126 |         Handle AMI events.
127 |         
128 |         Args:
129 |             event: AMI event data
130 |         """
131 |         event_type = event.get("Event", "")
132 |         
133 |         try:
134 |             # Handle specific events
135 |             if event_type == "Newchannel":
136 |                 await self._handle_new_channel(event)
137 |             elif event_type == "Hangup":
138 |                 await self._handle_hangup(event)
139 |             elif event_type == "DTMF":
140 |                 await self._handle_dtmf(event)
141 |             elif event_type == "NewCallerid":
142 |                 await self._handle_caller_id(event)
143 |             
144 |             # Call registered handlers
145 |             if event_type in self.event_handlers:
146 |                 for handler in self.event_handlers[event_type]:
147 |                     asyncio.create_task(handler(event))
148 |                     
149 |         except Exception as e:
150 |             logger.error(
151 |                 "Error handling AMI event",
152 |                 event_type=event_type,
153 |                 error=str(e)
154 |             )
155 |     
156 |     async def _handle_new_channel(self, event: Dict[str, Any]):
157 |         """Handle new channel creation."""
158 |         channel = event.get("Channel", "")
159 |         caller_id = event.get("CallerIDNum", "")
160 |         unique_id = event.get("Uniqueid", "")
161 |         
162 |         call_info = CallInfo(
163 |             channel=channel,
164 |             caller_id=caller_id,
165 |             called_number=event.get("Exten", ""),
166 |             start_time=datetime.now(),
167 |             state="ringing",
168 |             unique_id=unique_id
169 |         )
170 |         
171 |         self.active_calls[unique_id] = call_info
172 |         
173 |         logger.info(
174 |             "New call detected",
175 |             channel=channel,
176 |             caller_id=caller_id,
177 |             unique_id=unique_id
178 |         )
179 |     
180 |     async def _handle_hangup(self, event: Dict[str, Any]):
181 |         """Handle call hangup."""
182 |         unique_id = event.get("Uniqueid", "")
183 |         
184 |         if unique_id in self.active_calls:
185 |             call_info = self.active_calls[unique_id]
186 |             duration = (datetime.now() - call_info.start_time).total_seconds()
187 |             
188 |             logger.info(
189 |                 "Call ended",
190 |                 unique_id=unique_id,
191 |                 duration=duration,
192 |                 caller_id=call_info.caller_id
193 |             )
194 |             
195 |             del self.active_calls[unique_id]
196 |     
197 |     async def _handle_dtmf(self, event: Dict[str, Any]):
198 |         """Handle DTMF digit press."""
199 |         digit = event.get("Digit", "")
200 |         unique_id = event.get("Uniqueid", "")
201 |         
202 |         if unique_id in self.active_calls:
203 |             call_info = self.active_calls[unique_id]
204 |             
205 |             # Store DTMF in metadata
206 |             if "dtmf_buffer" not in call_info.metadata:
207 |                 call_info.metadata["dtmf_buffer"] = ""
208 |             
209 |             call_info.metadata["dtmf_buffer"] += digit
210 |             
211 |             logger.debug(
212 |                 "DTMF received",
213 |                 digit=digit,
214 |                 unique_id=unique_id,
215 |                 buffer=call_info.metadata["dtmf_buffer"]
216 |             )
217 |     
218 |     async def _handle_caller_id(self, event: Dict[str, Any]):
219 |         """Handle caller ID update."""
220 |         unique_id = event.get("Uniqueid", "")
221 |         caller_id = event.get("CallerIDNum", "")
222 |         
223 |         if unique_id in self.active_calls:
224 |             self.active_calls[unique_id].caller_id = caller_id
225 |     
226 |     async def originate_call(
227 |         self,
228 |         destination: str,
229 |         caller_id: str = "PrintCast",
230 |         timeout: int = 30,
231 |         variables: Optional[Dict[str, str]] = None
232 |     ) -> Dict[str, Any]:
233 |         """
234 |         Originate an outbound call.
235 |         
236 |         Args:
237 |             destination: Destination number
238 |             caller_id: Caller ID to present
239 |             timeout: Call timeout in seconds
240 |             variables: Channel variables to set
241 |         
242 |         Returns:
243 |             Call result information
244 |         """
245 |         if not self.connected:
246 |             raise RuntimeError("Not connected to Asterisk")
247 |         
248 |         try:
249 |             response = await self.ami.send_action({
250 |                 "Action": "Originate",
251 |                 "Channel": f"SIP/{destination}",
252 |                 "Context": self.context,
253 |                 "Exten": "s",
254 |                 "Priority": "1",
255 |                 "CallerID": caller_id,
256 |                 "Timeout": str(timeout * 1000),
257 |                 "Variable": variables or {}
258 |             })
259 |             
260 |             logger.info(
261 |                 "Call originated",
262 |                 destination=destination,
263 |                 caller_id=caller_id
264 |             )
265 |             
266 |             return {
267 |                 "success": response.get("Response") == "Success",
268 |                 "message": response.get("Message", ""),
269 |                 "action_id": response.get("ActionID", "")
270 |             }
271 |             
272 |         except Exception as e:
273 |             logger.error("Failed to originate call", error=str(e))
274 |             raise
275 |     
276 |     async def transfer_call(
277 |         self,
278 |         channel: str,
279 |         destination: str,
280 |         context: Optional[str] = None
281 |     ) -> bool:
282 |         """
283 |         Transfer an active call.
284 |         
285 |         Args:
286 |             channel: Channel to transfer
287 |             destination: Transfer destination
288 |             context: Optional context (uses default if not specified)
289 |         
290 |         Returns:
291 |             True if transfer successful
292 |         """
293 |         if not self.connected:
294 |             raise RuntimeError("Not connected to Asterisk")
295 |         
296 |         try:
297 |             response = await self.ami.send_action({
298 |                 "Action": "Redirect",
299 |                 "Channel": channel,
300 |                 "Context": context or self.context,
301 |                 "Exten": destination,
302 |                 "Priority": "1"
303 |             })
304 |             
305 |             success = response.get("Response") == "Success"
306 |             
307 |             logger.info(
308 |                 "Call transferred",
309 |                 channel=channel,
310 |                 destination=destination,
311 |                 success=success
312 |             )
313 |             
314 |             return success
315 |             
316 |         except Exception as e:
317 |             logger.error("Failed to transfer call", error=str(e))
318 |             return False
319 |     
320 |     async def hangup_call(self, channel: str, cause: int = 16) -> bool:
321 |         """
322 |         Hangup an active call.
323 |         
324 |         Args:
325 |             channel: Channel to hangup
326 |             cause: Hangup cause code (16 = normal clearing)
327 |         
328 |         Returns:
329 |             True if hangup successful
330 |         """
331 |         if not self.connected:
332 |             raise RuntimeError("Not connected to Asterisk")
333 |         
334 |         try:
335 |             response = await self.ami.send_action({
336 |                 "Action": "Hangup",
337 |                 "Channel": channel,
338 |                 "Cause": str(cause)
339 |             })
340 |             
341 |             success = response.get("Response") == "Success"
342 |             
343 |             logger.info(
344 |                 "Call hangup requested",
345 |                 channel=channel,
346 |                 success=success
347 |             )
348 |             
349 |             return success
350 |             
351 |         except Exception as e:
352 |             logger.error("Failed to hangup call", error=str(e))
353 |             return False
354 |     
355 |     async def play_audio(
356 |         self,
357 |         channel: str,
358 |         audio_file: str,
359 |         interrupt_dtmf: bool = True
360 |     ) -> bool:
361 |         """
362 |         Play audio file to channel.
363 |         
364 |         Args:
365 |             channel: Channel to play audio to
366 |             audio_file: Path to audio file
367 |             interrupt_dtmf: Allow DTMF to interrupt playback
368 |         
369 |         Returns:
370 |             True if playback started
371 |         """
372 |         if not self.connected:
373 |             raise RuntimeError("Not connected to Asterisk")
374 |         
375 |         try:
376 |             response = await self.ami.send_action({
377 |                 "Action": "Playback",
378 |                 "Channel": channel,
379 |                 "Filename": audio_file,
380 |                 "Interrupt": "yes" if interrupt_dtmf else "no"
381 |             })
382 |             
383 |             success = response.get("Response") == "Success"
384 |             
385 |             logger.info(
386 |                 "Audio playback started",
387 |                 channel=channel,
388 |                 file=audio_file,
389 |                 success=success
390 |             )
391 |             
392 |             return success
393 |             
394 |         except Exception as e:
395 |             logger.error("Failed to play audio", error=str(e))
396 |             return False
397 |     
398 |     async def get_channel_variable(
399 |         self,
400 |         channel: str,
401 |         variable: str
402 |     ) -> Optional[str]:
403 |         """
404 |         Get channel variable value.
405 |         
406 |         Args:
407 |             channel: Channel name
408 |             variable: Variable name
409 |         
410 |         Returns:
411 |             Variable value or None
412 |         """
413 |         if not self.connected:
414 |             return None
415 |         
416 |         try:
417 |             response = await self.ami.send_action({
418 |                 "Action": "GetVar",
419 |                 "Channel": channel,
420 |                 "Variable": variable
421 |             })
422 |             
423 |             if response.get("Response") == "Success":
424 |                 return response.get("Value")
425 |             
426 |             return None
427 |             
428 |         except Exception as e:
429 |             logger.error("Failed to get channel variable", error=str(e))
430 |             return None
431 |     
432 |     async def set_channel_variable(
433 |         self,
434 |         channel: str,
435 |         variable: str,
436 |         value: str
437 |     ) -> bool:
438 |         """
439 |         Set channel variable.
440 |         
441 |         Args:
442 |             channel: Channel name
443 |             variable: Variable name
444 |             value: Variable value
445 |         
446 |         Returns:
447 |             True if variable set successfully
448 |         """
449 |         if not self.connected:
450 |             return False
451 |         
452 |         try:
453 |             response = await self.ami.send_action({
454 |                 "Action": "SetVar",
455 |                 "Channel": channel,
456 |                 "Variable": variable,
457 |                 "Value": value
458 |             })
459 |             
460 |             return response.get("Response") == "Success"
461 |             
462 |         except Exception as e:
463 |             logger.error("Failed to set channel variable", error=str(e))
464 |             return False
465 |     
466 |     def register_event_handler(
467 |         self,
468 |         event_type: str,
469 |         handler: Callable[[Dict[str, Any]], None]
470 |     ):
471 |         """
472 |         Register custom event handler.
473 |         
474 |         Args:
475 |             event_type: AMI event type
476 |             handler: Async handler function
477 |         """
478 |         if event_type not in self.event_handlers:
479 |             self.event_handlers[event_type] = []
480 |         
481 |         self.event_handlers[event_type].append(handler)
482 |         
483 |         logger.debug(
484 |             "Event handler registered",
485 |             event_type=event_type,
486 |             handler=handler.__name__
487 |         )
488 |     
489 |     async def execute_agi_command(
490 |         self,
491 |         channel: str,
492 |         command: str,
493 |         args: Optional[List[str]] = None
494 |     ) -> Dict[str, Any]:
495 |         """
496 |         Execute AGI command on channel.
497 |         
498 |         Args:
499 |             channel: Channel name
500 |             command: AGI command
501 |             args: Command arguments
502 |         
503 |         Returns:
504 |             Command result
505 |         """
506 |         if not self.connected:
507 |             raise RuntimeError("Not connected to Asterisk")
508 |         
509 |         command_line = command
510 |         if args:
511 |             command_line += " " + " ".join(args)
512 |         
513 |         try:
514 |             response = await self.ami.send_action({
515 |                 "Action": "AGI",
516 |                 "Channel": channel,
517 |                 "Command": command_line
518 |             })
519 |             
520 |             return {
521 |                 "success": response.get("Response") == "Success",
522 |                 "result": response.get("Result", ""),
523 |                 "data": response.get("ResultData", "")
524 |             }
525 |             
526 |         except Exception as e:
527 |             logger.error("Failed to execute AGI command", error=str(e))
528 |             raise
529 |     
530 |     def get_active_calls(self) -> List[CallInfo]:
531 |         """Get list of active calls."""
532 |         return list(self.active_calls.values())
533 |     
534 |     def get_call_by_caller_id(self, caller_id: str) -> Optional[CallInfo]:
535 |         """Get call info by caller ID."""
536 |         for call in self.active_calls.values():
537 |             if call.caller_id == caller_id:
538 |                 return call
539 |         return None
```
Page 15/24FirstPrevNextLast