# Directory Structure
```
├── .cursorrules
├── .env.example
├── .github
│ └── workflows
│ ├── build.yml
│ ├── ci-cd.yml
│ ├── ci.yml
│ ├── deploy.yml
│ ├── ecosystem-monitor.yml
│ ├── fan-out-orchestrator.yml
│ └── release.yml
├── .gitignore
├── .pre-commit-config.yaml
├── AUTOMOTIVE_CAMERA_SYSTEM_SUMMARY.md
├── automotive-camera-system
│ ├── docs
│ │ └── IMPLEMENTACE_CS.md
│ └── README.md
├── AWS_MCP_IMPLEMENTATION_SUMMARY.md
├── AWS_MCP_QUICKSTART.md
├── AWS_SIP_TRUNK_DEPLOYMENT_COMPLETE.md
├── aws-sip-trunk
│ ├── .gitignore
│ ├── config
│ │ ├── extensions.conf.j2
│ │ └── pjsip.conf.j2
│ ├── DEPLOYMENT_SUMMARY.md
│ ├── docs
│ │ ├── DEPLOYMENT.md
│ │ └── TROUBLESHOOTING.md
│ ├── PROJECT_INDEX.md
│ ├── pyproject.toml
│ ├── QUICKSTART.md
│ ├── README.md
│ ├── scripts
│ │ ├── deploy-asterisk-aws.sh
│ │ └── user-data.sh
│ ├── terraform
│ │ ├── ec2.tf
│ │ ├── main.tf
│ │ ├── monitoring.tf
│ │ ├── networking.tf
│ │ ├── outputs.tf
│ │ ├── storage.tf
│ │ ├── terraform.tfvars.example
│ │ └── variables.tf
│ ├── tests
│ │ └── test_sip_connectivity.py
│ └── VERIFICATION_CHECKLIST.md
├── CLAUDE.md
├── component_templates.json
├── conanfile.py
├── config
│ ├── default.json
│ └── project_orchestration.json
├── Containerfile
├── cursor-templates
│ └── openssl
│ ├── linux-dev.mdc.jinja2
│ └── shared.mdc.jinja2
├── data
│ └── prompts
│ └── templates
│ ├── advanced-multi-server-template.json
│ ├── analysis-assistant.json
│ ├── analyze-mermaid-diagram.json
│ ├── architecture-design-assistant.json
│ ├── code-diagram-documentation-creator.json
│ ├── code-refactoring-assistant.json
│ ├── code-review-assistant.json
│ ├── collaborative-development.json
│ ├── consolidated-interfaces-template.json
│ ├── could-you-interpret-the-assumed-applicat.json
│ ├── data-analysis-template.json
│ ├── database-query-assistant.json
│ ├── debugging-assistant.json
│ ├── development-system-prompt-zcna0.json
│ ├── development-system-prompt.json
│ ├── development-workflow.json
│ ├── docker-compose-prompt-combiner.json
│ ├── docker-containerization-guide.json
│ ├── docker-mcp-servers-orchestration.json
│ ├── foresight-assistant.json
│ ├── generate-different-types-of-questions-ab.json
│ ├── generate-mermaid-diagram.json
│ ├── image-1-describe-the-icon-in-one-sen.json
│ ├── initialize-project-setup-for-a-new-micro.json
│ ├── install-dependencies-build-run-test.json
│ ├── mcp-code-generator.json
│ ├── mcp-integration-assistant.json
│ ├── mcp-resources-explorer.json
│ ├── mcp-resources-integration.json
│ ├── mcp-server-configurator.json
│ ├── mcp-server-dev-prompt-combiner.json
│ ├── mcp-server-integration-template.json
│ ├── mcp-template-system.json
│ ├── mermaid-analysis-expert.json
│ ├── mermaid-class-diagram-generator.json
│ ├── mermaid-diagram-generator.json
│ ├── mermaid-diagram-modifier.json
│ ├── modify-mermaid-diagram.json
│ ├── monorepo-migration-guide.json
│ ├── multi-resource-context.json
│ ├── project-analysis-assistant.json
│ ├── prompt-combiner-interface.json
│ ├── prompt-templates.json
│ ├── repository-explorer.json
│ ├── research-assistant.json
│ ├── sequential-data-analysis.json
│ ├── solid-code-analysis-visualizer.json
│ ├── task-list-helper-8ithy.json
│ ├── template-based-mcp-integration.json
│ ├── templates.json
│ ├── test-prompt.json
│ └── you-are-limited-to-respond-yes-or-no-onl.json
├── docs
│ ├── AWS_MCP.md
│ ├── AWS.md
│ ├── CONAN.md
│ └── integration.md
├── elevenlabs-agents
│ ├── agent-prompts.json
│ └── README.md
├── IMPLEMENTATION_STATUS.md
├── integration_plan.md
├── LICENSE
├── MANIFEST.in
├── mcp-project-orchestrator
│ └── openssl
│ ├── .github
│ │ └── workflows
│ │ └── validate-cursor-config.yml
│ ├── conanfile.py
│ ├── CURSOR_DEPLOYMENT_POLISH.md
│ ├── cursor-rules
│ │ ├── mcp.json.jinja2
│ │ ├── prompts
│ │ │ ├── fips-compliance.md.jinja2
│ │ │ ├── openssl-coding-standards.md.jinja2
│ │ │ └── pr-review.md.jinja2
│ │ └── rules
│ │ ├── ci-linux.mdc.jinja2
│ │ ├── linux-dev.mdc.jinja2
│ │ ├── macos-dev.mdc.jinja2
│ │ ├── shared.mdc.jinja2
│ │ └── windows-dev.mdc.jinja2
│ ├── docs
│ │ └── cursor-configuration-management.md
│ ├── examples
│ │ └── example-workspace
│ │ ├── .cursor
│ │ │ ├── mcp.json
│ │ │ └── rules
│ │ │ ├── linux-dev.mdc
│ │ │ └── shared.mdc
│ │ ├── .gitignore
│ │ ├── CMakeLists.txt
│ │ ├── conanfile.py
│ │ ├── profiles
│ │ │ ├── linux-gcc-debug.profile
│ │ │ └── linux-gcc-release.profile
│ │ ├── README.md
│ │ └── src
│ │ ├── crypto_utils.cpp
│ │ ├── crypto_utils.h
│ │ └── main.cpp
│ ├── IMPLEMENTATION_SUMMARY.md
│ ├── mcp_orchestrator
│ │ ├── __init__.py
│ │ ├── cli.py
│ │ ├── conan_integration.py
│ │ ├── cursor_config.py
│ │ ├── cursor_deployer.py
│ │ ├── deploy_cursor.py
│ │ ├── env_config.py
│ │ ├── platform_detector.py
│ │ └── yaml_validator.py
│ ├── openssl-cursor-example-workspace-20251014_121133.zip
│ ├── pyproject.toml
│ ├── README.md
│ ├── requirements.txt
│ ├── scripts
│ │ └── create_example_workspace.py
│ ├── setup.py
│ ├── test_deployment.py
│ └── tests
│ ├── __init__.py
│ ├── test_cursor_deployer.py
│ └── test_template_validation.py
├── printcast-agent
│ ├── .env.example
│ ├── config
│ │ └── asterisk
│ │ └── extensions.conf
│ ├── Containerfile
│ ├── docker-compose.yml
│ ├── pyproject.toml
│ ├── README.md
│ ├── scripts
│ │ └── docker-entrypoint.sh
│ ├── src
│ │ ├── integrations
│ │ │ ├── __init__.py
│ │ │ ├── asterisk.py
│ │ │ ├── content.py
│ │ │ ├── delivery.py
│ │ │ ├── elevenlabs.py
│ │ │ └── printing.py
│ │ ├── mcp_server
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── server.py
│ │ └── orchestration
│ │ ├── __init__.py
│ │ └── workflow.py
│ └── tests
│ └── test_mcp_server.py
├── project_orchestration.json
├── project_templates.json
├── pyproject.toml
├── README.md
├── REFACTORING_COMPLETED.md
├── REFACTORING_RECOMMENDATIONS.md
├── requirements.txt
├── scripts
│ ├── archive
│ │ ├── init_claude_test.sh
│ │ ├── init_postgres.sh
│ │ ├── start_mcp_servers.sh
│ │ └── test_claude_desktop.sh
│ ├── consolidate_mermaid.py
│ ├── consolidate_prompts.py
│ ├── consolidate_resources.py
│ ├── consolidate_templates.py
│ ├── INSTRUCTIONS.md
│ ├── README.md
│ ├── setup_aws_mcp.sh
│ ├── setup_mcp.sh
│ ├── setup_orchestrator.sh
│ ├── setup_project.py
│ └── test_mcp.sh
├── src
│ └── mcp_project_orchestrator
│ ├── __init__.py
│ ├── __main__.py
│ ├── aws_mcp.py
│ ├── cli
│ │ └── __init__.py
│ ├── cli.py
│ ├── commands
│ │ └── openssl_cli.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── config.py
│ │ ├── exceptions.py
│ │ ├── fastmcp.py
│ │ ├── logging.py
│ │ └── managers.py
│ ├── cursor_deployer.py
│ ├── ecosystem_monitor.py
│ ├── fan_out_orchestrator.py
│ ├── fastmcp.py
│ ├── mcp-py
│ │ ├── AggregateVersions.py
│ │ ├── CustomBashTool.py
│ │ ├── FileAnnotator.py
│ │ ├── mcp-client.py
│ │ ├── mcp-server.py
│ │ ├── MermaidDiagramGenerator.py
│ │ ├── NamingAgent.py
│ │ └── solid-analyzer-agent.py
│ ├── mermaid
│ │ ├── __init__.py
│ │ ├── generator.py
│ │ ├── mermaid_orchestrator.py
│ │ ├── renderer.py
│ │ ├── templates
│ │ │ ├── AbstractFactory-diagram.json
│ │ │ ├── Adapter-diagram.json
│ │ │ ├── Analyze_Mermaid_Diagram.json
│ │ │ ├── Builder-diagram.json
│ │ │ ├── Chain-diagram.json
│ │ │ ├── Code_Diagram_Documentation_Creator.json
│ │ │ ├── Command-diagram.json
│ │ │ ├── Decorator-diagram.json
│ │ │ ├── Facade-diagram.json
│ │ │ ├── Factory-diagram.json
│ │ │ ├── flowchart
│ │ │ │ ├── AbstractFactory-diagram.json
│ │ │ │ ├── Adapter-diagram.json
│ │ │ │ ├── Analyze_Mermaid_Diagram.json
│ │ │ │ ├── Builder-diagram.json
│ │ │ │ ├── Chain-diagram.json
│ │ │ │ ├── Code_Diagram_Documentation_Creator.json
│ │ │ │ ├── Command-diagram.json
│ │ │ │ ├── Decorator-diagram.json
│ │ │ │ ├── Facade-diagram.json
│ │ │ │ ├── Factory-diagram.json
│ │ │ │ ├── Generate_Mermaid_Diagram.json
│ │ │ │ ├── generated_diagram.json
│ │ │ │ ├── integration.json
│ │ │ │ ├── Iterator-diagram.json
│ │ │ │ ├── Mediator-diagram.json
│ │ │ │ ├── Memento-diagram.json
│ │ │ │ ├── Mermaid_Analysis_Expert.json
│ │ │ │ ├── Mermaid_Class_Diagram_Generator.json
│ │ │ │ ├── Mermaid_Diagram_Generator.json
│ │ │ │ ├── Mermaid_Diagram_Modifier.json
│ │ │ │ ├── Modify_Mermaid_Diagram.json
│ │ │ │ ├── Observer-diagram.json
│ │ │ │ ├── Prototype-diagram.json
│ │ │ │ ├── Proxy-diagram.json
│ │ │ │ ├── README.json
│ │ │ │ ├── Singleton-diagram.json
│ │ │ │ ├── State-diagram.json
│ │ │ │ ├── Strategy-diagram.json
│ │ │ │ ├── TemplateMethod-diagram.json
│ │ │ │ ├── theme_dark.json
│ │ │ │ ├── theme_default.json
│ │ │ │ ├── theme_pastel.json
│ │ │ │ ├── theme_vibrant.json
│ │ │ │ └── Visitor-diagram.json
│ │ │ ├── Generate_Mermaid_Diagram.json
│ │ │ ├── generated_diagram.json
│ │ │ ├── index.json
│ │ │ ├── integration.json
│ │ │ ├── Iterator-diagram.json
│ │ │ ├── Mediator-diagram.json
│ │ │ ├── Memento-diagram.json
│ │ │ ├── Mermaid_Analysis_Expert.json
│ │ │ ├── Mermaid_Class_Diagram_Generator.json
│ │ │ ├── Mermaid_Diagram_Generator.json
│ │ │ ├── Mermaid_Diagram_Modifier.json
│ │ │ ├── Modify_Mermaid_Diagram.json
│ │ │ ├── Observer-diagram.json
│ │ │ ├── Prototype-diagram.json
│ │ │ ├── Proxy-diagram.json
│ │ │ ├── README.json
│ │ │ ├── Singleton-diagram.json
│ │ │ ├── State-diagram.json
│ │ │ ├── Strategy-diagram.json
│ │ │ ├── TemplateMethod-diagram.json
│ │ │ ├── theme_dark.json
│ │ │ ├── theme_default.json
│ │ │ ├── theme_pastel.json
│ │ │ ├── theme_vibrant.json
│ │ │ └── Visitor-diagram.json
│ │ └── types.py
│ ├── project_orchestration.py
│ ├── prompt_manager
│ │ ├── __init__.py
│ │ ├── loader.py
│ │ ├── manager.py
│ │ └── template.py
│ ├── prompts
│ │ ├── __dirname.json
│ │ ├── __image_1___describe_the_icon_in_one_sen___.json
│ │ ├── __init__.py
│ │ ├── __type.json
│ │ ├── _.json
│ │ ├── _DEFAULT_OPEN_DELIMITER.json
│ │ ├── _emojiRegex.json
│ │ ├── _UUID_CHARS.json
│ │ ├── a.json
│ │ ├── A.json
│ │ ├── Aa.json
│ │ ├── aAnnotationPadding.json
│ │ ├── absoluteThresholdGroup.json
│ │ ├── add.json
│ │ ├── ADDITIONAL_PROPERTY_FLAG.json
│ │ ├── Advanced_Multi-Server_Integration_Template.json
│ │ ├── allOptionsList.json
│ │ ├── analysis
│ │ │ ├── Data_Analysis_Template.json
│ │ │ ├── index.json
│ │ │ ├── Mermaid_Analysis_Expert.json
│ │ │ ├── Sequential_Data_Analysis_with_MCP_Integration.json
│ │ │ └── SOLID_Code_Analysis_Visualizer.json
│ │ ├── Analysis_Assistant.json
│ │ ├── Analyze_Mermaid_Diagram.json
│ │ ├── ANDROID_EVERGREEN_FIRST.json
│ │ ├── ANSI_ESCAPE_BELL.json
│ │ ├── architecture
│ │ │ ├── index.json
│ │ │ └── PromptCombiner_Interface.json
│ │ ├── Architecture_Design_Assistant.json
│ │ ├── argsTag.json
│ │ ├── ARROW.json
│ │ ├── assistant
│ │ │ ├── Analysis_Assistant.json
│ │ │ ├── Architecture_Design_Assistant.json
│ │ │ ├── Code_Refactoring_Assistant.json
│ │ │ ├── Code_Review_Assistant.json
│ │ │ ├── Database_Query_Assistant.json
│ │ │ ├── Debugging_Assistant.json
│ │ │ ├── Foresight_Assistant.json
│ │ │ ├── index.json
│ │ │ ├── MCP_Integration_Assistant.json
│ │ │ ├── Project_Analysis_Assistant.json
│ │ │ └── Research_Assistant.json
│ │ ├── astralRange.json
│ │ ├── at.json
│ │ ├── authorization_endpoint.json
│ │ ├── b.json
│ │ ├── BABELIGNORE_FILENAME.json
│ │ ├── BACKSLASH.json
│ │ ├── backupId.json
│ │ ├── BANG.json
│ │ ├── BASE64_MAP.json
│ │ ├── baseFlags.json
│ │ ├── Basic_Template.json
│ │ ├── bgModel.json
│ │ ├── bignum.json
│ │ ├── blockKeywordsStr.json
│ │ ├── BOMChar.json
│ │ ├── boundary.json
│ │ ├── brackets.json
│ │ ├── BROWSER_VAR.json
│ │ ├── bt.json
│ │ ├── BUILTIN.json
│ │ ├── BULLET.json
│ │ ├── c.json
│ │ ├── C.json
│ │ ├── CACHE_VERSION.json
│ │ ├── cacheControl.json
│ │ ├── cacheProp.json
│ │ ├── category.py
│ │ ├── CHANGE_EVENT.json
│ │ ├── CHAR_CODE_0.json
│ │ ├── chars.json
│ │ ├── cjsPattern.json
│ │ ├── cKeywords.json
│ │ ├── classForPercent.json
│ │ ├── classStr.json
│ │ ├── clientFirstMessageBare.json
│ │ ├── cmd.json
│ │ ├── Code_Diagram_Documentation_Creator.json
│ │ ├── Code_Refactoring_Assistant.json
│ │ ├── Code_Review_Assistant.json
│ │ ├── code.json
│ │ ├── coding
│ │ │ ├── __dirname.json
│ │ │ ├── _.json
│ │ │ ├── _DEFAULT_OPEN_DELIMITER.json
│ │ │ ├── _emojiRegex.json
│ │ │ ├── _UUID_CHARS.json
│ │ │ ├── a.json
│ │ │ ├── A.json
│ │ │ ├── aAnnotationPadding.json
│ │ │ ├── absoluteThresholdGroup.json
│ │ │ ├── add.json
│ │ │ ├── ADDITIONAL_PROPERTY_FLAG.json
│ │ │ ├── allOptionsList.json
│ │ │ ├── ANDROID_EVERGREEN_FIRST.json
│ │ │ ├── ANSI_ESCAPE_BELL.json
│ │ │ ├── argsTag.json
│ │ │ ├── ARROW.json
│ │ │ ├── astralRange.json
│ │ │ ├── at.json
│ │ │ ├── authorization_endpoint.json
│ │ │ ├── BABELIGNORE_FILENAME.json
│ │ │ ├── BACKSLASH.json
│ │ │ ├── BANG.json
│ │ │ ├── BASE64_MAP.json
│ │ │ ├── baseFlags.json
│ │ │ ├── bgModel.json
│ │ │ ├── bignum.json
│ │ │ ├── blockKeywordsStr.json
│ │ │ ├── BOMChar.json
│ │ │ ├── boundary.json
│ │ │ ├── brackets.json
│ │ │ ├── BROWSER_VAR.json
│ │ │ ├── bt.json
│ │ │ ├── BUILTIN.json
│ │ │ ├── BULLET.json
│ │ │ ├── c.json
│ │ │ ├── C.json
│ │ │ ├── CACHE_VERSION.json
│ │ │ ├── cacheControl.json
│ │ │ ├── cacheProp.json
│ │ │ ├── CHANGE_EVENT.json
│ │ │ ├── CHAR_CODE_0.json
│ │ │ ├── chars.json
│ │ │ ├── cjsPattern.json
│ │ │ ├── cKeywords.json
│ │ │ ├── classForPercent.json
│ │ │ ├── classStr.json
│ │ │ ├── clientFirstMessageBare.json
│ │ │ ├── cmd.json
│ │ │ ├── code.json
│ │ │ ├── colorCode.json
│ │ │ ├── comma.json
│ │ │ ├── command.json
│ │ │ ├── configJsContent.json
│ │ │ ├── connectionString.json
│ │ │ ├── cssClassStr.json
│ │ │ ├── currentBoundaryParse.json
│ │ │ ├── d.json
│ │ │ ├── data.json
│ │ │ ├── DATA.json
│ │ │ ├── dataWebpackPrefix.json
│ │ │ ├── debug.json
│ │ │ ├── decodeStateVectorV2.json
│ │ │ ├── DEFAULT_DELIMITER.json
│ │ │ ├── DEFAULT_DIAGRAM_DIRECTION.json
│ │ │ ├── DEFAULT_JS_PATTERN.json
│ │ │ ├── DEFAULT_LOG_TARGET.json
│ │ │ ├── defaultHelpOpt.json
│ │ │ ├── defaultHost.json
│ │ │ ├── deferY18nLookupPrefix.json
│ │ │ ├── DELIM.json
│ │ │ ├── delimiter.json
│ │ │ ├── DEPRECATION.json
│ │ │ ├── destMain.json
│ │ │ ├── DID_NOT_THROW.json
│ │ │ ├── direction.json
│ │ │ ├── displayValue.json
│ │ │ ├── DNS.json
│ │ │ ├── doc.json
│ │ │ ├── DOCUMENTATION_NOTE.json
│ │ │ ├── DOT.json
│ │ │ ├── DOTS.json
│ │ │ ├── dummyCompoundId.json
│ │ │ ├── e.json
│ │ │ ├── E.json
│ │ │ ├── earlyHintsLink.json
│ │ │ ├── elide.json
│ │ │ ├── EMPTY.json
│ │ │ ├── end.json
│ │ │ ├── endpoint.json
│ │ │ ├── environment.json
│ │ │ ├── ERR_CODE.json
│ │ │ ├── errMessage.json
│ │ │ ├── errMsg.json
│ │ │ ├── ERROR_MESSAGE.json
│ │ │ ├── error.json
│ │ │ ├── ERROR.json
│ │ │ ├── ERRORCLASS.json
│ │ │ ├── errorMessage.json
│ │ │ ├── es6Default.json
│ │ │ ├── ESC.json
│ │ │ ├── Escapable.json
│ │ │ ├── escapedChar.json
│ │ │ ├── escapeFuncStr.json
│ │ │ ├── escSlash.json
│ │ │ ├── ev.json
│ │ │ ├── event.json
│ │ │ ├── execaMessage.json
│ │ │ ├── EXPECTED_LABEL.json
│ │ │ ├── expected.json
│ │ │ ├── expectedString.json
│ │ │ ├── expression1.json
│ │ │ ├── EXTENSION.json
│ │ │ ├── f.json
│ │ │ ├── FAIL_TEXT.json
│ │ │ ├── FILE_BROWSER_FACTORY.json
│ │ │ ├── fill.json
│ │ │ ├── findPackageJson.json
│ │ │ ├── fnKey.json
│ │ │ ├── FORMAT.json
│ │ │ ├── formatted.json
│ │ │ ├── from.json
│ │ │ ├── fullpaths.json
│ │ │ ├── FUNC_ERROR_TEXT.json
│ │ │ ├── GenStateSuspendedStart.json
│ │ │ ├── GENSYNC_EXPECTED_START.json
│ │ │ ├── gutter.json
│ │ │ ├── h.json
│ │ │ ├── handlerFuncName.json
│ │ │ ├── HASH_UNDEFINED.json
│ │ │ ├── head.json
│ │ │ ├── helpMessage.json
│ │ │ ├── HINT_ARG.json
│ │ │ ├── HOOK_RETURNED_NOTHING_ERROR_MESSAGE.json
│ │ │ ├── i.json
│ │ │ ├── id.json
│ │ │ ├── identifier.json
│ │ │ ├── Identifier.json
│ │ │ ├── INDENT.json
│ │ │ ├── indentation.json
│ │ │ ├── index.json
│ │ │ ├── INDIRECTION_FRAGMENT.json
│ │ │ ├── input.json
│ │ │ ├── inputText.json
│ │ │ ├── insert.json
│ │ │ ├── insertPromptQuery.json
│ │ │ ├── INSPECT_MAX_BYTES.json
│ │ │ ├── intToCharMap.json
│ │ │ ├── IS_ITERABLE_SENTINEL.json
│ │ │ ├── IS_KEYED_SENTINEL.json
│ │ │ ├── isConfigType.json
│ │ │ ├── isoSentinel.json
│ │ │ ├── isSourceNode.json
│ │ │ ├── j.json
│ │ │ ├── JAKE_CMD.json
│ │ │ ├── JEST_GLOBAL_NAME.json
│ │ │ ├── JEST_GLOBALS_MODULE_NAME.json
│ │ │ ├── JSON_SYNTAX_CHAR.json
│ │ │ ├── json.json
│ │ │ ├── jsonType.json
│ │ │ ├── jupyter_namespaceObject.json
│ │ │ ├── JUPYTERLAB_DOCMANAGER_PLUGIN_ID.json
│ │ │ ├── k.json
│ │ │ ├── KERNEL_STATUS_ERROR_CLASS.json
│ │ │ ├── key.json
│ │ │ ├── l.json
│ │ │ ├── labelId.json
│ │ │ ├── LATEST_PROTOCOL_VERSION.json
│ │ │ ├── LETTERDASHNUMBER.json
│ │ │ ├── LF.json
│ │ │ ├── LIMIT_REPLACE_NODE.json
│ │ │ ├── logTime.json
│ │ │ ├── lstatkey.json
│ │ │ ├── lt.json
│ │ │ ├── m.json
│ │ │ ├── maliciousPayload.json
│ │ │ ├── mask.json
│ │ │ ├── match.json
│ │ │ ├── matchingDelim.json
│ │ │ ├── MAXIMUM_MESSAGE_SIZE.json
│ │ │ ├── mdcContent.json
│ │ │ ├── MERMAID_DOM_ID_PREFIX.json
│ │ │ ├── message.json
│ │ │ ├── messages.json
│ │ │ ├── meth.json
│ │ │ ├── minimatch.json
│ │ │ ├── MOCK_CONSTRUCTOR_NAME.json
│ │ │ ├── MOCKS_PATTERN.json
│ │ │ ├── moduleDirectory.json
│ │ │ ├── msg.json
│ │ │ ├── mtr.json
│ │ │ ├── multipartType.json
│ │ │ ├── n.json
│ │ │ ├── N.json
│ │ │ ├── name.json
│ │ │ ├── NATIVE_PLATFORM.json
│ │ │ ├── newUrl.json
│ │ │ ├── NM.json
│ │ │ ├── NO_ARGUMENTS.json
│ │ │ ├── NO_DIFF_MESSAGE.json
│ │ │ ├── NODE_MODULES.json
│ │ │ ├── nodeInternalPrefix.json
│ │ │ ├── nonASCIIidentifierStartChars.json
│ │ │ ├── nonKey.json
│ │ │ ├── NOT_A_DOT.json
│ │ │ ├── notCharacterOrDash.json
│ │ │ ├── notebookURL.json
│ │ │ ├── notSelector.json
│ │ │ ├── nullTag.json
│ │ │ ├── num.json
│ │ │ ├── NUMBER.json
│ │ │ ├── o.json
│ │ │ ├── O.json
│ │ │ ├── octChar.json
│ │ │ ├── octetStreamType.json
│ │ │ ├── operators.json
│ │ │ ├── out.json
│ │ │ ├── OUTSIDE_JEST_VM_PROTOCOL.json
│ │ │ ├── override.json
│ │ │ ├── p.json
│ │ │ ├── PACKAGE_FILENAME.json
│ │ │ ├── PACKAGE_JSON.json
│ │ │ ├── packageVersion.json
│ │ │ ├── paddedNumber.json
│ │ │ ├── page.json
│ │ │ ├── parseClass.json
│ │ │ ├── path.json
│ │ │ ├── pathExt.json
│ │ │ ├── pattern.json
│ │ │ ├── PatternBoolean.json
│ │ │ ├── pBuiltins.json
│ │ │ ├── pFloatForm.json
│ │ │ ├── pkg.json
│ │ │ ├── PLUGIN_ID_DOC_MANAGER.json
│ │ │ ├── plusChar.json
│ │ │ ├── PN_CHARS.json
│ │ │ ├── point.json
│ │ │ ├── prefix.json
│ │ │ ├── PRETTY_PLACEHOLDER.json
│ │ │ ├── property_prefix.json
│ │ │ ├── pubkey256.json
│ │ │ ├── Q.json
│ │ │ ├── qmark.json
│ │ │ ├── QO.json
│ │ │ ├── query.json
│ │ │ ├── querystringType.json
│ │ │ ├── queryText.json
│ │ │ ├── r.json
│ │ │ ├── R.json
│ │ │ ├── rangeStart.json
│ │ │ ├── re.json
│ │ │ ├── reI.json
│ │ │ ├── REQUIRED_FIELD_SYMBOL.json
│ │ │ ├── reserve.json
│ │ │ ├── resolvedDestination.json
│ │ │ ├── resolverDir.json
│ │ │ ├── responseType.json
│ │ │ ├── result.json
│ │ │ ├── ROOT_DESCRIBE_BLOCK_NAME.json
│ │ │ ├── ROOT_NAMESPACE_NAME.json
│ │ │ ├── ROOT_TASK_NAME.json
│ │ │ ├── route.json
│ │ │ ├── RUNNING_TEXT.json
│ │ │ ├── s.json
│ │ │ ├── SCHEMA_PATH.json
│ │ │ ├── se.json
│ │ │ ├── SEARCHABLE_CLASS.json
│ │ │ ├── secret.json
│ │ │ ├── selector.json
│ │ │ ├── SEMVER_SPEC_VERSION.json
│ │ │ ├── sensitiveHeaders.json
│ │ │ ├── sep.json
│ │ │ ├── separator.json
│ │ │ ├── SHAPE_STATE.json
│ │ │ ├── shape.json
│ │ │ ├── SHARED.json
│ │ │ ├── short.json
│ │ │ ├── side.json
│ │ │ ├── SNAPSHOT_VERSION.json
│ │ │ ├── SOURCE_MAPPING_PREFIX.json
│ │ │ ├── source.json
│ │ │ ├── sourceMapContent.json
│ │ │ ├── SPACE_SYMBOL.json
│ │ │ ├── SPACE.json
│ │ │ ├── sqlKeywords.json
│ │ │ ├── sranges.json
│ │ │ ├── st.json
│ │ │ ├── ST.json
│ │ │ ├── stack.json
│ │ │ ├── START_HIDING.json
│ │ │ ├── START_OF_LINE.json
│ │ │ ├── startNoTraversal.json
│ │ │ ├── STATES.json
│ │ │ ├── stats.json
│ │ │ ├── statSync.json
│ │ │ ├── storageStatus.json
│ │ │ ├── storageType.json
│ │ │ ├── str.json
│ │ │ ├── stringifiedObject.json
│ │ │ ├── stringPath.json
│ │ │ ├── stringResult.json
│ │ │ ├── stringTag.json
│ │ │ ├── strValue.json
│ │ │ ├── style.json
│ │ │ ├── SUB_NAME.json
│ │ │ ├── subkey.json
│ │ │ ├── SUBPROTOCOL.json
│ │ │ ├── SUITE_NAME.json
│ │ │ ├── symbolPattern.json
│ │ │ ├── symbolTag.json
│ │ │ ├── t.json
│ │ │ ├── T.json
│ │ │ ├── templateDir.json
│ │ │ ├── tempName.json
│ │ │ ├── text.json
│ │ │ ├── time.json
│ │ │ ├── titleSeparator.json
│ │ │ ├── tmpl.json
│ │ │ ├── tn.json
│ │ │ ├── toValue.json
│ │ │ ├── transform.json
│ │ │ ├── trustProxyDefaultSymbol.json
│ │ │ ├── typeArgumentsKey.json
│ │ │ ├── typeKey.json
│ │ │ ├── typeMessage.json
│ │ │ ├── typesRegistryPackageName.json
│ │ │ ├── u.json
│ │ │ ├── UNDEFINED.json
│ │ │ ├── unit.json
│ │ │ ├── UNMATCHED_SURROGATE_PAIR_REPLACE.json
│ │ │ ├── ur.json
│ │ │ ├── USAGE.json
│ │ │ ├── value.json
│ │ │ ├── Vr.json
│ │ │ ├── watchmanURL.json
│ │ │ ├── webkit.json
│ │ │ ├── xhtml.json
│ │ │ ├── XP_DEFAULT_PATHEXT.json
│ │ │ └── y.json
│ │ ├── Collaborative_Development_with_MCP_Integration.json
│ │ ├── colorCode.json
│ │ ├── comma.json
│ │ ├── command.json
│ │ ├── completionShTemplate.json
│ │ ├── configJsContent.json
│ │ ├── connectionString.json
│ │ ├── Consolidated_TypeScript_Interfaces_Template.json
│ │ ├── Could_you_interpret_the_assumed_applicat___.json
│ │ ├── cssClassStr.json
│ │ ├── currentBoundaryParse.json
│ │ ├── d.json
│ │ ├── Data_Analysis_Template.json
│ │ ├── data.json
│ │ ├── DATA.json
│ │ ├── Database_Query_Assistant.json
│ │ ├── dataWebpackPrefix.json
│ │ ├── debug.json
│ │ ├── Debugging_Assistant.json
│ │ ├── decodeStateVectorV2.json
│ │ ├── DEFAULT_DELIMITER.json
│ │ ├── DEFAULT_DIAGRAM_DIRECTION.json
│ │ ├── DEFAULT_INDENT.json
│ │ ├── DEFAULT_JS_PATTERN.json
│ │ ├── DEFAULT_LOG_TARGET.json
│ │ ├── defaultHelpOpt.json
│ │ ├── defaultHost.json
│ │ ├── deferY18nLookupPrefix.json
│ │ ├── DELIM.json
│ │ ├── delimiter.json
│ │ ├── DEPRECATION.json
│ │ ├── DESCENDING.json
│ │ ├── destMain.json
│ │ ├── development
│ │ │ ├── Collaborative_Development_with_MCP_Integration.json
│ │ │ ├── Consolidated_TypeScript_Interfaces_Template.json
│ │ │ ├── Development_Workflow.json
│ │ │ ├── index.json
│ │ │ ├── MCP_Server_Development_Prompt_Combiner.json
│ │ │ └── Monorepo_Migration_and_Code_Organization_Guide.json
│ │ ├── Development_System_Prompt.json
│ │ ├── Development_Workflow.json
│ │ ├── devops
│ │ │ ├── Docker_Compose_Prompt_Combiner.json
│ │ │ ├── Docker_Containerization_Guide.json
│ │ │ └── index.json
│ │ ├── DID_NOT_THROW.json
│ │ ├── direction.json
│ │ ├── displayValue.json
│ │ ├── DNS.json
│ │ ├── doc.json
│ │ ├── Docker_Compose_Prompt_Combiner.json
│ │ ├── Docker_Containerization_Guide.json
│ │ ├── Docker_MCP_Servers_Orchestration_Guide.json
│ │ ├── DOCUMENTATION_NOTE.json
│ │ ├── DOT.json
│ │ ├── DOTS.json
│ │ ├── dummyCompoundId.json
│ │ ├── e.json
│ │ ├── E.json
│ │ ├── earlyHintsLink.json
│ │ ├── elide.json
│ │ ├── EMPTY.json
│ │ ├── encoded.json
│ │ ├── end.json
│ │ ├── endpoint.json
│ │ ├── environment.json
│ │ ├── ERR_CODE.json
│ │ ├── errMessage.json
│ │ ├── errMsg.json
│ │ ├── ERROR_MESSAGE.json
│ │ ├── error.json
│ │ ├── ERROR.json
│ │ ├── ERRORCLASS.json
│ │ ├── errorMessage.json
│ │ ├── es6Default.json
│ │ ├── ESC.json
│ │ ├── Escapable.json
│ │ ├── escapedChar.json
│ │ ├── escapeFuncStr.json
│ │ ├── escSlash.json
│ │ ├── ev.json
│ │ ├── event.json
│ │ ├── execaMessage.json
│ │ ├── EXPECTED_LABEL.json
│ │ ├── expected.json
│ │ ├── expectedString.json
│ │ ├── expression1.json
│ │ ├── EXTENSION.json
│ │ ├── f.json
│ │ ├── FAIL_TEXT.json
│ │ ├── FILE_BROWSER_FACTORY.json
│ │ ├── fill.json
│ │ ├── findPackageJson.json
│ │ ├── fnKey.json
│ │ ├── Foresight_Assistant.json
│ │ ├── FORMAT.json
│ │ ├── formatted.json
│ │ ├── from.json
│ │ ├── fullpaths.json
│ │ ├── FUNC_ERROR_TEXT.json
│ │ ├── general
│ │ │ └── index.json
│ │ ├── Generate_different_types_of_questions_ab___.json
│ │ ├── Generate_Mermaid_Diagram.json
│ │ ├── GenStateSuspendedStart.json
│ │ ├── GENSYNC_EXPECTED_START.json
│ │ ├── GitHub_Repository_Explorer.json
│ │ ├── gutter.json
│ │ ├── h.json
│ │ ├── handlerFuncName.json
│ │ ├── HASH_UNDEFINED.json
│ │ ├── head.json
│ │ ├── helpMessage.json
│ │ ├── HINT_ARG.json
│ │ ├── HOOK_RETURNED_NOTHING_ERROR_MESSAGE.json
│ │ ├── i.json
│ │ ├── id.json
│ │ ├── identifier.json
│ │ ├── Identifier.json
│ │ ├── INDENT.json
│ │ ├── indentation.json
│ │ ├── index.json
│ │ ├── INDIRECTION_FRAGMENT.json
│ │ ├── Initialize_project_setup_for_a_new_micro___.json
│ │ ├── input.json
│ │ ├── inputText.json
│ │ ├── insert.json
│ │ ├── insertPromptQuery.json
│ │ ├── INSPECT_MAX_BYTES.json
│ │ ├── install_dependencies__build__run__test____.json
│ │ ├── intToCharMap.json
│ │ ├── IS_ITERABLE_SENTINEL.json
│ │ ├── IS_KEYED_SENTINEL.json
│ │ ├── isConfigType.json
│ │ ├── isoSentinel.json
│ │ ├── isSourceNode.json
│ │ ├── j.json
│ │ ├── J.json
│ │ ├── JAKE_CMD.json
│ │ ├── JEST_GLOBAL_NAME.json
│ │ ├── JEST_GLOBALS_MODULE_NAME.json
│ │ ├── JSON_SYNTAX_CHAR.json
│ │ ├── json.json
│ │ ├── jsonType.json
│ │ ├── jupyter_namespaceObject.json
│ │ ├── JUPYTERLAB_DOCMANAGER_PLUGIN_ID.json
│ │ ├── k.json
│ │ ├── KERNEL_STATUS_ERROR_CLASS.json
│ │ ├── key.json
│ │ ├── l.json
│ │ ├── labelId.json
│ │ ├── LATEST_PROTOCOL_VERSION.json
│ │ ├── LETTERDASHNUMBER.json
│ │ ├── LF.json
│ │ ├── LIMIT_REPLACE_NODE.json
│ │ ├── LINE_FEED.json
│ │ ├── logTime.json
│ │ ├── lstatkey.json
│ │ ├── lt.json
│ │ ├── m.json
│ │ ├── maliciousPayload.json
│ │ ├── manager.py
│ │ ├── marker.json
│ │ ├── mask.json
│ │ ├── match.json
│ │ ├── matchingDelim.json
│ │ ├── MAXIMUM_MESSAGE_SIZE.json
│ │ ├── MCP_Integration_Assistant.json
│ │ ├── MCP_Resources_Explorer.json
│ │ ├── MCP_Resources_Integration_Guide.json
│ │ ├── MCP_Server_Development_Prompt_Combiner.json
│ │ ├── MCP_Server_Integration_Guide.json
│ │ ├── mcp-code-generator.json
│ │ ├── mdcContent.json
│ │ ├── Mermaid_Analysis_Expert.json
│ │ ├── Mermaid_Class_Diagram_Generator.json
│ │ ├── Mermaid_Diagram_Generator.json
│ │ ├── Mermaid_Diagram_Modifier.json
│ │ ├── MERMAID_DOM_ID_PREFIX.json
│ │ ├── message.json
│ │ ├── messages.json
│ │ ├── meth.json
│ │ ├── minimatch.json
│ │ ├── MOBILE_QUERY.json
│ │ ├── MOCK_CONSTRUCTOR_NAME.json
│ │ ├── MOCKS_PATTERN.json
│ │ ├── Modify_Mermaid_Diagram.json
│ │ ├── moduleDirectory.json
│ │ ├── Monorepo_Migration_and_Code_Organization_Guide.json
│ │ ├── msg.json
│ │ ├── mtr.json
│ │ ├── Multi-Resource_Context_Assistant.json
│ │ ├── multipartType.json
│ │ ├── n.json
│ │ ├── N.json
│ │ ├── name.json
│ │ ├── NATIVE_PLATFORM.json
│ │ ├── newUrl.json
│ │ ├── NM.json
│ │ ├── NO_ARGUMENTS.json
│ │ ├── NO_DIFF_MESSAGE.json
│ │ ├── NODE_MODULES.json
│ │ ├── nodeInternalPrefix.json
│ │ ├── nonASCIIidentifierStartChars.json
│ │ ├── nonKey.json
│ │ ├── NOT_A_DOT.json
│ │ ├── notCharacterOrDash.json
│ │ ├── notebookURL.json
│ │ ├── notSelector.json
│ │ ├── nullTag.json
│ │ ├── num.json
│ │ ├── NUMBER.json
│ │ ├── o.json
│ │ ├── O.json
│ │ ├── octChar.json
│ │ ├── octetStreamType.json
│ │ ├── operators.json
│ │ ├── other
│ │ │ ├── __image_1___describe_the_icon_in_one_sen___.json
│ │ │ ├── __type.json
│ │ │ ├── Advanced_Multi-Server_Integration_Template.json
│ │ │ ├── Analyze_Mermaid_Diagram.json
│ │ │ ├── Basic_Template.json
│ │ │ ├── Code_Diagram_Documentation_Creator.json
│ │ │ ├── Collaborative_Development_with_MCP_Integration.json
│ │ │ ├── completionShTemplate.json
│ │ │ ├── Could_you_interpret_the_assumed_applicat___.json
│ │ │ ├── DEFAULT_INDENT.json
│ │ │ ├── Docker_MCP_Servers_Orchestration_Guide.json
│ │ │ ├── Generate_different_types_of_questions_ab___.json
│ │ │ ├── Generate_Mermaid_Diagram.json
│ │ │ ├── GitHub_Repository_Explorer.json
│ │ │ ├── index.json
│ │ │ ├── Initialize_project_setup_for_a_new_micro___.json
│ │ │ ├── install_dependencies__build__run__test____.json
│ │ │ ├── LINE_FEED.json
│ │ │ ├── MCP_Resources_Explorer.json
│ │ │ ├── MCP_Resources_Integration_Guide.json
│ │ │ ├── MCP_Server_Integration_Guide.json
│ │ │ ├── mcp-code-generator.json
│ │ │ ├── Mermaid_Class_Diagram_Generator.json
│ │ │ ├── Mermaid_Diagram_Generator.json
│ │ │ ├── Mermaid_Diagram_Modifier.json
│ │ │ ├── Modify_Mermaid_Diagram.json
│ │ │ ├── Multi-Resource_Context_Assistant.json
│ │ │ ├── output.json
│ │ │ ├── sseUrl.json
│ │ │ ├── string.json
│ │ │ ├── Task_List_Helper.json
│ │ │ ├── Template-Based_MCP_Integration.json
│ │ │ ├── Test_Prompt.json
│ │ │ ├── type.json
│ │ │ ├── VERSION.json
│ │ │ ├── WIN_SLASH.json
│ │ │ └── You_are_limited_to_respond_Yes_or_No_onl___.json
│ │ ├── out.json
│ │ ├── output.json
│ │ ├── OUTSIDE_JEST_VM_PROTOCOL.json
│ │ ├── override.json
│ │ ├── p.json
│ │ ├── PACKAGE_FILENAME.json
│ │ ├── PACKAGE_JSON.json
│ │ ├── packageVersion.json
│ │ ├── paddedNumber.json
│ │ ├── page.json
│ │ ├── parseClass.json
│ │ ├── PATH_NODE_MODULES.json
│ │ ├── path.json
│ │ ├── pathExt.json
│ │ ├── pattern.json
│ │ ├── PatternBoolean.json
│ │ ├── pBuiltins.json
│ │ ├── pFloatForm.json
│ │ ├── pkg.json
│ │ ├── PLUGIN_ID_DOC_MANAGER.json
│ │ ├── plusChar.json
│ │ ├── PN_CHARS.json
│ │ ├── point.json
│ │ ├── prefix.json
│ │ ├── PRETTY_PLACEHOLDER.json
│ │ ├── Project_Analysis_Assistant.json
│ │ ├── ProjectsUpdatedInBackgroundEvent.json
│ │ ├── PromptCombiner_Interface.json
│ │ ├── promptId.json
│ │ ├── property_prefix.json
│ │ ├── pubkey256.json
│ │ ├── Q.json
│ │ ├── qmark.json
│ │ ├── QO.json
│ │ ├── query.json
│ │ ├── querystringType.json
│ │ ├── queryText.json
│ │ ├── r.json
│ │ ├── R.json
│ │ ├── rangeStart.json
│ │ ├── re.json
│ │ ├── reI.json
│ │ ├── REQUIRED_FIELD_SYMBOL.json
│ │ ├── Research_Assistant.json
│ │ ├── reserve.json
│ │ ├── resolvedDestination.json
│ │ ├── resolverDir.json
│ │ ├── responseType.json
│ │ ├── result.json
│ │ ├── ROOT_DESCRIBE_BLOCK_NAME.json
│ │ ├── ROOT_NAMESPACE_NAME.json
│ │ ├── ROOT_TASK_NAME.json
│ │ ├── route.json
│ │ ├── RUNNING_TEXT.json
│ │ ├── RXstyle.json
│ │ ├── s.json
│ │ ├── SCHEMA_PATH.json
│ │ ├── schemaQuery.json
│ │ ├── se.json
│ │ ├── SEARCHABLE_CLASS.json
│ │ ├── secret.json
│ │ ├── selector.json
│ │ ├── SEMVER_SPEC_VERSION.json
│ │ ├── sensitiveHeaders.json
│ │ ├── sep.json
│ │ ├── separator.json
│ │ ├── Sequential_Data_Analysis_with_MCP_Integration.json
│ │ ├── SHAPE_STATE.json
│ │ ├── shape.json
│ │ ├── SHARED.json
│ │ ├── short.json
│ │ ├── side.json
│ │ ├── SNAPSHOT_VERSION.json
│ │ ├── SOLID_Code_Analysis_Visualizer.json
│ │ ├── SOURCE_MAPPING_PREFIX.json
│ │ ├── source.json
│ │ ├── sourceMapContent.json
│ │ ├── SPACE_SYMBOL.json
│ │ ├── SPACE.json
│ │ ├── sqlKeywords.json
│ │ ├── sranges.json
│ │ ├── sseUrl.json
│ │ ├── st.json
│ │ ├── ST.json
│ │ ├── stack.json
│ │ ├── START_HIDING.json
│ │ ├── START_OF_LINE.json
│ │ ├── startNoTraversal.json
│ │ ├── STATES.json
│ │ ├── stats.json
│ │ ├── statSync.json
│ │ ├── status.json
│ │ ├── storageStatus.json
│ │ ├── storageType.json
│ │ ├── str.json
│ │ ├── string.json
│ │ ├── stringifiedObject.json
│ │ ├── stringPath.json
│ │ ├── stringResult.json
│ │ ├── stringTag.json
│ │ ├── strValue.json
│ │ ├── style.json
│ │ ├── SUB_NAME.json
│ │ ├── subkey.json
│ │ ├── SUBPROTOCOL.json
│ │ ├── SUITE_NAME.json
│ │ ├── symbolPattern.json
│ │ ├── symbolTag.json
│ │ ├── system
│ │ │ ├── Aa.json
│ │ │ ├── b.json
│ │ │ ├── Development_System_Prompt.json
│ │ │ ├── index.json
│ │ │ ├── marker.json
│ │ │ ├── PATH_NODE_MODULES.json
│ │ │ ├── ProjectsUpdatedInBackgroundEvent.json
│ │ │ ├── RXstyle.json
│ │ │ ├── status.json
│ │ │ └── versionMajorMinor.json
│ │ ├── t.json
│ │ ├── T.json
│ │ ├── Task_List_Helper.json
│ │ ├── Template-Based_MCP_Integration.json
│ │ ├── template.py
│ │ ├── templateDir.json
│ │ ├── tempName.json
│ │ ├── Test_Prompt.json
│ │ ├── text.json
│ │ ├── time.json
│ │ ├── titleSeparator.json
│ │ ├── tmpl.json
│ │ ├── tn.json
│ │ ├── TOPBAR_FACTORY.json
│ │ ├── toValue.json
│ │ ├── transform.json
│ │ ├── trustProxyDefaultSymbol.json
│ │ ├── txt.json
│ │ ├── type.json
│ │ ├── typeArgumentsKey.json
│ │ ├── typeKey.json
│ │ ├── typeMessage.json
│ │ ├── typesRegistryPackageName.json
│ │ ├── u.json
│ │ ├── UNDEFINED.json
│ │ ├── unit.json
│ │ ├── UNMATCHED_SURROGATE_PAIR_REPLACE.json
│ │ ├── ur.json
│ │ ├── usage.json
│ │ ├── USAGE.json
│ │ ├── user
│ │ │ ├── backupId.json
│ │ │ ├── DESCENDING.json
│ │ │ ├── encoded.json
│ │ │ ├── index.json
│ │ │ ├── J.json
│ │ │ ├── MOBILE_QUERY.json
│ │ │ ├── promptId.json
│ │ │ ├── schemaQuery.json
│ │ │ ├── TOPBAR_FACTORY.json
│ │ │ ├── txt.json
│ │ │ └── usage.json
│ │ ├── value.json
│ │ ├── VERSION.json
│ │ ├── version.py
│ │ ├── versionMajorMinor.json
│ │ ├── Vr.json
│ │ ├── watchmanURL.json
│ │ ├── webkit.json
│ │ ├── WIN_SLASH.json
│ │ ├── xhtml.json
│ │ ├── XP_DEFAULT_PATHEXT.json
│ │ ├── y.json
│ │ └── You_are_limited_to_respond_Yes_or_No_onl___.json
│ ├── resources
│ │ ├── __init__.py
│ │ ├── code_examples
│ │ │ └── index.json
│ │ ├── config
│ │ │ └── index.json
│ │ ├── documentation
│ │ │ └── index.json
│ │ ├── images
│ │ │ └── index.json
│ │ ├── index.json
│ │ └── other
│ │ └── index.json
│ ├── server.py
│ ├── templates
│ │ ├── __init__.py
│ │ ├── AbstractFactory.json
│ │ ├── Adapter.json
│ │ ├── base.py
│ │ ├── Builder.json
│ │ ├── Chain.json
│ │ ├── Command.json
│ │ ├── component
│ │ │ ├── AbstractFactory.json
│ │ │ ├── Adapter.json
│ │ │ ├── Builder.json
│ │ │ ├── Chain.json
│ │ │ ├── Command.json
│ │ │ ├── Decorator.json
│ │ │ ├── Facade.json
│ │ │ ├── Factory.json
│ │ │ ├── Iterator.json
│ │ │ ├── Mediator.json
│ │ │ ├── Memento.json
│ │ │ ├── Observer.json
│ │ │ ├── Prototype.json
│ │ │ ├── Proxy.json
│ │ │ ├── Singleton.json
│ │ │ ├── State.json
│ │ │ ├── Strategy.json
│ │ │ ├── TemplateMethod.json
│ │ │ └── Visitor.json
│ │ ├── component.py
│ │ ├── Decorator.json
│ │ ├── Facade.json
│ │ ├── Factory.json
│ │ ├── index.json
│ │ ├── Iterator.json
│ │ ├── manager.py
│ │ ├── Mediator.json
│ │ ├── Memento.json
│ │ ├── Observer.json
│ │ ├── project.py
│ │ ├── Prototype.json
│ │ ├── Proxy.json
│ │ ├── renderer.py
│ │ ├── Singleton.json
│ │ ├── State.json
│ │ ├── Strategy.json
│ │ ├── template_manager.py
│ │ ├── TemplateMethod.json
│ │ ├── types.py
│ │ └── Visitor.json
│ └── utils
│ └── __init__.py
├── SUMMARY.md
├── TASK_COMPLETION_SUMMARY.md
├── templates
│ └── openssl
│ ├── files
│ │ ├── CMakeLists.txt.jinja2
│ │ ├── conanfile.py.jinja2
│ │ ├── main.cpp.jinja2
│ │ └── README.md.jinja2
│ ├── openssl-consumer.json
│ └── template.json
├── test_openssl_integration.sh
├── test_package
│ └── conanfile.py
└── tests
├── __init__.py
├── conftest.py
├── integration
│ ├── test_core_integration.py
│ ├── test_mermaid_integration.py
│ ├── test_prompt_manager_integration.py
│ └── test_server_integration.py
├── test_aws_mcp.py
├── test_base_classes.py
├── test_config.py
├── test_exceptions.py
├── test_mermaid.py
├── test_prompts.py
└── test_templates.py
```
# Files
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/prompts/other/MCP_Resources_Integration_Guide.json:
--------------------------------------------------------------------------------
```json
{
"name": "MCP Resources Integration Guide",
"description": "A comprehensive guide to working with and integrating resources across multiple MCP servers",
"type": "prompt",
"category": "other",
"content": "# MCP Resources Integration Guide\\n\\nYou are an expert on the Model Context Protocol (MCP) ecosystem, specializing in resource integration across multiple MCP servers. Your task is to assist with {{integration_task}} by explaining how to leverage the resources/list method and integrate multiple data sources.\\n\\n## Understanding MCP Resources\\n\\nResources in the MCP ecosystem are named data objects that can be referenced and accessed across different MCP servers. They enable:\\n\\n1. **Cross-server data access**: Retrieving and using data from multiple specialized servers\\n2. **Contextual enrichment**: Adding relevant information to prompt templates\\n3. **Dynamic content generation**: Creating outputs based on up-to-date information\\n4. **Workflow orchestration**: Coordinating complex operations involving multiple data sources\\n\\n## The `resources/list` Method\\n\\nThe `resources/list` method is a powerful capability that enables discovery and exploration of available contextual data sources. It can be used to:\\n\\n- **Discover available resources**: List all accessible data sources across connected MCP servers\\n- **Filter resources by type**: Find specific kinds of resources (files, database records, API results)\\n- **Explore metadata**: View descriptions, timestamps, and other metadata about available resources\\n- **Support dynamic workflows**: Enable applications to adapt based on available context\\n\\n### Basic Usage\\n\\n```\\n// Example: Listing all available resources\\n{\\n \\\\\\\"method\\\\\\\": \\\\\\\"resources/list\\\\\\\",\\n \\\\\\\"params\\\\\\\": {}\\n}\\n\\n// Example: Filtering resources by prefix\\n{\\n \\\\\\\"method\\\\\\\": \\\\\\\"resources/list\\\\\\\",\\n \\\\\\\"params\\\\\\\": {\\n \\\\\\\"prefix\\\\\\\": \\\\\\\"github://\\\\\\\"\\n }\\n}\\n```\\n\\n## Integrating Resources from Different MCP Servers\\n\\n### Available Resource Types by Server\\n\\n| Server Type | Resource Prefix | Example URI | Description |\\n|-------------|----------------|-------------|-------------|\\n| GitHub | github:// | github://owner/repo/path/to/file | Repository files and metadata |\\n| Filesystem | file:// | file:///path/to/local/file | Local file system access |\\n| PostgreSQL | postgres:// | postgres://database/table/record | Database records and query results |\\n| Memory | memory:// | memory://session/key | Stored session context |\\n| Web | https:// | https://api.example.com/data | Web content and API responses |\\n| {{custom_server}} | {{custom_prefix}} | {{custom_example}} | {{custom_description}} |\\n\\n### Resource Integration Patterns\\n\\n#### 1. Chain of Resources Pattern\\nConnect multiple resources sequentially, where the output of one resource operation becomes the input for the next:\\n\\n```\\n// Step 1: Retrieve configuration from GitHub\\nconst config = await getResource('github://org/repo/config.json');\\n\\n// Step 2: Use config to query database\\nconst queryResults = await getResource(`postgres://database/table?query=${config.queryParams}`);\\n\\n// Step 3: Process results and store in memory\\nawait setResource('memory://session/processed_data', processData(queryResults));\\n```\\n\\n#### 2. 
Aggregation Pattern\\nCombine data from multiple resources to create a comprehensive context:\\n\\n```\\n// Collect data from multiple sources\\nconst codebase = await getResource('github://org/repo/src');\\nconst documentation = await getResource('file:///local/docs');\\nconst issueTracking = await getResource('https://issues.example.com/api/project');\\n\\n// Combine into unified context\\nconst projectContext = {\\n code: codebase,\\n docs: documentation,\\n issues: issueTracking\\n};\\n```\\n\\n#### 3. Template Enrichment Pattern\\nUse resources to populate template variables dynamically:\\n\\n```\\n// Retrieve template\\nconst template = await getResource('prompts://templates/analysis');\\n\\n// Gather contextual data\\nconst repoStats = await getResource('github://org/repo/stats');\\nconst performanceData = await getResource('postgres://metrics/performance');\\n\\n// Apply template with resource data\\nconst enrichedPrompt = applyTemplate(template, {\\n project_metrics: repoStats,\\n performance_insights: performanceData\\n});\\n```\\n\\n## Implementation Guidelines for {{integration_task}}\\n\\n### Step 1: Resource Discovery\\nFirst, use the resources/list method to discover what data sources are available:\\n\\n```javascript\\n// Example resources/list implementation\\nasync function discoverResources() {\\n const resources = await callMCP({\\n method: 'resources/list',\\n params: {}\\n });\\n \\n console.log('Available resources:', resources);\\n return resources;\\n}\\n```\\n\\n### Step 2: Resource Access Patterns\\nImplement standardized patterns for accessing different resource types:\\n\\n```javascript\\n// Example resource access function\\nasync function getResource(uri) {\\n const serverType = getServerTypeFromUri(uri);\\n \\n const response = await callMCP({\\n server: serverType,\\n method: 'resources/get',\\n params: { uri }\\n });\\n \\n return response.data;\\n}\\n```\\n\\n### Step 3: Resource Integration\\nCombine resources using the appropriate integration pattern for your use case:\\n\\n{{integration_code}}\\n\\n### Step 4: Error Handling and Fallbacks\\nImplement robust error handling for cases where resources may be unavailable:\\n\\n```javascript\\ntry {\\n const resource = await getResource('github://org/repo/file.json');\\n // Process resource\\n} catch (error) {\\n console.error('Error accessing resource:', error);\\n // Use fallback resource or strategy\\n const fallbackResource = await getResource('file:///local/fallback.json');\\n}\\n```\\n\\n## Best Practices for Resource Integration\\n\\n1. **Cache appropriately**: Some resources may be expensive to fetch repeatedly\\n2. **Handle failures gracefully**: Use fallbacks when resources are unavailable\\n3. **Consider resource formats**: Different servers may return different data structures\\n4. **Manage dependencies**: Be mindful of resource dependencies and potential circular references\\n5. **Document resource usage**: Make resource URIs and usage patterns explicit\\n6. 
**Security awareness**: Consider access control implications when sharing resources\\n{{additional_practices}}\\n\\n## Implementation Examples for Common Scenarios\\n\\n### Example 1: Project Analysis Dashboard\\nCombine code repository statistics, issue tracking, and documentation:\\n\\n```javascript\\nasync function buildProjectDashboard() {\\n // Discover available resources\\n const resources = await discoverResources();\\n \\n // Check if required resources are available\\n const hasGitHub = resources.some(r => r.startsWith('github://'));\\n const hasIssues = resources.some(r => r.startsWith('https://issues.'));\\n \\n // Gather data from available sources\\n const repoData = hasGitHub ? \\n await getResource('github://org/project/stats') : \\n { error: 'GitHub data unavailable' };\\n \\n const issueData = hasIssues ?\\n await getResource('https://issues.example.com/api/project/stats') :\\n { error: 'Issue tracker unavailable' };\\n \\n // Combine into unified dashboard data\\n return {\\n code_metrics: repoData,\\n issue_metrics: issueData,\\n timestamp: new Date().toISOString()\\n };\\n}\\n```\\n\\n### Example 2: Dynamic Document Generation\\nGenerate documentation by combining templates with real-time data:\\n\\n```javascript\\nasync function generateDocumentation() {\\n // Get document template\\n const template = await getResource('prompts://templates/documentation');\\n \\n // Gather data from multiple sources\\n const apiSchema = await getResource('file:///api/schema.json');\\n const usageStats = await getResource('postgres://analytics/api_usage');\\n const exampleCode = await getResource('github://org/examples/api');\\n \\n // Generate documentation\\n return applyTemplate(template, {\\n schema: apiSchema,\\n usage: usageStats,\\n examples: exampleCode\\n });\\n}\\n```\\n\\n### Example 3: {{custom_example_name}}\\n{{custom_example_description}}\\n\\n```javascript\\n{{custom_example_code}}\\n```\\n\\n## Resources/List Method in Action\\n\\nThe resources/list method serves multiple important functions:\\n\\n1. **Discovery and Exploration**: Clients can discover what contextual resources are available\\n2. **Workflow Orchestration**: Automated workflows can determine which resources to use\\n3. **Enhanced UI/UX**: User interfaces can show available resources for selection\\n4. **Integration with External Services**: Bridge between clients and external data sources\\n\\nExample implementation of a resource explorer using resources/list:\\n\\n```javascript\\nasync function exploreResources(prefix = '') {\\n const resources = await callMCP({\\n method: 'resources/list',\\n params: { prefix }\\n });\\n \\n // Group resources by type\\n const resourcesByType = resources.reduce((groups, uri) => {\\n const type = uri.split('://')[0];\\n if (!groups[type]) groups[type] = [];\\n groups[type].push(uri);\\n return groups;\\n }, {});\\n \\n // Display available resources by type\\n for (const [type, uris] of Object.entries(resourcesByType)) {\\n console.log(`${type} resources (${uris.length}):`);\\n uris.forEach(uri => console.log(` - ${uri}`));\\n }\\n \\n return resourcesByType;\\n}\\n```\\n\\n## Conclusion\\n\\nEffective integration of resources across MCP servers is a powerful pattern that enables complex workflows, rich contextual awareness, and dynamic content generation. 
By understanding the resources/list method and implementing appropriate integration patterns, you can leverage the full potential of the MCP ecosystem for {{integration_task}}.\\n\\nWhat specific aspect of MCP resource integration would you like to explore further?\\\",\\n \\\"isTemplate\\\": true,\\n \\\"variables\\\": [\\n \\\"integration_task\\\",\\n \\\"custom_server\\\",\\n \\\"custom_prefix\\\",\\n \\\"custom_example\\\",\\n \\\"custom_description\\\",\\n \\\"integration_code\\\",\\n \\\"additional_practices\\\",\\n \\\"custom_example_name\\\",\\n \\\"custom_example_description\\\",\\n \\\"custom_example_code\\\"\\n ],\\n \\\"tags\\\": [\\n \\\"mcp\\\",\\n \\\"resources\\\",\\n \\\"integration\\\",\\n \\\"advanced\\\",\\n \\\"multi-server\\\",\\n \\\"template\\\"\\n ],\\n \\\"createdAt\\\": \\\"2025-03-15T16:00:00.000Z\\\",\\n \\\"updatedAt\\\": \\\"2025-03-15T16:00:00.000Z\\\",\\n \\\"version\\\": 1,\\n \\\"metadata\\\": {\\n \\\"recommended_servers\\\": [\\n \\\"github\\\",\\n \\\"filesystem\\\",\\n \\\"postgres\\\",\\n \\\"memory\\\",\\n \\\"prompts\\\"\\n ],\\n \\\"example_variables\\\": {\\n \\\"integration_task\\\": \\\"building a comprehensive project analysis tool\\\",\\n \\\"custom_server\\\": \\\"TimeSeries\\\",\\n \\\"custom_prefix\\\": \\\"timeseries://\\\",\\n \\\"custom_example\\\": \\\"timeseries://metrics/cpu-usage/7d\\\",\\n \\\"custom_description\\\": \\\"Historical time-series data for metrics and monitoring\\\",\\n \\\"integration_code\\\": \\\"async function integrateProjectAnalysis() {\\\\n // Get repository information\\\\n const repoInfo = await getResource('github://org/repo/info');\\\\n \\\\n // Fetch relevant code files based on repo structure\\\\n const codeFiles = await Promise.all(\\\\n repoInfo.main_modules.map(module => \\\\n getResource(`github://org/repo/src/${module}`)\\\\n )\\\\n );\\\\n \\\\n // Get database schema information\\\\n const dbSchema = await getResource('postgres://database/information_schema');\\\\n \\\\n // Combine everything into a unified context\\\\n const projectContext = {\\\\n repository: repoInfo,\\\\n code_modules: codeFiles,\\\\n database_structure: dbSchema,\\\\n analysis_timestamp: new Date().toISOString()\\\\n };\\\\n \\\\n // Store the combined context in memory for future reference\\\\n await setResource('memory://session/project_context', projectContext);\\\\n \\\\n return projectContext;\\\\n}\\\",\\n \\\"additional_practices\\\": \\\"7. **Version awareness**: Consider resource version compatibility\\\\n8. **Performance tracking**: Monitor resource access patterns and optimize frequent operations\\\\n9. **Scope limitation**: Only access resources directly relevant to the current task\\\\n10. 
**Progressive enhancement**: Design systems that work with minimal resources but enhance capabilities when more are available\\\",\\n \\\"custom_example_name\\\": \\\"Cross-Server Data Validation\\\",\\n \\\"custom_example_description\\\": \\\"Validate data consistency across different storage systems by comparing repositories, databases, and local files:\\\",\\n \\\"custom_example_code\\\": \\\"async function validateDataConsistency() {\\\\n // Get configuration schema from repository\\\\n const configSchema = await getResource('github://org/repo/schema/config.json');\\\\n \\\\n // Get actual configurations from database\\\\n const dbConfigs = await getResource('postgres://app/configurations');\\\\n \\\\n // Get local configuration files\\\\n const localConfigs = await getResource('file:///app/config/');\\\\n \\\\n // Compare configurations across systems\\\\n const validationResults = {\\\\n schema_valid: validateAgainstSchema(dbConfigs, configSchema),\\\\n db_local_match: compareConfigurations(dbConfigs, localConfigs),\\\\n mismatches: findMismatches(dbConfigs, localConfigs, configSchema)\\\\n };\\\\n \\\\n // Store validation results in memory\\\\n await setResource('memory://validation/config_results', validationResults);\\\\n \\\\n return validationResults;\\\\n}",
"variables": {},
"metadata": {
"source": "/home/sparrow/projects/mcp-prompts/prompts/mcp-resources-integration.json",
"imported": true
}
}
```
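
The prompt above is a template whose `{{placeholder}}` variables (for example `{{integration_task}}`) are filled in at render time. As a minimal sketch of how such a file might be consumed — assuming only a well-formed JSON object with a `content` string, not the repository's actual `prompt_manager` API — a standalone loader could look like this (the function name and paths below are illustrative):

```python
"""Hypothetical loader for a prompt-template JSON like the one above (illustrative only)."""
import json
import re
from pathlib import Path
from typing import Any, Dict


def render_prompt(template_path: Path, variables: Dict[str, Any]) -> str:
    """Load a prompt template and substitute {{placeholder}} variables.

    Unknown placeholders are left intact so missing values are easy to spot.
    """
    data = json.loads(template_path.read_text())
    content = data["content"]

    def _substitute(match: re.Match) -> str:
        key = match.group(1).strip()
        return str(variables.get(key, match.group(0)))

    return re.sub(r"\{\{\s*([\w-]+)\s*\}\}", _substitute, content)


# Example usage (path and values are placeholders):
# rendered = render_prompt(
#     Path("prompts/other/MCP_Resources_Integration_Guide.json"),
#     {"integration_task": "building a project analysis dashboard"},
# )
```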
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/mermaid/generator.py:
--------------------------------------------------------------------------------
```python
"""
Mermaid diagram generator for MCP Project Orchestrator.
This module provides functionality for generating Mermaid diagram
definitions from various inputs and templates.
"""
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import json
from ..core import Config
from .types import DiagramType, DiagramConfig, DiagramMetadata


class MermaidGenerator:
    """Class for generating Mermaid diagram definitions."""

    def __init__(self, config: Config):
        """Initialize the Mermaid generator.

        Args:
            config: Configuration instance
        """
        self.config = config
        templates_base = getattr(config.settings, 'templates_dir', Path('templates'))
        self.templates_dir = templates_base / "mermaid"
        self.templates: Dict[str, Dict[str, Any]] = {}

    async def initialize(self) -> None:
        """Initialize the generator.

        Creates the templates directory if it doesn't exist and
        loads any existing templates.
        """
        self.templates_dir.mkdir(parents=True, exist_ok=True)
        await self.load_templates()

    async def cleanup(self) -> None:
        """Clean up resources."""
        self.templates.clear()

    async def load_templates(self) -> None:
        """Load Mermaid diagram templates from the templates directory."""
        for file_path in self.templates_dir.glob("*.json"):
            try:
                with open(file_path) as f:
                    template = json.load(f)
                self.templates[file_path.stem] = template
            except Exception:
                pass  # Skip invalid templates

    def generate_flowchart(
        self,
        nodes: List[Dict[str, str]] | List[tuple],
        edges: List[Dict[str, str]] | List[tuple],
        direction: str = "TD",
        config: Optional[DiagramConfig] = None,
    ) -> str:
        """Generate a flowchart diagram definition.

        Args:
            nodes: List of node definitions
            edges: List of edge definitions
            direction: Flow direction (TD, BT, LR, RL) - TD by default
            config: Optional diagram configuration

        Returns:
            Mermaid flowchart definition
        """
        if config is None:
            config = DiagramConfig(type=DiagramType.FLOWCHART)

        lines = [f"flowchart {direction}"]

        # Add nodes
        for node in nodes:
            if isinstance(node, dict):
                node_id = node["id"]
                node_label = node.get("label", node_id)
            else:
                node_id, node_label = node[0], node[1]
            node_shape = ""
            if node_shape:
                lines.append(f" {node_id}[{node_label}]{{{node_shape}}}")
            else:
                lines.append(f" {node_id}[{node_label}]")

        # Add edges
        for edge in edges:
            if isinstance(edge, dict):
                from_node = edge["from"]
                to_node = edge["to"]
                edge_label = edge.get("label", "")
                edge_style = edge.get("style", "-->")
            else:
                from_node, to_node, edge_label = edge
                edge_style = "-->"
            if edge_label:
                lines.append(f" {from_node} {edge_style}|{edge_label}| {to_node}")
            else:
                lines.append(f" {from_node} {edge_style} {to_node}")

        return "\n".join(lines)

    def generate_class(
        self,
        classes: List[Dict[str, Any]],
        relationships: List[tuple],
        config: Optional[DiagramConfig] = None,
    ) -> str:
        """Generate a class diagram definition.

        Args:
            classes: List of class definitions
            relationships: List of class relationships
            config: Optional diagram configuration

        Returns:
            Mermaid class diagram definition
        """
        if config is None:
            config = DiagramConfig(type=DiagramType.CLASS)

        lines = ["classDiagram"]

        # Add classes
        for class_def in classes:
            class_name = class_def["name"]
            # Class definition
            lines.append(f" class {class_name} {{")
            # Properties
            for prop in class_def.get("attributes", []) or class_def.get("properties", []):
                if isinstance(prop, str):
                    lines.append(f" +{prop}")
                    continue
                prop_name = prop.get("name", prop)
                prop_type = prop.get("type", "")
                prop_visibility = prop.get("visibility", "+")
                if prop_type:
                    lines.append(f" {prop_visibility}{prop_name}: {prop_type}")
                else:
                    lines.append(f" {prop_visibility}{prop_name}")
            # Methods
            for method in class_def.get("methods", []):
                if isinstance(method, str):
                    lines.append(f" +{method}")
                    continue
                method_name = method.get("name", method)
                method_params = method.get("params", "")
                method_return = method.get("return", "")
                method_visibility = method.get("visibility", "+")
                if method_return:
                    lines.append(
                        f" {method_visibility}{method_name}({method_params}) {method_return}"
                    )
                else:
                    lines.append(f" {method_visibility}{method_name}({method_params})")
            lines.append(" }")

        # Add relationships
        for rel in relationships:
            if isinstance(rel, tuple):
                from_class, to_class, rel_type = rel
                # Map relationship type to Mermaid syntax
                rel_symbol = "--"
                if rel_type == "extends":
                    rel_symbol = "--|>"
                elif rel_type == "implements":
                    rel_symbol = "..|>"
                elif rel_type == "composition":
                    rel_symbol = "*--"
                elif rel_type == "aggregation":
                    rel_symbol = "o--"
                lines.append(f" {from_class} {rel_symbol} {to_class}")
            else:
                from_class = rel["from"]
                to_class = rel["to"]
                rel_type = rel.get("type", "--")
                rel_label = rel.get("label", "")
                if rel_label:
                    lines.append(f" {from_class} {rel_type} {to_class}: {rel_label}")
                else:
                    lines.append(f" {from_class} {rel_type} {to_class}")

        return "\n".join(lines)

    def generate_sequence(
        self,
        participants: List[str],
        messages: List[tuple],
        config: Optional[DiagramConfig] = None,
    ) -> str:
        """Generate a sequence diagram definition.

        Args:
            participants: List of participant definitions
            messages: List of message definitions
            config: Optional diagram configuration

        Returns:
            Mermaid sequence diagram definition
        """
        if config is None:
            config = DiagramConfig(type=DiagramType.SEQUENCE)

        lines = ["sequenceDiagram"]

        # Add participants
        for participant in participants:
            if isinstance(participant, dict):
                participant_id = participant["id"]
                participant_label = participant.get("label", participant_id)
                lines.append(f" participant {participant_id} as {participant_label}")
            else:
                participant_id = participant
                lines.append(f" participant {participant_id}")

        # Add messages
        for message in messages:
            if isinstance(message, dict):
                from_participant = message["from"]
                to_participant = message["to"]
                message_text = message["text"]
                message_type = message.get("type", "->")
                activate = message.get("activate", False)
                deactivate = message.get("deactivate", False)
            else:
                # Support tuples/lists with variable lengths:
                # (from, to[, text[, type[, activate[, deactivate]]]])
                if not isinstance(message, (tuple, list)) or len(message) < 2:
                    # Invalid message structure; skip safely
                    continue
                from_participant = message[0]
                to_participant = message[1]
                message_text = message[2] if len(message) >= 3 else ""
                message_type = message[3] if len(message) >= 4 else "->>"
                activate = bool(message[4]) if len(message) >= 5 else False
                deactivate = bool(message[5]) if len(message) >= 6 else False

            lines.append(f" {from_participant}{message_type}{to_participant}: {message_text}")
            # Add optional activation/deactivation
            if activate:
                lines.append(f" activate {to_participant}")
            if deactivate:
                lines.append(f" deactivate {to_participant}")

        return "\n".join(lines)

    def generate_from_template(
        self,
        template_name: str,
        variables: Dict[str, Any],
        config: Optional[DiagramConfig] = None,
    ) -> Optional[str]:
        """Generate a diagram definition from a template.

        Args:
            template_name: Name of the template to use
            variables: Variables to substitute in the template
            config: Optional diagram configuration

        Returns:
            Generated diagram definition or None if template not found
        """
        template = self.templates.get(template_name)
        if not template:
            return None
        return self.generate_from_template_impl(template, variables, config)

    def validate_diagram(self, definition: str, diagram_type: DiagramType) -> bool:
        """Validate a diagram definition.

        Args:
            definition: Diagram definition to validate
            diagram_type: Expected type of diagram

        Returns:
            True if valid, False otherwise
        """
        try:
            # Basic validation - check if diagram starts with expected type
            definition = definition.strip()
            if diagram_type == DiagramType.FLOWCHART:
                # Check for basic syntax errors like unclosed brackets
                if definition.count('[') != definition.count(']'):
                    return False
                return definition.startswith("flowchart")
            elif diagram_type == DiagramType.SEQUENCE:
                # Check for missing participants - sequence diagrams without participants are invalid
                if "participant" not in definition:
                    return False
                return definition.startswith("sequenceDiagram")
            elif diagram_type == DiagramType.CLASS:
                return definition.startswith("classDiagram")
            return False
        except Exception:
            return False
def save_diagram(self, metadata: DiagramMetadata, definition: str, path: Path) -> None:
"""Save a diagram to disk.
Args:
metadata: Diagram metadata
definition: Diagram definition
path: Path to save to
"""
path.write_text(definition)
def load_diagram(self, path: Path) -> tuple:
"""Load a diagram from disk.
Args:
path: Path to load from
Returns:
Tuple of (metadata, definition)
"""
definition = path.read_text()
meta = DiagramMetadata(
name=path.stem,
description="",
type=DiagramType.FLOWCHART,
version="0.1.0",
author="",
tags=[],
)
return meta, definition
def generate_from_template_impl(
self,
template: Dict[str, Any],
variables: Dict[str, Any],
config: Optional[DiagramConfig],
) -> Optional[str]:
"""Internal implementation of template generation."""
try:
# Get the template content and diagram type
content = template["content"]
diagram_type = template.get("type", "flowchart")
# Replace variables in the template
for var_name, var_value in variables.items():
placeholder = f"{{{var_name}}}"
content = content.replace(placeholder, str(var_value))
# Add configuration if provided
if config:
config_dict = config.to_dict()
for key, value in config_dict.items():
if key in template.get("config", {}):
content = content.replace(f"{{{key}}}", str(value))
return content
except Exception:
return None
def save_template(
self,
name: str,
content: str,
diagram_type: DiagramType,
variables: Optional[Dict[str, str]] = None,
) -> None:
"""Save a new diagram template.
Args:
name: Template name
content: Template content
diagram_type: Type of diagram
variables: Optional dictionary of variable descriptions
"""
template = {
"name": name,
"type": str(diagram_type),
"content": content,
"variables": variables or {},
}
file_path = self.templates_dir / f"{name}.json"
with open(file_path, 'w') as f:
json.dump(template, f, indent=2)
self.templates[name] = template
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/fan_out_orchestrator.py:
--------------------------------------------------------------------------------
```python
"""
Cross-Repository Release Fan-Out Orchestrator
Manages coordinated releases across the OpenSSL ecosystem by triggering
workflows in dependent repositories based on dependency relationships.
"""
import asyncio
import json
import logging
import os
from datetime import datetime
from typing import Any, Dict, List, Optional, Set
from dataclasses import dataclass
from enum import Enum
import httpx
from github import Github
from github.Repository import Repository
logger = logging.getLogger(__name__)
class ReleaseType(Enum):
"""Types of releases that can trigger fan-out."""
FOUNDATION = "foundation" # openssl-conan-base, openssl-fips-policy
TOOLING = "tooling" # openssl-tools
DOMAIN = "domain" # openssl
ORCHESTRATION = "orchestration" # mcp-project-orchestrator
@dataclass
class RepositoryInfo:
"""Information about a repository in the ecosystem."""
name: str
full_name: str
release_type: ReleaseType
dependencies: List[str]
dependents: List[str]
version_file: Optional[str] = None
conanfile_path: str = "conanfile.py"
@dataclass
class ReleaseTrigger:
"""Represents a release trigger for fan-out."""
source_repo: str
version: str
release_type: ReleaseType
triggered_at: datetime
dependencies_updated: List[str]
class FanOutOrchestrator:
"""Orchestrates cross-repository releases and dependency updates."""
def __init__(self, github_token: str):
self.github = Github(github_token)
self.repositories = self._initialize_repositories()
self.dependency_graph = self._build_dependency_graph()
def _initialize_repositories(self) -> Dict[str, RepositoryInfo]:
"""Initialize repository information."""
return {
"openssl-conan-base": RepositoryInfo(
name="openssl-conan-base",
full_name="sparesparrow/openssl-conan-base",
release_type=ReleaseType.FOUNDATION,
dependencies=[],
dependents=["openssl-tools"],
version_file=None,
conanfile_path="conanfile.py"
),
"openssl-fips-policy": RepositoryInfo(
name="openssl-fips-policy",
full_name="sparesparrow/openssl-fips-policy",
release_type=ReleaseType.FOUNDATION,
dependencies=[],
dependents=["openssl-tools"],
version_file=None,
conanfile_path="conanfile.py"
),
"openssl-tools": RepositoryInfo(
name="openssl-tools",
full_name="sparesparrow/openssl-tools",
release_type=ReleaseType.TOOLING,
dependencies=["openssl-conan-base", "openssl-fips-policy"],
dependents=["openssl"],
version_file=None,
conanfile_path="conanfile.py"
),
"openssl": RepositoryInfo(
name="openssl",
full_name="sparesparrow/openssl",
release_type=ReleaseType.DOMAIN,
dependencies=["openssl-tools"],
dependents=[],
version_file="VERSION.dat",
conanfile_path="conanfile.py"
),
"mcp-project-orchestrator": RepositoryInfo(
name="mcp-project-orchestrator",
full_name="sparesparrow/mcp-project-orchestrator",
release_type=ReleaseType.ORCHESTRATION,
dependencies=[],
dependents=[],
version_file=None,
conanfile_path="conanfile.py"
)
}
def _build_dependency_graph(self) -> Dict[str, Set[str]]:
"""Build dependency graph for release ordering."""
graph = {}
for repo_name, repo_info in self.repositories.items():
graph[repo_name] = set(repo_info.dependencies)
return graph
async def trigger_release_cascade(self, source_repo: str, version: str,
release_type: ReleaseType) -> List[ReleaseTrigger]:
"""Trigger a release cascade starting from a source repository."""
triggers = []
# Create initial trigger
initial_trigger = ReleaseTrigger(
source_repo=source_repo,
version=version,
release_type=release_type,
triggered_at=datetime.utcnow(),
dependencies_updated=[]
)
triggers.append(initial_trigger)
# Find all dependent repositories
dependents = self._get_all_dependents(source_repo)
# Trigger releases in dependency order
for dependent in dependents:
try:
success = await self._trigger_dependent_release(
dependent, source_repo, version
)
if success:
trigger = ReleaseTrigger(
source_repo=dependent,
version=version,
release_type=self.repositories[dependent].release_type,
triggered_at=datetime.utcnow(),
dependencies_updated=[source_repo]
)
triggers.append(trigger)
logger.info(f"Successfully triggered release in {dependent}")
else:
logger.error(f"Failed to trigger release in {dependent}")
except Exception as e:
logger.error(f"Error triggering release in {dependent}: {e}")
return triggers
def _get_all_dependents(self, repo_name: str) -> List[str]:
"""Get all repositories that depend on the given repository."""
dependents = set()
to_process = [repo_name]
while to_process:
current = to_process.pop(0)
for repo_name_check, repo_info in self.repositories.items():
if current in repo_info.dependencies and repo_name_check not in dependents:
dependents.add(repo_name_check)
to_process.append(repo_name_check)
return list(dependents)
async def _trigger_dependent_release(self, dependent_repo: str,
source_repo: str, version: str) -> bool:
"""Trigger a release in a dependent repository."""
try:
repo = self.github.get_repo(self.repositories[dependent_repo].full_name)
# Trigger workflow_dispatch event
workflow_dispatch_inputs = {
"source_repository": source_repo,
"source_version": version,
"dependency_update": "true",
"triggered_by": "fan-out-orchestrator"
}
# Find the appropriate workflow to trigger
workflows = repo.get_workflows()
target_workflow = None
for workflow in workflows:
if workflow.name.lower() in ["release", "build", "ci"]:
target_workflow = workflow
break
if not target_workflow:
logger.warning(f"No suitable workflow found in {dependent_repo}")
return False
# Dispatch the workflow
target_workflow.create_dispatch(
ref="main", # or get default branch
inputs=workflow_dispatch_inputs
)
logger.info(f"Dispatched workflow in {dependent_repo}")
return True
except Exception as e:
logger.error(f"Error dispatching workflow in {dependent_repo}: {e}")
return False
async def update_dependency_versions(self, repo_name: str,
dependency_updates: Dict[str, str]) -> bool:
"""Update dependency versions in a repository's conanfile.py."""
try:
repo = self.github.get_repo(self.repositories[repo_name].full_name)
# Get the conanfile.py content
conanfile_content = repo.get_contents(
self.repositories[repo_name].conanfile_path
).decoded_content.decode('utf-8')
# Update dependency versions
updated_content = conanfile_content
for dep_name, new_version in dependency_updates.items():
# Simple string replacement - in production, use proper parsing
old_pattern = f'"{dep_name}/'
new_pattern = f'"{dep_name}/{new_version}@'
updated_content = updated_content.replace(old_pattern, new_pattern)
# Create a new branch and commit
branch_name = f"update-dependencies-{datetime.utcnow().strftime('%Y%m%d-%H%M%S')}"
# Get default branch
default_branch = repo.default_branch
# Create new branch
ref = repo.get_git_ref(f"heads/{default_branch}")
repo.create_git_ref(f"refs/heads/{branch_name}", ref.object.sha)
# Update file
repo.update_file(
path=self.repositories[repo_name].conanfile_path,
message=f"Update dependencies: {', '.join(f'{k}={v}' for k, v in dependency_updates.items())}",
content=updated_content,
sha=repo.get_contents(
self.repositories[repo_name].conanfile_path
).sha,
branch=branch_name
)
# Create pull request
pr = repo.create_pull(
title=f"Update dependencies from {list(dependency_updates.keys())[0]}",
body=f"""
## Dependency Updates
This PR updates the following dependencies:
{chr(10).join(f'- **{dep}**: {version}' for dep, version in dependency_updates.items())}
### Changes
- Updated `conanfile.py` with new dependency versions
- Triggered by fan-out orchestrator from {list(dependency_updates.keys())[0]}
### Testing
- [ ] Verify builds pass with new dependencies
- [ ] Run integration tests
- [ ] Check for any breaking changes
---
*This PR was automatically created by the Fan-Out Orchestrator*
""",
head=branch_name,
base=default_branch
)
logger.info(f"Created PR #{pr.number} in {repo_name} for dependency updates")
return True
except Exception as e:
logger.error(f"Error updating dependencies in {repo_name}: {e}")
return False
async def get_release_status(self, triggers: List[ReleaseTrigger]) -> Dict[str, str]:
"""Get the status of release triggers."""
status = {}
for trigger in triggers:
try:
repo = self.github.get_repo(self.repositories[trigger.source_repo].full_name)
                workflows = repo.get_workflow_runs(
                    created=f">={trigger.triggered_at.isoformat()}"
                )
# Find the most recent workflow run
latest_run = None
for run in workflows:
if run.created_at >= trigger.triggered_at:
latest_run = run
break
if latest_run:
status[trigger.source_repo] = latest_run.conclusion or latest_run.status
else:
status[trigger.source_repo] = "not_found"
except Exception as e:
logger.error(f"Error getting status for {trigger.source_repo}: {e}")
status[trigger.source_repo] = "error"
return status
class ReleaseCoordinator:
"""Coordinates releases across the OpenSSL ecosystem."""
def __init__(self, github_token: str):
self.orchestrator = FanOutOrchestrator(github_token)
    async def coordinate_foundation_release(self, version: str) -> Dict[str, Any]:
"""Coordinate a foundation layer release (openssl-conan-base or openssl-fips-policy)."""
logger.info(f"Coordinating foundation release: {version}")
# Trigger tooling layer updates
tooling_triggers = await self.orchestrator.trigger_release_cascade(
"openssl-tools", version, ReleaseType.TOOLING
)
return {
"foundation_version": version,
"tooling_triggers": tooling_triggers,
"status": "coordinated"
}
    async def coordinate_tooling_release(self, version: str) -> Dict[str, Any]:
"""Coordinate a tooling layer release (openssl-tools)."""
logger.info(f"Coordinating tooling release: {version}")
# Trigger domain layer updates
domain_triggers = await self.orchestrator.trigger_release_cascade(
"openssl", version, ReleaseType.DOMAIN
)
return {
"tooling_version": version,
"domain_triggers": domain_triggers,
"status": "coordinated"
}
    async def coordinate_domain_release(self, version: str) -> Dict[str, Any]:
"""Coordinate a domain layer release (openssl)."""
logger.info(f"Coordinating domain release: {version}")
# Domain releases don't trigger other releases
# but we can update orchestration layer if needed
orchestration_triggers = await self.orchestrator.trigger_release_cascade(
"mcp-project-orchestrator", version, ReleaseType.ORCHESTRATION
)
return {
"domain_version": version,
"orchestration_triggers": orchestration_triggers,
"status": "coordinated"
}
async def main():
"""Main orchestration function."""
# Configuration
GITHUB_TOKEN = "your-github-token" # Should come from environment
# Initialize coordinator
coordinator = ReleaseCoordinator(GITHUB_TOKEN)
# Example: Coordinate a tooling release
result = await coordinator.coordinate_tooling_release("1.2.5")
logger.info(f"Release coordination result: {result}")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
asyncio.run(main())
```
--------------------------------------------------------------------------------
/mcp-project-orchestrator/openssl/tests/test_cursor_deployer.py:
--------------------------------------------------------------------------------
```python
"""
Tests for Cursor configuration deployer.
This module contains tests for the CursorConfigDeployer class and related functionality.
"""
import pytest
import tempfile
import shutil
from pathlib import Path
from unittest.mock import Mock, patch
from mcp_orchestrator.cursor_deployer import CursorConfigDeployer
from mcp_orchestrator.platform_detector import PlatformDetector
from mcp_orchestrator.cursor_config import CursorConfig
class TestCursorConfigDeployer:
"""Test cases for CursorConfigDeployer."""
def setup_method(self):
"""Set up test fixtures."""
self.temp_dir = tempfile.mkdtemp()
self.repo_root = Path(self.temp_dir) / "test_repo"
self.package_root = Path(self.temp_dir) / "test_package"
# Create test repository
self.repo_root.mkdir(parents=True)
# Create test package structure
self.package_root.mkdir(parents=True)
(self.package_root / "cursor-rules" / "rules").mkdir(parents=True)
(self.package_root / "cursor-rules" / "prompts").mkdir(parents=True)
# Create test templates
self._create_test_templates()
# Create deployer
self.deployer = CursorConfigDeployer(self.repo_root, self.package_root)
def teardown_method(self):
"""Clean up test fixtures."""
shutil.rmtree(self.temp_dir)
def _create_test_templates(self):
"""Create test template files."""
# Create shared rule template
shared_template = self.package_root / "cursor-rules" / "rules" / "shared.mdc.jinja2"
shared_template.write_text("""---
title: Shared Rules
description: Common rules for all platforms
created: {{ timestamp }}
platform: shared
user: {{ user }}
---
# Shared Rules
This is a test shared rule template.
Platform: {{ os }}
User: {{ user }}
""")
# Create Linux rule template
linux_template = self.package_root / "cursor-rules" / "rules" / "linux-dev.mdc.jinja2"
linux_template.write_text("""---
title: Linux Development Rules
description: Linux-specific development rules
created: {{ timestamp }}
platform: linux
user: {{ user }}
---
# Linux Development Rules
This is a test Linux rule template.
OS: {{ os }}
Architecture: {{ architecture }}
""")
# Create prompt template
prompt_template = self.package_root / "cursor-rules" / "prompts" / "test-prompt.md.jinja2"
prompt_template.write_text("""# Test Prompt
This is a test prompt template.
Platform: {{ os }}
User: {{ user }}
""")
# Create MCP config template
mcp_template = self.package_root / "cursor-rules" / "mcp.json.jinja2"
mcp_template.write_text("""{
"mcpServers": {
"test-server": {
"command": "{{ platform_detector.get_mcp_command() }}",
"args": ["-y", "@test/mcp-server"],
"env": {
"PLATFORM": "{{ os }}",
"USER": "{{ user }}"
}
}
}
}
""")
def test_initialization(self):
"""Test deployer initialization."""
assert self.deployer.repo_root == self.repo_root
assert self.deployer.package_root == self.package_root
assert self.deployer.cursor_dir == self.repo_root / ".cursor"
assert self.deployer.templates_dir == self.package_root / "cursor-rules"
def test_detect_platform(self):
"""Test platform detection."""
platform_info = self.deployer.detect_platform()
assert "os" in platform_info
assert "architecture" in platform_info
assert "python_version" in platform_info
assert "user" in platform_info
assert "home" in platform_info
assert "is_ci" in platform_info
def test_deploy_basic(self):
"""Test basic deployment."""
self.deployer.deploy()
# Check that .cursor directory was created
assert self.deployer.cursor_dir.exists()
assert (self.deployer.cursor_dir / "rules").exists()
assert (self.deployer.cursor_dir / "prompts").exists()
# Check that shared rule was deployed
shared_rule = self.deployer.cursor_dir / "rules" / "shared.mdc"
assert shared_rule.exists()
# Check that platform-specific rule was deployed
platform_info = self.deployer.detect_platform()
os_name = platform_info["os"]
platform_rule = self.deployer.cursor_dir / "rules" / f"{os_name}-dev.mdc"
assert platform_rule.exists()
# Check that prompt was deployed
prompt_file = self.deployer.cursor_dir / "prompts" / "test-prompt.md"
assert prompt_file.exists()
# Check that MCP config was deployed
mcp_config = self.deployer.cursor_dir / "mcp.json"
assert mcp_config.exists()
def test_deploy_with_custom_rules(self):
"""Test deployment with custom rules."""
# Create custom rule file
custom_rule = Path(self.temp_dir) / "custom-rule.mdc"
custom_rule.write_text("""---
title: Custom Rule
description: A custom rule for testing
---
# Custom Rule
This is a custom rule.
""")
# Deploy with custom rules
self.deployer.deploy(custom_rules=[custom_rule])
# Check that custom rule was imported
custom_dir = self.deployer.cursor_dir / "rules" / "custom"
assert custom_dir.exists()
imported_rule = custom_dir / "custom-rule.mdc"
assert imported_rule.exists()
assert imported_rule.read_text() == custom_rule.read_text()
def test_deploy_opt_out(self):
"""Test deployment opt-out."""
# Deploy with opt-out
self.deployer.deploy(opt_out=True)
# Check that .cursor directory was not created
assert not self.deployer.cursor_dir.exists()
def test_deploy_force(self):
"""Test deployment with force flag."""
# Deploy once
self.deployer.deploy()
assert self.deployer.cursor_dir.exists()
# Deploy again with force
self.deployer.deploy(force=True)
assert self.deployer.cursor_dir.exists()
def test_deploy_existing_without_force(self):
"""Test deployment when .cursor already exists without force."""
# Deploy once
self.deployer.deploy()
assert self.deployer.cursor_dir.exists()
# Try to deploy again without force (should not overwrite)
with patch('builtins.print') as mock_print:
self.deployer.deploy(force=False)
mock_print.assert_called_with("ℹ️ .cursor/ already exists. Use --force to overwrite.")
def test_show_status(self):
"""Test status display."""
# Deploy configuration
self.deployer.deploy()
# Show status
with patch('builtins.print') as mock_print:
self.deployer.show_status()
# Check that status was printed
assert mock_print.call_count > 0
def test_dry_run(self):
"""Test dry run mode."""
with patch('builtins.print') as mock_print:
self.deployer.dry_run()
# Check that dry run information was printed
assert mock_print.call_count > 0
def test_render_template(self):
"""Test template rendering."""
# Create a simple template
template_path = self.package_root / "cursor-rules" / "test-template.jinja2"
template_path.write_text("Hello {{ user }} from {{ os }}!")
# Render template
output_path = self.deployer.cursor_dir / "test-output.txt"
self.deployer.cursor_dir.mkdir(parents=True)
platform_info = self.deployer.detect_platform()
self.deployer._render_template(
"test-template.jinja2",
output_path,
platform_info
)
# Check output
assert output_path.exists()
content = output_path.read_text()
assert "Hello" in content
assert platform_info["user"] in content
assert platform_info["os"] in content
class TestPlatformDetector:
"""Test cases for PlatformDetector."""
def test_detect_platform(self):
"""Test platform detection."""
detector = PlatformDetector()
platform_info = detector.detect_platform()
assert "os" in platform_info
assert "architecture" in platform_info
assert "python_version" in platform_info
assert "user" in platform_info
assert "home" in platform_info
assert "is_ci" in platform_info
def test_get_rule_template_name(self):
"""Test rule template name selection."""
detector = PlatformDetector()
template_name = detector.get_rule_template_name()
assert template_name in ["linux-dev", "macos-dev", "windows-dev", "ci-linux"]
def test_get_mcp_command(self):
"""Test MCP command selection."""
detector = PlatformDetector()
command = detector.get_mcp_command()
assert command in ["npx", "npx.cmd"]
def test_is_development_environment(self):
"""Test development environment detection."""
detector = PlatformDetector()
is_dev = detector.is_development_environment()
assert isinstance(is_dev, bool)
def test_get_conan_home(self):
"""Test Conan home directory detection."""
detector = PlatformDetector()
conan_home = detector.get_conan_home()
assert isinstance(conan_home, str)
assert len(conan_home) > 0
class TestCursorConfig:
"""Test cases for CursorConfig."""
def setup_method(self):
"""Set up test fixtures."""
self.temp_dir = tempfile.mkdtemp()
self.cursor_dir = Path(self.temp_dir) / ".cursor"
self.cursor_config = CursorConfig(self.cursor_dir)
def teardown_method(self):
"""Clean up test fixtures."""
shutil.rmtree(self.temp_dir)
def test_create_directory_structure(self):
"""Test directory structure creation."""
self.cursor_config.create_directory_structure()
assert self.cursor_dir.exists()
assert (self.cursor_dir / "rules").exists()
assert (self.cursor_dir / "prompts").exists()
assert (self.cursor_dir / "rules" / "custom").exists()
def test_write_rule(self):
"""Test rule writing."""
from mcp_orchestrator.cursor_config import CursorRule
rule = CursorRule(
title="Test Rule",
description="A test rule",
platform="test",
content="# Test Rule\n\nThis is a test rule.",
created="2024-01-01T00:00:00",
user="testuser"
)
self.cursor_config.create_directory_structure()
self.cursor_config.write_rule(rule, "test-rule")
rule_file = self.cursor_dir / "rules" / "test-rule.mdc"
assert rule_file.exists()
content = rule_file.read_text()
assert "Test Rule" in content
assert "testuser" in content
def test_write_prompt(self):
"""Test prompt writing."""
self.cursor_config.create_directory_structure()
self.cursor_config.write_prompt("Test Prompt", "This is a test prompt.", "test-prompt")
prompt_file = self.cursor_dir / "prompts" / "test-prompt.md"
assert prompt_file.exists()
content = prompt_file.read_text()
assert "# Test Prompt" in content
assert "This is a test prompt." in content
def test_write_mcp_config(self):
"""Test MCP configuration writing."""
from mcp_orchestrator.cursor_config import MCPServerConfig
servers = [
MCPServerConfig(
name="test-server",
command="npx",
args=["-y", "@test/server"],
env={"PLATFORM": "test"},
disabled=False
)
]
self.cursor_config.create_directory_structure()
self.cursor_config.write_mcp_config(servers)
mcp_file = self.cursor_dir / "mcp.json"
assert mcp_file.exists()
import json
config = json.loads(mcp_file.read_text())
assert "mcpServers" in config
assert "test-server" in config["mcpServers"]
def test_create_gitignore(self):
"""Test .gitignore creation."""
self.cursor_config.create_directory_structure()
self.cursor_config.create_gitignore()
gitignore_file = self.cursor_dir / ".gitignore"
assert gitignore_file.exists()
content = gitignore_file.read_text()
assert "rules/custom/" in content
assert "*.log" in content
def test_get_existing_rules(self):
"""Test getting existing rules."""
self.cursor_config.create_directory_structure()
# Create test rule files
(self.cursor_dir / "rules" / "rule1.mdc").write_text("Rule 1")
(self.cursor_dir / "rules" / "rule2.mdc").write_text("Rule 2")
rules = self.cursor_config.get_existing_rules()
assert "rule1" in rules
assert "rule2" in rules
def test_get_existing_prompts(self):
"""Test getting existing prompts."""
self.cursor_config.create_directory_structure()
# Create test prompt files
(self.cursor_dir / "prompts" / "prompt1.md").write_text("Prompt 1")
(self.cursor_dir / "prompts" / "prompt2.md").write_text("Prompt 2")
prompts = self.cursor_config.get_existing_prompts()
assert "prompt1" in prompts
assert "prompt2" in prompts
def test_has_mcp_config(self):
"""Test MCP configuration detection."""
self.cursor_config.create_directory_structure()
# Initially no MCP config
assert not self.cursor_config.has_mcp_config()
# Create MCP config
(self.cursor_dir / "mcp.json").write_text('{"test": "config"}')
assert self.cursor_config.has_mcp_config()
def test_is_configured(self):
"""Test configuration detection."""
# Initially not configured
assert not self.cursor_config.is_configured()
# Create directory structure
self.cursor_config.create_directory_structure()
# Still not configured (no rules or prompts)
assert not self.cursor_config.is_configured()
# Add a rule
(self.cursor_dir / "rules" / "test.mdc").write_text("Test rule")
assert self.cursor_config.is_configured()
```
--------------------------------------------------------------------------------
/REFACTORING_RECOMMENDATIONS.md:
--------------------------------------------------------------------------------
```markdown
# Refactoring Recommendations
## Executive Summary
This document outlines recommended refactoring and improvement opportunities for the MCP Project Orchestrator. The codebase is functional with all tests passing, but these improvements will enhance maintainability, performance, and extensibility.
## 🎯 Priority Matrix
| Priority | Impact | Effort | Recommendation |
|----------|---------|--------|----------------|
| P0 | High | Low | Config naming consolidation |
| P0 | High | Medium | Test coverage increase |
| P1 | High | Medium | Manager abstraction |
| P1 | Medium | Low | Error handling improvements |
| P2 | Medium | Medium | Plugin system |
| P2 | Low | Low | Code documentation |
| P3 | Medium | High | Performance optimization |
## 🔧 Critical Refactorings (P0)
### 1. Configuration Naming Consolidation
**Problem**: Inconsistent naming between `Config` and `MCPConfig`
- Tests import `MCPConfig`
- Some modules expect `Config`
- Creates confusion and maintenance burden
**Solution**:
```python
# Standardize on MCPConfig everywhere
# Update core/__init__.py
from .config import MCPConfig
Config = MCPConfig  # Backwards-compatible alias for existing imports
# Update all imports to use MCPConfig consistently
# OR rename MCPConfig to Config in config.py
```
**Benefits**:
- Single source of truth
- Clearer imports
- Easier to understand
**Estimated Effort**: 2 hours
**Breaking Changes**: Minimal (alias preserves backward compatibility)
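If existing `Config` imports should keep working while nudging callers to migrate, a minimal sketch (assuming `MCPConfig` remains the canonical name in `core/config.py`) uses a PEP 562 module-level `__getattr__`:
```python
# core/config.py (sketch): keep MCPConfig canonical, deprecate the old name
import warnings
class MCPConfig:
    """Canonical configuration class (fields omitted in this sketch)."""
    ...
def __getattr__(name: str):
    # `from core.config import Config` falls back here, so old imports still work
    # but emit a DeprecationWarning pointing at MCPConfig.
    if name == "Config":
        warnings.warn(
            "Config is deprecated; import MCPConfig instead",
            DeprecationWarning,
            stacklevel=2,
        )
        return MCPConfig
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```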
### 2. Test Coverage Improvement
**Current**: 27% overall coverage
**Target**: 80%+ coverage
**Focus Areas**:
```
Priority modules to test:
1. prompt_manager/manager.py (32% → 80%)
2. mermaid/generator.py (24% → 80%)
3. mermaid/renderer.py (43% → 80%)
4. core/config.py (61% → 85%)
```
**Approach**:
```python
# Add tests for:
# 1. Manager async methods
# 2. Edge cases and error conditions
# 3. Integration scenarios
# 4. Complex diagram generation
# Example new test:
def test_prompt_manager_async_operations(prompt_manager):
"""Test async prompt loading and caching."""
# Test async load
# Test cache behavior
# Test concurrent access
```
**Estimated Effort**: 1-2 weeks
**Benefits**: Better reliability, easier refactoring, confidence in changes
## 🏗️ Structural Improvements (P1)
### 3. Abstract Manager Base Class
**Problem**: PromptManager and TemplateManager have duplicate patterns
**Current Structure**:
```python
# templates/__init__.py
class TemplateManager:
def __init__(self, templates_dir): ...
def discover_templates(self): ...
def list_templates(self, filter): ...
def get_template(self, name): ...
# prompt_manager/manager.py
class PromptManager:
def __init__(self, config): ...
def discover_prompts(self): ...
def list_prompts(self, category): ...
def get_prompt(self, name): ...
```
**Proposed Solution**:
```python
# core/managers.py
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
class BaseResourceManager(ABC, Generic[T]):
"""Abstract base class for resource managers."""
def __init__(self, base_dir: Path):
self.base_dir = base_dir
self._resources: Dict[str, T] = {}
@abstractmethod
def discover_resources(self) -> None:
"""Discover and load resources from base directory."""
pass
@abstractmethod
def validate_resource(self, resource: T) -> bool:
"""Validate a resource."""
pass
def list_resources(self, **filters) -> List[str]:
"""List resources matching filters."""
return list(self._resources.keys())
def get_resource(self, name: str) -> Optional[T]:
"""Get a resource by name."""
return self._resources.get(name)
def save_resource(self, name: str, resource: T) -> None:
"""Save a resource."""
if not self.validate_resource(resource):
raise ValueError(f"Invalid resource: {name}")
self._resources[name] = resource
# Usage:
class TemplateManager(BaseResourceManager[BaseTemplate]):
def discover_resources(self):
# Template-specific discovery
pass
def validate_resource(self, resource):
return resource.validate()
```
**Benefits**:
- DRY principle
- Consistent API
- Easier to add new managers
- Shared testing utilities
**Estimated Effort**: 1 week
**Breaking Changes**: Minimal (preserve existing public APIs)
### 4. Enhanced Error Handling
**Current Issues**:
- Generic exceptions lose context
- No error codes for programmatic handling
- Limited debugging information
**Proposed Solution**:
```python
# core/exceptions.py
from enum import Enum
from typing import Optional, Dict, Any
class ErrorCode(Enum):
"""Standard error codes for MCP operations."""
TEMPLATE_NOT_FOUND = "E001"
TEMPLATE_INVALID = "E002"
VARIABLE_MISSING = "E003"
DIAGRAM_INVALID = "E004"
IO_ERROR = "E005"
CONFIG_ERROR = "E006"
class MCPError(Exception):
"""Enhanced MCP exception with context."""
def __init__(
self,
message: str,
code: ErrorCode,
details: Optional[Dict[str, Any]] = None,
cause: Optional[Exception] = None
):
super().__init__(message)
self.code = code
self.details = details or {}
self.cause = cause
def to_dict(self) -> Dict[str, Any]:
"""Convert to dictionary for serialization."""
return {
"message": str(self),
"code": self.code.value,
"details": self.details,
"cause": str(self.cause) if self.cause else None
}
class TemplateNotFoundError(MCPError):
"""Template not found."""
def __init__(self, name: str):
super().__init__(
f"Template not found: {name}",
ErrorCode.TEMPLATE_NOT_FOUND,
{"template_name": name}
)
# Usage:
def get_template(self, name: str) -> Template:
if name not in self._templates:
raise TemplateNotFoundError(name)
return self._templates[name]
```
**Benefits**:
- Better debugging
- Programmatic error handling
- Detailed error reports
- Error tracking/monitoring
**Estimated Effort**: 3-4 days
## 🚀 Feature Enhancements (P2)
### 5. Plugin System
**Goal**: Allow external plugins for templates, diagrams, and tools
**Architecture**:
```python
# core/plugins.py
from typing import Protocol, List
from abc import abstractmethod
class TemplateProvider(Protocol):
"""Protocol for template providers."""
@abstractmethod
def list_templates(self) -> List[str]:
"""List available templates."""
...
@abstractmethod
def get_template(self, name: str) -> BaseTemplate:
"""Get a template by name."""
...
class PluginRegistry:
"""Central plugin registry."""
def __init__(self):
self._template_providers: List[TemplateProvider] = []
self._diagram_renderers: List[DiagramRenderer] = []
def register_template_provider(self, provider: TemplateProvider):
"""Register a template provider plugin."""
self._template_providers.append(provider)
def discover_plugins(self):
"""Discover plugins using entry points."""
import importlib.metadata
for entry_point in importlib.metadata.entry_points().select(
group='mcp_orchestrator.plugins'
):
plugin = entry_point.load()
plugin.register(self)
```
```toml
# pyproject.toml
[project.entry-points."mcp_orchestrator.plugins"]
my_plugin = "my_package.plugin"  # point at the module so plugin.register(registry) resolves
```
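For completeness, a minimal sketch of the plugin module that the hypothetical `my_package.plugin` entry point would resolve to:
```python
# my_package/plugin.py (hypothetical plugin module)
from typing import List
class MyTemplateProvider:
    """Example provider satisfying the TemplateProvider protocol."""
    def list_templates(self) -> List[str]:
        return ["my-custom-template"]
    def get_template(self, name: str):
        raise NotImplementedError("Return a BaseTemplate instance here")
def register(registry) -> None:
    """Called by PluginRegistry.discover_plugins() once the module is loaded."""
    registry.register_template_provider(MyTemplateProvider())
```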
**Benefits**:
- Extensibility without core changes
- Third-party integrations
- Community contributions
- Isolated plugin failures
**Estimated Effort**: 2 weeks
### 6. Event System
**Goal**: Decouple components with event-driven architecture
**Implementation**:
```python
# core/events.py
from dataclasses import dataclass
from typing import Any, Callable, Dict, List
from enum import Enum
import logging
logger = logging.getLogger(__name__)
class EventType(Enum):
TEMPLATE_APPLIED = "template.applied"
PROJECT_CREATED = "project.created"
DIAGRAM_GENERATED = "diagram.generated"
PROMPT_RENDERED = "prompt.rendered"
@dataclass
class Event:
"""Base event class."""
type: EventType
data: Any
source: str
class EventBus:
"""Simple event bus for pub/sub."""
def __init__(self):
self._listeners: Dict[EventType, List[Callable]] = {}
def subscribe(self, event_type: EventType, handler: Callable):
"""Subscribe to an event."""
if event_type not in self._listeners:
self._listeners[event_type] = []
self._listeners[event_type].append(handler)
def publish(self, event: Event):
"""Publish an event."""
for handler in self._listeners.get(event.type, []):
try:
handler(event)
except Exception as e:
# Log but don't fail
logger.error(f"Event handler failed: {e}")
# Usage:
event_bus = EventBus()
# Subscribe
def on_template_applied(event: Event):
logger.info(f"Template applied: {event.data['name']}")
event_bus.subscribe(EventType.TEMPLATE_APPLIED, on_template_applied)
# Publish
event_bus.publish(Event(
type=EventType.TEMPLATE_APPLIED,
data={"name": "fastapi-project"},
source="TemplateManager"
))
```
**Benefits**:
- Loose coupling
- Extensible workflows
- Audit logging
- Monitoring hooks
**Estimated Effort**: 1 week
## 📊 Performance Optimizations (P3)
### 7. Caching Strategy
**Current**: Minimal caching, repeated file I/O
**Proposed**:
```python
# core/cache.py
from functools import lru_cache, wraps
from typing import Callable, Any
import hashlib
import pickle
from pathlib import Path
class FileCache:
"""File-backed cache for expensive operations."""
def __init__(self, cache_dir: Path):
self.cache_dir = cache_dir
self.cache_dir.mkdir(parents=True, exist_ok=True)
def get(self, key: str) -> Any:
"""Get cached value."""
cache_file = self.cache_dir / self._hash_key(key)
if cache_file.exists():
with open(cache_file, 'rb') as f:
return pickle.load(f)
return None
def set(self, key: str, value: Any):
"""Set cached value."""
cache_file = self.cache_dir / self._hash_key(key)
with open(cache_file, 'wb') as f:
pickle.dump(value, f)
def _hash_key(self, key: str) -> str:
"""Hash key for filename."""
return hashlib.sha256(key.encode()).hexdigest()
def cached_property(func: Callable) -> property:
"""Cached property decorator."""
attr_name = f'_cached_{func.__name__}'
@wraps(func)
def wrapper(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, func(self))
return getattr(self, attr_name)
return property(wrapper)
# Usage:
class TemplateManager:
@cached_property
def available_templates(self) -> List[str]:
"""Cached list of templates."""
return self._discover_templates()
```
**Estimated Effort**: 3-4 days
### 8. Async Operations
**Current**: Blocking, synchronous file I/O; templates are loaded one at a time
**Proposed**:
```python
# Use async for I/O-bound operations
import asyncio
import aiofiles
class AsyncTemplateManager:
async def load_template(self, name: str) -> Template:
"""Load template asynchronously."""
path = self.templates_dir / f"{name}.json"
async with aiofiles.open(path) as f:
content = await f.read()
return Template.from_json(content)
async def load_all_templates(self) -> List[Template]:
"""Load all templates concurrently."""
template_files = list(self.templates_dir.glob("*.json"))
tasks = [self.load_template(f.stem) for f in template_files]
return await asyncio.gather(*tasks)
```
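A brief usage sketch (assuming an `AsyncTemplateManager` constructed with a templates directory, as above):
```python
import asyncio
async def preload_templates(manager: "AsyncTemplateManager") -> int:
    """Load every template concurrently and return the count."""
    templates = await manager.load_all_templates()
    return len(templates)
# count = asyncio.run(preload_templates(AsyncTemplateManager(...)))
```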
**Estimated Effort**: 1 week
## 🧹 Code Quality Improvements
### 9. Type Hints Enhancement
**Current**: Basic type hints
**Target**: Comprehensive type coverage
```python
# Use Protocol for duck typing
from typing import Any, Dict, Optional, Protocol
class Renderable(Protocol):
"""Protocol for renderable objects."""
def render(self, context: Dict[str, Any]) -> str: ...
def render_template(template: Renderable, context: Dict[str, Any]) -> str:
return template.render(context)
# Use Generic types
from typing import Generic, TypeVar
T = TypeVar('T', bound=BaseTemplate)
class TemplateRegistry(Generic[T]):
def __init__(self):
self._templates: Dict[str, T] = {}
def register(self, name: str, template: T) -> None:
self._templates[name] = template
def get(self, name: str) -> Optional[T]:
return self._templates.get(name)
```
### 10. Documentation Generation
**Setup Sphinx for API docs**:
```bash
# Install Sphinx
pip install sphinx sphinx-rtd-theme sphinx-autodoc-typehints
# Generate docs
sphinx-quickstart docs
sphinx-apidoc -o docs/api src/mcp_project_orchestrator
sphinx-build -b html docs docs/_build
```
**Configuration**:
```python
# docs/conf.py
import os
import sys
sys.path.insert(0, os.path.abspath('../src'))
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx_autodoc_typehints',
]
html_theme = 'sphinx_rtd_theme'
```
## 📋 Implementation Roadmap
### Phase 1: Foundation (Week 1-2)
- [ ] Config naming consolidation
- [ ] Test coverage to 50%
- [ ] Enhanced error handling
### Phase 2: Structure (Week 3-4)
- [ ] Abstract manager base class
- [ ] Test coverage to 65%
- [ ] Event system basics
### Phase 3: Features (Week 5-7)
- [ ] Plugin system
- [ ] Test coverage to 80%
- [ ] Caching implementation
### Phase 4: Polish (Week 8)
- [ ] Documentation generation
- [ ] Performance profiling
- [ ] Final testing and cleanup
## 🎯 Success Metrics
### Before Refactoring
- Test Coverage: 27%
- Modules: 36
- Code Smells: Medium
- Maintainability: Good
### After Refactoring (Target)
- Test Coverage: 80%+
- Modules: ~40 (well-organized)
- Code Smells: Low
- Maintainability: Excellent
- Performance: 2x faster template operations
- Plugin Ecosystem: 3+ community plugins
## 🔍 Code Review Checklist
For each refactoring:
- [ ] Tests updated and passing
- [ ] Documentation updated
- [ ] Type hints complete
- [ ] No breaking changes (or documented)
- [ ] Performance not regressed
- [ ] Security considered
- [ ] Accessibility maintained
- [ ] CI/CD passing
## 📚 References
- [Python Design Patterns](https://refactoring.guru/design-patterns/python)
- [Effective Python](https://effectivepython.com/)
- [Python Type Hints PEP 484](https://peps.python.org/pep-0484/)
- [Plugin Architecture in Python](https://realpython.com/python-application-layouts/)
---
**Last Updated**: 2025-10-01
**Maintainer**: MCP Project Orchestrator Team
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/ecosystem_monitor.py:
--------------------------------------------------------------------------------
```python
"""
OpenSSL Ecosystem Workflow Monitor
Monitors workflow runs across all OpenSSL repositories and provides
AI-assisted failure analysis and automated issue creation.
"""
import asyncio
import json
import logging
import os
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple
from dataclasses import dataclass
from enum import Enum
import httpx
from github import Github
from github.WorkflowRun import WorkflowRun
from github.Repository import Repository
logger = logging.getLogger(__name__)
class FailureType(Enum):
"""Types of workflow failures for classification."""
BUILD_ERROR = "build_error"
TEST_FAILURE = "test_failure"
SECURITY_SCAN = "security_scan"
DEPENDENCY_ISSUE = "dependency_issue"
TIMEOUT = "timeout"
PERMISSION_DENIED = "permission_denied"
CACHE_ISSUE = "cache_issue"
UNKNOWN = "unknown"
@dataclass
class WorkflowFailure:
"""Represents a workflow failure with context."""
repository: str
workflow_name: str
run_id: int
failure_type: FailureType
error_message: str
failed_at: datetime
duration: int
platform: Optional[str] = None
branch: Optional[str] = None
commit_sha: Optional[str] = None
actor: Optional[str] = None
@dataclass
class FailurePattern:
"""Represents a recurring failure pattern."""
failure_type: FailureType
repositories: List[str]
frequency: int
first_seen: datetime
last_seen: datetime
common_error: str
suggested_fix: str
class EcosystemMonitor:
"""Monitors OpenSSL ecosystem workflows and analyzes failures."""
def __init__(self, github_token: str, repositories: List[str]):
self.github = Github(github_token)
self.repositories = repositories
self.failure_patterns: Dict[str, FailurePattern] = {}
async def monitor_workflows(self, hours_back: int = 4) -> List[WorkflowFailure]:
"""Monitor workflows across all repositories for failures."""
failures = []
cutoff_time = datetime.utcnow() - timedelta(hours=hours_back)
for repo_name in self.repositories:
try:
repo = self.github.get_repo(repo_name)
workflow_runs = repo.get_workflow_runs(
status="completed",
created=f">={cutoff_time.isoformat()}"
)
for run in workflow_runs:
if run.conclusion == "failure":
failure = await self._analyze_failure(run, repo_name)
if failure:
failures.append(failure)
except Exception as e:
logger.error(f"Error monitoring {repo_name}: {e}")
return failures
async def _analyze_failure(self, run: WorkflowRun, repo_name: str) -> Optional[WorkflowFailure]:
"""Analyze a failed workflow run and classify the failure."""
try:
            # List the jobs for this workflow run
            jobs = run.jobs()
# Find the failed job
failed_job = None
for job in jobs:
if job.conclusion == "failure":
failed_job = job
break
if not failed_job:
return None
            # Download the failed job's logs; logs_url() returns a short-lived,
            # signed URL for that job's plain-text log
            async with httpx.AsyncClient(follow_redirects=True) as client:
                response = await client.get(failed_job.logs_url())
                logs = response.text
error_message = self._extract_error_message(logs)
# Classify failure type
failure_type = self._classify_failure(error_message, failed_job.name)
return WorkflowFailure(
repository=repo_name,
workflow_name=run.name,
run_id=run.id,
failure_type=failure_type,
error_message=error_message,
failed_at=run.created_at,
                duration=run.timing().run_duration_ms or 0,
platform=self._extract_platform(failed_job.name),
branch=run.head_branch,
commit_sha=run.head_sha,
actor=run.actor.login if run.actor else None
)
except Exception as e:
logger.error(f"Error analyzing failure in {repo_name} run {run.id}: {e}")
return None
def _extract_error_message(self, logs: str) -> str:
"""Extract the most relevant error message from job logs."""
lines = logs.split('\n')
# Look for common error patterns
error_indicators = [
"ERROR:", "FAILED:", "Exception:", "Error:",
"fatal:", "error:", "failed:", "FAIL:"
]
for line in reversed(lines):
if any(indicator in line for indicator in error_indicators):
return line.strip()
# Fallback to last few lines
return '\n'.join(lines[-5:]).strip()
def _classify_failure(self, error_message: str, job_name: str) -> FailureType:
"""Classify the type of failure based on error message and job name."""
error_lower = error_message.lower()
job_lower = job_name.lower()
if "timeout" in error_lower or "timed out" in error_lower:
return FailureType.TIMEOUT
elif "permission denied" in error_lower or "unauthorized" in error_lower:
return FailureType.PERMISSION_DENIED
elif "cache" in error_lower and ("miss" in error_lower or "invalid" in error_lower):
return FailureType.CACHE_ISSUE
elif "dependency" in error_lower or "package not found" in error_lower:
return FailureType.DEPENDENCY_ISSUE
elif "security" in job_lower or "scan" in job_lower:
return FailureType.SECURITY_SCAN
elif "test" in job_lower or "pytest" in error_lower:
return FailureType.TEST_FAILURE
elif "build" in job_lower or "cmake" in error_lower or "conan" in error_lower:
return FailureType.BUILD_ERROR
else:
return FailureType.UNKNOWN
def _extract_platform(self, job_name: str) -> Optional[str]:
"""Extract platform information from job name."""
platforms = ["linux", "windows", "macos", "ubuntu", "centos", "alpine"]
for platform in platforms:
if platform in job_name.lower():
return platform
return None
def analyze_patterns(self, failures: List[WorkflowFailure]) -> List[FailurePattern]:
"""Analyze failures to identify recurring patterns."""
patterns = {}
# Group failures by type and error message
for failure in failures:
key = f"{failure.failure_type.value}:{failure.error_message[:100]}"
if key not in patterns:
patterns[key] = {
'failures': [],
'repositories': set(),
'first_seen': failure.failed_at,
'last_seen': failure.failed_at
}
patterns[key]['failures'].append(failure)
patterns[key]['repositories'].add(failure.repository)
patterns[key]['last_seen'] = max(patterns[key]['last_seen'], failure.failed_at)
patterns[key]['first_seen'] = min(patterns[key]['first_seen'], failure.failed_at)
# Convert to FailurePattern objects
failure_patterns = []
for key, data in patterns.items():
if len(data['failures']) >= 2: # Only patterns with 2+ occurrences
failure_type = data['failures'][0].failure_type
common_error = data['failures'][0].error_message
pattern = FailurePattern(
failure_type=failure_type,
repositories=list(data['repositories']),
frequency=len(data['failures']),
first_seen=data['first_seen'],
last_seen=data['last_seen'],
common_error=common_error,
suggested_fix=self._generate_fix_suggestion(failure_type, common_error)
)
failure_patterns.append(pattern)
return failure_patterns
def _generate_fix_suggestion(self, failure_type: FailureType, error_message: str) -> str:
"""Generate AI-assisted fix suggestions based on failure type."""
suggestions = {
FailureType.BUILD_ERROR: """
**Build Error Fix Suggestions:**
1. Check Conan profile compatibility
2. Verify dependency versions in conanfile.py
3. Clear Conan cache: `conan cache clean`
4. Update build tools and compilers
5. Check for missing system dependencies
""",
FailureType.TEST_FAILURE: """
**Test Failure Fix Suggestions:**
1. Review test logs for specific assertion failures
2. Check test data and fixtures
3. Verify test environment setup
4. Update test expectations if behavior changed
5. Check for flaky tests and add retries
""",
FailureType.SECURITY_SCAN: """
**Security Scan Fix Suggestions:**
1. Update vulnerable dependencies
2. Review security scan reports in GitHub Security tab
3. Address CRITICAL and HIGH severity issues first
4. Update SBOM generation if needed
5. Check for hardcoded secrets or credentials
""",
FailureType.DEPENDENCY_ISSUE: """
**Dependency Issue Fix Suggestions:**
1. Verify Conan remote configuration
2. Check package availability in remote
3. Update package versions in conanfile.py
4. Clear and rebuild Conan cache
5. Check network connectivity to package registry
""",
FailureType.TIMEOUT: """
**Timeout Fix Suggestions:**
1. Increase workflow timeout limits
2. Optimize build process (parallel builds, caching)
3. Check for resource constraints
4. Review long-running operations
5. Consider splitting large jobs into smaller ones
""",
FailureType.PERMISSION_DENIED: """
**Permission Denied Fix Suggestions:**
1. Check OIDC configuration and permissions
2. Verify repository secrets and environment variables
3. Review GitHub Actions permissions
4. Check Cloudsmith API key or OIDC setup
5. Verify workflow file permissions
""",
FailureType.CACHE_ISSUE: """
**Cache Issue Fix Suggestions:**
1. Clear GitHub Actions cache
2. Update cache keys to include relevant changes
3. Check cache size limits
4. Verify cache restoration logic
5. Consider cache invalidation strategy
"""
}
base_suggestion = suggestions.get(failure_type, "Review error logs and check common failure causes.")
# Add specific error context if available
if "conan" in error_message.lower():
base_suggestion += "\n\n**Conan-specific:** Check Conan configuration, remotes, and package availability."
elif "cmake" in error_message.lower():
base_suggestion += "\n\n**CMake-specific:** Verify CMakeLists.txt configuration and build settings."
elif "docker" in error_message.lower():
base_suggestion += "\n\n**Docker-specific:** Check Dockerfile syntax and base image availability."
return base_suggestion.strip()
class IssueCreator:
"""Creates GitHub issues for recurring failure patterns."""
def __init__(self, github_token: str):
self.github = Github(github_token)
async def create_failure_issue(self, pattern: FailurePattern, target_repo: str) -> Optional[int]:
"""Create a GitHub issue for a failure pattern."""
try:
repo = self.github.get_repo(target_repo)
title = f"🚨 Recurring {pattern.failure_type.value.replace('_', ' ').title()} Pattern Detected"
body = f"""
## Failure Pattern Analysis
**Pattern Type:** {pattern.failure_type.value.replace('_', ' ').title()}
**Frequency:** {pattern.frequency} occurrences
**Repositories Affected:** {', '.join(pattern.repositories)}
**First Seen:** {pattern.first_seen.isoformat()}
**Last Seen:** {pattern.last_seen.isoformat()}
### Common Error
```
{pattern.common_error}
```
### Suggested Fix
{pattern.suggested_fix}
### Next Steps
1. Review the error pattern and suggested fixes
2. Implement the recommended solution
3. Monitor for recurrence
4. Close this issue once resolved
---
*This issue was automatically created by the OpenSSL Ecosystem Monitor*
"""
labels = ["bug", "automated", "ecosystem-monitor"]
if pattern.failure_type == FailureType.SECURITY_SCAN:
labels.append("security")
elif pattern.failure_type == FailureType.BUILD_ERROR:
labels.append("build")
elif pattern.failure_type == FailureType.TEST_FAILURE:
labels.append("tests")
issue = repo.create_issue(
title=title,
body=body,
labels=labels
)
logger.info(f"Created issue #{issue.number} in {target_repo} for {pattern.failure_type.value}")
return issue.number
except Exception as e:
logger.error(f"Error creating issue in {target_repo}: {e}")
return None
async def main():
"""Main monitoring function."""
# Configuration
GITHUB_TOKEN = "your-github-token" # Should come from environment
REPOSITORIES = [
"sparesparrow/openssl",
"sparesparrow/openssl-conan-base",
"sparesparrow/openssl-fips-policy",
"sparesparrow/openssl-tools",
"sparesparrow/mcp-project-orchestrator"
]
# Initialize monitor
monitor = EcosystemMonitor(GITHUB_TOKEN, REPOSITORIES)
issue_creator = IssueCreator(GITHUB_TOKEN)
# Monitor workflows
logger.info("Starting ecosystem monitoring...")
failures = await monitor.monitor_workflows(hours_back=4)
if not failures:
logger.info("No failures detected in the last 4 hours")
return
logger.info(f"Found {len(failures)} failures across {len(set(f.repository for f in failures))} repositories")
# Analyze patterns
patterns = monitor.analyze_patterns(failures)
logger.info(f"Identified {len(patterns)} recurring failure patterns")
# Create issues for significant patterns
for pattern in patterns:
if pattern.frequency >= 3: # Only create issues for patterns with 3+ occurrences
# Create issue in the most affected repository
main_repo = max(pattern.repositories, key=lambda r: sum(1 for f in failures if f.repository == r))
issue_number = await issue_creator.create_failure_issue(pattern, main_repo)
if issue_number:
logger.info(f"Created issue #{issue_number} for pattern: {pattern.failure_type.value}")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
asyncio.run(main())
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/mcp-py/CustomBashTool.py:
--------------------------------------------------------------------------------
```python
import anthropic
client = anthropic.Anthropic(
# defaults to os.environ.get("ANTHROPIC_API_KEY")
api_key="my_api_key",
)
# Replace placeholders like {{USER_REQUEST}} with real values,
# because the SDK does not support variables.
message = client.messages.create(
model="claude-3-5-sonnet-20241022",
max_tokens=1000,
temperature=0,
system="You are an AI assistant specialized in translating natural language requests into bash commands for Ubuntu Studio Linux with KDE desktop environment. Your task is to interpret user requests and provide the corresponding bash commands along with brief explanations.\n\nHere's the user's request:\n\n<user_request>\n{{USER_REQUEST}}\n</user_request>\n\nPlease follow these steps to process the request and generate a response:\n\n1. Analyze the request and identify the key components:\n - Action to be performed\n - Specific tools or services mentioned\n - Parameters or conditions specified\n\n2. Formulate an appropriate bash script that fulfills the user's request, considering:\n - Common Linux command-line tools and utilities\n - Specific tools mentioned in the request (prioritize these)\n - Necessary options or flags to achieve the desired outcome\n - Combining multiple steps using appropriate bash syntax (e.g., functions, loops, or conditionals) if needed\n - Including echo commands to communicate analysis content to the user\n - Adding interactive elements where appropriate (e.g., user prompts, menus)\n\n3. Evaluate the safety of the script:\n - Ensure the commands don't pose potential risks to the system\n - If a request seems unsafe, prepare a warning and suggest a safer alternative\n\n4. Generate a JSON response with the following structure:\n {\n \"name\": \"descriptive_filename.sh\",\n \"description\": \"Brief explanation of what the script does\",\n \"script\": \"The actual bash script\"\n }\n\n Note: The \"name\" field should be a descriptive filename based on the script's function, ending with .sh\n\nBefore providing your final response, wrap your thought process inside <thinking> tags. This analysis should include:\na. A clear list of the key components identified in the user's request\nb. Consideration of multiple command options, explaining the reasoning for your final choice\nc. An evaluation of potential security risks and explanation of any mitigation strategies\nd. Your approach to making the script interactive and informative\ne. Consideration of potential edge cases or variations in the user's request\nf. Explicit consideration of the KDE desktop environment and Ubuntu Studio-specific tools\ng. A step-by-step breakdown of how you'll construct the bash script\n\nRemember to always prioritize security and avoid potentially harmful commands. If the user's request is unclear or too complex to be translated into a single bash script, explain the limitations in your analysis process and suggest breaking down the request into smaller, more manageable tasks.\n\nAfter your analysis, provide the JSON response as described above.",
messages=[
{
"role": "user",
"content": [
{
"type": "text",
"text": "<examples>\n<example>\n<USER_REQUEST>\nMonitor MIDI input from a USB MIDI controller\n</USER_REQUEST>\n<ideal_output>\n<thinking>\na. Key components identified:\n - Action: Monitor MIDI input\n - Specific tool: USB MIDI controller\n - Parameters: None specified\n\nb. Command options:\n 1. aconnect -i: Lists MIDI input ports but doesn't monitor events.\n 2. amidi -d: Dumps raw MIDI data, not suitable for monitoring in a user-friendly format.\n 3. aseqdump: Provides real-time display of MIDI events in a human-readable format.\n\n After considering these options, aseqdump seems most appropriate for monitoring MIDI input in a user-friendly way.\n\nc. Security evaluation:\n aseqdump is safe to use as it only reads MIDI input without modifying system settings. No significant security risks are associated with this command.\n\nd. Interactivity and information:\n The script will directly output MIDI events, providing real-time information without requiring additional interactivity.\n\ne. Edge cases:\n - Multiple MIDI devices connected: We'll use the -p option to specify the device name.\n - No MIDI device connected: We'll add an error check before running aseqdump.\n\nf. KDE and Ubuntu Studio consideration:\n This command is not specific to KDE or Ubuntu Studio, but it's compatible with these environments.\n\ng. Script construction:\n 1. Check if aseqdump is installed\n 2. Verify MIDI device connectivity\n 3. Run aseqdump with appropriate options\n 4. Provide helpful output messages\n\nNow, I'll construct the JSON response based on this analysis.\n</thinking>\n\n{\n \"name\": \"monitor_midi_input.sh\",\n \"description\": \"Monitors MIDI input from a USB MIDI controller using aseqdump\",\n \"script\": \"#!/bin/bash\\n\\n# Check if aseqdump is installed\\nif ! command -v aseqdump &> /dev/null; then\\n echo \\\"Error: aseqdump is not installed. Please install it using 'sudo apt-get install alsa-utils'.\\\"\\n exit 1\\nfi\\n\\n# Check if MIDI device is connected\\nif ! aconnect -i | grep -q 'USB MIDI Controller'; then\\n echo \\\"Error: USB MIDI Controller not found. Please connect your device and try again.\\\"\\n exit 1\\nfi\\n\\necho \\\"Monitoring MIDI input from USB MIDI Controller. Press Ctrl+C to stop.\\\"\\naseqdump -p \\\"USB MIDI Controller\\\"\\n\"\n}\n</ideal_output>\n</example>\n<example>\n<USER_REQUEST>\nCreate an SSH tunnel\n</USER_REQUEST>\n<ideal_output>\n<thinking>\na. Key components identified:\n - Action: Create an SSH tunnel\n - Specific tool: SSH\n - Parameters: None specified (assumptions needed)\n\nb. Command options:\n 1. ssh -D: Creates a SOCKS proxy, not a typical SSH tunnel.\n 2. ssh -R: Creates a reverse tunnel, not commonly meant by \"SSH tunnel\".\n 3. ssh -L: Creates a local port forwarding tunnel, the most common type.\n\n After consideration, ssh -L is most appropriate for creating a standard SSH tunnel.\n\nc. Security evaluation:\n SSH tunnels encrypt traffic but have potential risks:\n 1. Trust in the remote host is crucial.\n 2. Local port conflicts should be avoided.\n 3. Users must understand this creates a potential entry point to their system.\n\n To mitigate risks, we'll use a non-standard high port number and add checks.\n\nd. Interactivity and information:\n We'll add prompts for user input to specify remote host, port, and local port.\n\ne. Edge cases:\n - Local port already in use: We'll add a check and allow retry.\n - SSH key authentication: We'll provide an option for this.\n\nf. 
KDE and Ubuntu Studio consideration:\n This command is not specific to KDE or Ubuntu Studio but is fully compatible.\n\ng. Script construction:\n 1. Prompt for remote host, remote port, and local port\n 2. Check if local port is available\n 3. Offer SSH key authentication option\n 4. Construct and execute SSH command\n 5. Provide helpful output messages\n\nNow, I'll construct the JSON response based on this analysis.\n</thinking>\n\n{\n \"name\": \"create_ssh_tunnel.sh\",\n \"description\": \"Creates an SSH tunnel with user-specified parameters and security checks\",\n \"script\": \"#!/bin/bash\\n\\necho \\\"SSH Tunnel Creator\\\"\\n\\nread -p \\\"Enter remote host: \\\" remote_host\\nread -p \\\"Enter remote port: \\\" remote_port\\nread -p \\\"Enter local port (default 8022): \\\" local_port\\nlocal_port=${local_port:-8022}\\n\\n# Check if local port is available\\nif ss -tln | grep -q :$local_port; then\\n echo \\\"Error: Local port $local_port is already in use.\\\"\\n exit 1\\nfi\\n\\nread -p \\\"Use SSH key authentication? (y/n): \\\" use_key\\n\\nif [ \\\"$use_key\\\" = \\\"y\\\" ]; then\\n ssh_command=\\\"ssh -i ~/.ssh/id_rsa -L $local_port:localhost:$remote_port $remote_host\\\"\\nelse\\n ssh_command=\\\"ssh -L $local_port:localhost:$remote_port $remote_host\\\"\\nfi\\n\\necho \\\"Creating SSH tunnel...\\\"\\necho \\\"Local port $local_port will be forwarded to port $remote_port on $remote_host\\\"\\necho \\\"Press Ctrl+C to stop the tunnel\\\"\\n\\n$ssh_command\\n\"\n}\n</ideal_output>\n</example>\n<example>\n<USER_REQUEST>\nCheck recent journalctl errors\n</USER_REQUEST>\n<ideal_output>\n<thinking>\na. Key components identified:\n - Action: Check recent errors\n - Specific tool: journalctl\n - Parameters: Recent, errors\n\nb. Command options:\n 1. journalctl -xe: Shows journal end, not ideal for recent errors.\n 2. journalctl -f: Follows journal real-time, not suitable for checking recent errors.\n 3. journalctl -p err..emerg: Filters error to emergency priority levels.\n\n After analysis, combining journalctl -p err..emerg with -n for recent entries is most appropriate.\n\nc. Security evaluation:\n This command only reads from the system journal without modifications. However, output may contain sensitive information, so we'll add a warning.\n\nd. Interactivity and information:\n We'll add an option for the user to specify the number of recent entries to display.\n\ne. Edge cases:\n - No recent errors: We'll add a check and inform the user if no errors are found.\n - Very large number of errors: We'll add a confirmation prompt for large outputs.\n\nf. KDE and Ubuntu Studio consideration:\n While not specific to KDE or Ubuntu Studio, this command works in these environments and can help troubleshoot system issues.\n\ng. Script construction:\n 1. Prompt user for number of entries to display\n 2. Add confirmation for large number of entries\n 3. Execute journalctl command with appropriate options\n 4. Check for empty output and inform user if no errors found\n 5. 
Display a warning about potential sensitive information in output\n\nNow, I'll construct the JSON response based on this analysis.\n</thinking>\n\n{\n \"name\": \"check_recent_journalctl_errors.sh\",\n \"description\": \"Checks and displays recent journalctl errors with user-specified entry count\",\n \"script\": \"#!/bin/bash\\n\\necho \\\"Recent Journalctl Errors Checker\\\"\\n\\nread -p \\\"Enter the number of recent entries to check (default 50): \\\" num_entries\\nnum_entries=${num_entries:-50}\\n\\nif [ $num_entries -gt 1000 ]; then\\n read -p \\\"You've requested a large number of entries. This may take a while and produce a lot of output. Continue? (y/n): \\\" confirm\\n if [ \\\"$confirm\\\" != \\\"y\\\" ]; then\\n echo \\\"Operation cancelled.\\\"\\n exit 0\\n fi\\nfi\\n\\necho \\\"Checking the last $num_entries journalctl entries for errors...\\\"\\necho \\\"Warning: The output may contain sensitive system information.\\\"\\n\\noutput=$(journalctl -p err..emerg -n $num_entries --no-pager)\\n\\nif [ -z \\\"$output\\\" ]; then\\n echo \\\"No errors found in the last $num_entries entries.\\\"\\nelse\\n echo \\\"$output\\\"\\nfi\\n\"\n}\n</ideal_output>\n</example>\n<example>\n<USER_REQUEST>\nMonitor and replay input\n</USER_REQUEST>\n<ideal_output>\n<thinking>\na. Key components identified:\n - Action 1: Monitor input\n - Action 2: Replay input\n - Specific tools: Not specified (need to choose appropriate tools)\n - Parameters: None specified (assumptions needed)\n\nb. Command options:\n For monitoring:\n 1. xinput: Provides detailed input device information and event monitoring.\n 2. evtest: Monitors input device events but requires root privileges.\n 3. xev: Monitors X events but limited to a specific window.\n\n For replaying:\n 1. xdotool: Can simulate keyboard and mouse input events.\n 2. xte: Part of xautomation package, for simple event replay.\n 3. autokey: Complex scripting tool for input automation.\n\n After analysis, xinput for monitoring and xdotool for replaying are most suitable.\n\nc. Security evaluation:\n Potential security implications:\n 1. Captures all input, including sensitive data like passwords.\n 2. Replay script could execute harmful commands if malicious input is captured.\n\n To mitigate risks:\n 1. Limit monitoring time.\n 2. Filter captured input to include only key and button press events.\n 3. Warn user to review replay script before execution.\n\nd. Interactivity and information:\n We'll add prompts for monitoring duration and review options before replay.\n\ne. Edge cases:\n - No input detected: Add a check and inform the user.\n - Very long input sequence: Add a confirmation for long replay scripts.\n\nf. KDE and Ubuntu Studio consideration:\n These tools work in KDE and Ubuntu Studio environments. We'll check for required packages.\n\ng. Script construction:\n 1. Check for required tools (xinput, xdotool)\n 2. Prompt for monitoring duration\n 3. Monitor input using xinput\n 4. Filter and process captured input\n 5. Generate replay script\n 6. Offer review and execution options\n\nNow, I'll construct the JSON response based on this analysis.\n</thinking>\n\n{\n \"name\": \"monitor_and_replay_input.sh\",\n \"description\": \"Monitors input events and creates a script to replay them, with security considerations\",\n \"script\": \"#!/bin/bash\\n\\necho \\\"Input Monitor and Replay Tool\\\"\\n\\n# Check for required tools\\nfor tool in xinput xdotool; do\\n if ! command -v $tool &> /dev/null; then\\n echo \\\"Error: $tool is not installed. 
Please install it and try again.\\\"\\n exit 1\\n fi\\ndone\\n\\nread -p \\\"Enter monitoring duration in seconds (default 10): \\\" duration\\nduration=${duration:-10}\\n\\necho \\\"Monitoring input for $duration seconds...\\\"\\ntimeout ${duration}s xinput test-xi2 --root > input_log.txt\\n\\nif [ ! -s input_log.txt ]; then\\n echo \\\"No input detected during the monitoring period.\\\"\\n exit 0\\nfi\\n\\necho \\\"Creating replay script...\\\"\\necho '#!/bin/bash' > replay_script.sh\\necho \\\"xdotool type $(cat input_log.txt | grep -E 'key press|button press' | awk '{print $NF}')\\\" >> replay_script.sh\\nchmod +x replay_script.sh\\n\\necho \\\"Replay script created as replay_script.sh\\\"\\necho \\\"Warning: Review the script content before executing to ensure it's safe.\\\"\\n\\nread -p \\\"Do you want to review the replay script? (y/n): \\\" review\\nif [ \\\"$review\\\" = \\\"y\\\" ]; then\\n less replay_script.sh\\nfi\\n\\nread -p \\\"Do you want to execute the replay script now? (y/n): \\\" execute\\nif [ \\\"$execute\\\" = \\\"y\\\" ]; then\\n echo \\\"Executing replay script in 5 seconds. Focus on the desired window.\\\"\\n sleep 5\\n ./replay_script.sh\\nelse\\n echo \\\"You can execute the script later by running: ./replay_script.sh\\\"\\nfi\\n\"\n}\n</ideal_output>\n</example>\n</examples>\n\n"
},
{
"type": "text",
"text": "one of docker containers provides VNC server. i need to connect to VNC trough SSH\nContainer Network Details\nNetwork: ha_bridge_network (172.21.0.0/16)\nContainer IP: 172.21.0.3\nExposed ports: 5900(VNC), 6080(noVNC), 8501(Streamlit), 22(SSH)\n"
}
]
}
]
)
print(message.content)
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/prompts/development/MCP_Server_Development_Prompt_Combiner.json:
--------------------------------------------------------------------------------
```json
{
"name": "MCP Server Development Prompt Combiner",
"description": "A specialized prompt combiner for MCP server development that integrates interface definitions, implementation patterns, and best practices",
"type": "prompt",
"category": "development",
"content": "/**\n * MCPServerDevPromptCombiner for {{project_name}}\n * \n * A specialized implementation of the PromptCombiner interface\n * focused on combining prompts for MCP server development workflows.\n */\n\nimport { PromptCombiner, CombinerContext, CombinedPromptResult, PromptSuggestion, CombinationValidationResult, WorkflowConfig, SavedWorkflow } from './prompt-combiner-interface';\nimport { PromptService } from '../services/prompt-service';\nimport { Prompt } from '../core/types';\n\n/**\n * MCP Server Development specific context\n */\nexport interface MCPServerDevContext extends CombinerContext {\n /** Server configuration */\n serverConfig?: {\n name: string;\n version: string;\n capabilities: string[];\n };\n \n /** Core technologies being used */\n technologies: {\n language: string;\n runtime: string;\n frameworks: string[];\n };\n \n /** MCP Server SDK version */\n sdkVersion: string;\n \n /** Tools to be implemented */\n tools?: {\n name: string;\n description: string;\n parameters?: Record<string, any>;\n }[];\n \n /** Resources to be implemented */\n resources?: {\n protocol: string;\n description: string;\n }[];\n \n /** Deployment target environment */\n deploymentTarget?: 'docker' | 'kubernetes' | 'serverless' | 'standalone';\n \n /** Additional MCP-specific context */\n {{additional_mcp_context}}\n}\n\n/**\n * Specialized result for MCP Server development combinations\n */\nexport interface MCPServerDevResult extends CombinedPromptResult {\n /** Generated interface definitions */\n interfaces?: string;\n \n /** Generated MCP tools implementation */\n toolsImplementation?: string;\n \n /** Generated MCP resources implementation */\n resourcesImplementation?: string;\n \n /** Server configuration */\n serverConfiguration?: string;\n \n /** Client integration examples */\n clientExamples?: string;\n \n /** Testing approach */\n testingApproach?: string;\n \n /** Dockerfile and Docker Compose configuration */\n dockerConfiguration?: string;\n \n /** Additional MCP-specific results */\n {{additional_mcp_results}}\n}\n\n/**\n * Implementation of MCPServerDevPromptCombiner\n */\nexport class MCPServerDevPromptCombiner implements PromptCombiner {\n constructor(private promptService: PromptService) {}\n \n /**\n * Combines MCP server development prompts\n * @param promptIds Array of prompt IDs to combine\n * @param context Optional MCP server development context\n * @returns Combined MCP server development result\n */\n async combinePrompts(promptIds: string[], context?: MCPServerDevContext): Promise<MCPServerDevResult> {\n // Implementation would include:\n // 1. Validating the prompts are compatible for MCP development\n // 2. Organizing prompts into logical sections (interfaces, tools, resources, etc.)\n // 3. Resolving dependencies between prompts\n // 4. Applying variables with MCP-specific knowledge\n // 5. 
Generating a comprehensive server implementation guide\n \n // This is a template structure - in a real implementation, this would contain\n // the actual logic for combining MCP server development prompts\n \n // For now, we'll outline the structure of how the implementation would work\n \n // Step 1: Load and categorize all prompts\n const prompts = await Promise.all(promptIds.map(id => this.promptService.getPrompt(id)));\n \n const interfacePrompts = prompts.filter(p => p.tags?.includes('interfaces'));\n const toolPrompts = prompts.filter(p => p.tags?.includes('tools'));\n const resourcePrompts = prompts.filter(p => p.tags?.includes('resources'));\n const configPrompts = prompts.filter(p => p.tags?.includes('configuration'));\n const deploymentPrompts = prompts.filter(p => p.tags?.includes('deployment'));\n \n // Step 2: Apply variables to each prompt category\n const variables = context?.variables || {};\n \n // Combine interface definitions\n const interfaces = await this.combineCategory(interfacePrompts, variables);\n \n // Combine tool implementations\n const toolsImplementation = await this.combineCategory(toolPrompts, variables);\n \n // Combine resource implementations\n const resourcesImplementation = await this.combineCategory(resourcePrompts, variables);\n \n // Combine server configuration\n const serverConfiguration = await this.combineCategory(configPrompts, variables);\n \n // Combine deployment configuration\n const dockerConfiguration = await this.combineCategory(deploymentPrompts, variables);\n \n // Step 3: Create combined content with logical sections\n const combinedContent = `\n# MCP Server Implementation for ${variables.project_name || 'Your Project'}\n\n## Overview\n\nThis guide provides a comprehensive implementation plan for an MCP server using ${variables.language || 'TypeScript'} and the MCP SDK version ${context?.sdkVersion || 'latest'}.\n\n## Interface Definitions\n\n${interfaces.content}\n\n## Tools Implementation\n\n${toolsImplementation.content}\n\n## Resources Implementation\n\n${resourcesImplementation.content}\n\n## Server Configuration\n\n${serverConfiguration.content}\n\n## Deployment Configuration\n\n${dockerConfiguration.content}\n\n## Implementation Steps\n\n1. Set up the project structure\n2. Implement the interfaces\n3. Implement the MCP tools\n4. Implement the MCP resources\n5. Configure the server\n6. Set up deployment\n7. Implement tests\n8. 
Document the server\n `;\n \n // Step 4: Return the comprehensive result\n return {\n content: combinedContent,\n components: [\n ...interfaces.components,\n ...toolsImplementation.components,\n ...resourcesImplementation.components,\n ...serverConfiguration.components,\n ...dockerConfiguration.components\n ],\n appliedVariables: variables,\n interfaces: interfaces.content,\n toolsImplementation: toolsImplementation.content,\n resourcesImplementation: resourcesImplementation.content,\n serverConfiguration: serverConfiguration.content,\n dockerConfiguration: dockerConfiguration.content,\n // Add suggestion for what to implement first\n nextSteps: [\n { action: 'implement_interfaces', description: 'Start by implementing the core interfaces' },\n { action: 'implement_tools', description: 'Implement the MCP tools using the SDK' },\n { action: 'implement_resources', description: 'Implement the MCP resources' },\n { action: 'configure_server', description: 'Set up the server configuration' },\n { action: 'setup_deployment', description: 'Configure Docker and deployment' }\n ]\n };\n }\n \n /**\n * Helper method to combine prompts in a specific category\n * @param prompts Prompts in the category\n * @param variables Variables to apply\n * @returns Combined result for the category\n */\n private async combineCategory(prompts: Prompt[], variables: Record<string, any>): Promise<CombinedPromptResult> {\n // Implementation would combine prompts within a category\n // This is a simplified placeholder\n let content = '';\n const components: {id: string; name: string; contribution: string}[] = [];\n \n for (const prompt of prompts) {\n const result = await this.promptService.applyTemplate(prompt.id, variables);\n content += result.content + '\\n\\n';\n components.push({\n id: prompt.id,\n name: prompt.name,\n contribution: result.content\n });\n }\n \n return {\n content: content.trim(),\n components,\n appliedVariables: variables\n };\n }\n \n /**\n * Gets MCP server development prompt suggestions\n * @param category Optional category to filter by\n * @param context Current MCP context to inform suggestions\n * @returns Array of prompt suggestions for MCP development\n */\n async getPromptSuggestions(category?: string, context?: MCPServerDevContext): Promise<PromptSuggestion[]> {\n // Implementation would suggest prompts based on the current MCP development context\n // For example, if building a tool-heavy server, suggest more tool-related prompts\n // This is a placeholder for demonstration\n \n // In a real implementation, this would query the prompt service for relevant prompts\n // based on the specific MCP development needs\n \n return [\n {\n id: 'consolidated-interfaces-template',\n name: 'Consolidated TypeScript Interfaces',\n relevance: 95,\n compatibleWith: ['mcp-server-tools-implementation', 'docker-containerization-guide'],\n reason: 'Provides the interface foundation for your MCP server'\n },\n {\n id: 'mcp-server-tools-implementation',\n name: 'MCP Server Tools Implementation',\n relevance: 90,\n compatibleWith: ['consolidated-interfaces-template', 'mcp-server-resources-implementation'],\n reason: `${context?.tools?.length || 0} tools need implementation in your server`\n },\n {\n id: 'mcp-server-resources-implementation',\n name: 'MCP Server Resources Implementation',\n relevance: 85,\n compatibleWith: ['consolidated-interfaces-template', 'mcp-server-tools-implementation'],\n reason: `${context?.resources?.length || 0} resources need implementation in your server`\n },\n {\n id: 
'docker-containerization-guide',\n name: 'Docker Containerization Guide',\n relevance: context?.deploymentTarget === 'docker' ? 100 : 70,\n compatibleWith: ['consolidated-interfaces-template'],\n reason: 'Provides Docker deployment configuration for your MCP server'\n },\n {\n id: 'development-system-prompt',\n name: 'Development System Prompt',\n relevance: 60,\n compatibleWith: [],\n reason: 'Helps with general development assistance for your MCP server'\n }\n ];\n }\n \n /**\n * Validates if the prompts can be combined for MCP server development\n * @param promptIds Array of prompt IDs to validate\n * @returns Validation result with any issues specific to MCP development\n */\n async validateCombination(promptIds: string[]): Promise<CombinationValidationResult> {\n // Implementation would validate that the prompts make sense for MCP development\n // For example, ensuring there are no conflicting tool definitions\n // This is a placeholder for demonstration\n \n const prompts = await Promise.all(promptIds.map(id => this.promptService.getPrompt(id)));\n \n // Check for interface prompt\n const hasInterface = prompts.some(p => p.tags?.includes('interfaces'));\n if (!hasInterface) {\n return {\n isValid: false,\n issues: [{\n promptId: '',\n issue: 'Missing interface definition prompt',\n severity: 'error',\n suggestion: 'Add a prompt with interface definitions, such as consolidated-interfaces-template'\n }],\n suggestions: [{\n promptIds: [...promptIds, 'consolidated-interfaces-template'],\n reason: 'Adding interface definitions is essential for MCP server development'\n }]\n };\n }\n \n // In a real implementation, would do more validation specific to MCP development\n \n return {\n isValid: true\n };\n }\n \n /**\n * Creates a saved MCP server development workflow\n * @param name Name for the new workflow\n * @param promptIds Component prompt IDs\n * @param config Configuration for the combination\n * @returns The created MCP workflow\n */\n async saveWorkflow(name: string, promptIds: string[], config: WorkflowConfig): Promise<SavedWorkflow> {\n // Implementation would save an MCP development workflow\n // This is a placeholder for demonstration\n \n return {\n id: `mcp-dev-workflow-${Date.now()}`,\n name,\n promptIds,\n config,\n createdAt: new Date().toISOString(),\n updatedAt: new Date().toISOString(),\n version: 1,\n category: 'mcp-development',\n tags: ['mcp', 'development', 'server']\n };\n }\n \n /**\n * Loads a previously saved MCP server development workflow\n * @param workflowId ID of the saved workflow\n * @returns The loaded MCP workflow\n */\n async loadWorkflow(workflowId: string): Promise<SavedWorkflow> {\n // Implementation would load an MCP development workflow\n // This is a placeholder for demonstration\n \n throw new Error(`Workflow ${workflowId} not found or not implemented yet`);\n }\n}\n\n/**\n * Usage Examples\n * \n * ```typescript\n * // Creating a combiner\n * const promptService = new PromptService(storageAdapter);\n * const mcpCombiner = new MCPServerDevPromptCombiner(promptService);\n * \n * // Getting prompt suggestions for MCP development\n * const suggestions = await mcpCombiner.getPromptSuggestions('tools', {\n * technologies: {\n * language: 'TypeScript',\n * runtime: 'Node.js',\n * frameworks: ['Express']\n * },\n * sdkVersion: '1.6.0',\n * tools: [\n * { name: 'get_document', description: 'Retrieve a document by ID' },\n * { name: 'search_documents', description: 'Search for documents' }\n * ],\n * resources: [\n * { protocol: 'document', description: 
'Document resource protocol' }\n * ],\n * deploymentTarget: 'docker'\n * });\n * \n * // Combining prompts for MCP development\n * const result = await mcpCombiner.combinePrompts([\n * 'consolidated-interfaces-template',\n * 'mcp-server-tools-implementation',\n * 'docker-containerization-guide'\n * ], {\n * variables: {\n * project_name: 'Document Management MCP Server',\n * language: 'TypeScript',\n * primary_entity: 'Document',\n * node_version: '20'\n * },\n * technologies: {\n * language: 'TypeScript',\n * runtime: 'Node.js',\n * frameworks: ['Express']\n * },\n * sdkVersion: '1.6.0',\n * deploymentTarget: 'docker'\n * });\n * \n * // Using the specialized result properties\n * console.log(result.interfaces); // Get just the interface definitions\n * console.log(result.toolsImplementation); // Get just the tools implementation\n * console.log(result.dockerConfiguration); // Get just the Docker configuration\n * ```\n */\n\n// ============================\n// Extension Guidelines\n// ============================\n\n/**\n * When extending MCPServerDevPromptCombiner, consider:\n * \n * 1. Adding support for specific MCP server types (e.g., FileSystem, GitHub, Memory)\n * 2. Enhancing the context with more MCP-specific properties\n * 3. Improving suggestion logic based on the development context\n * 4. Adding template validation specific to MCP compatibility\n * 5. {{additional_extension_guidelines}}\n */",
"variables": [
"project_name",
"additional_mcp_context",
"additional_mcp_results",
"additional_extension_guidelines"
],
"metadata": {
"source": "/home/sparrow/projects/mcp-prompts/prompts/mcp-server-dev-prompt-combiner.json",
"imported": true
}
}
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/prompts/MCP_Server_Development_Prompt_Combiner.json:
--------------------------------------------------------------------------------
```json
{
"name": "MCP Server Development Prompt Combiner",
"description": "A specialized prompt combiner for MCP server development that integrates interface definitions, implementation patterns, and best practices",
"type": "prompt",
"category": "development",
"content": "/**\n * MCPServerDevPromptCombiner for {{project_name}}\n * \n * A specialized implementation of the PromptCombiner interface\n * focused on combining prompts for MCP server development workflows.\n */\n\nimport { PromptCombiner, CombinerContext, CombinedPromptResult, PromptSuggestion, CombinationValidationResult, WorkflowConfig, SavedWorkflow } from './prompt-combiner-interface';\nimport { PromptService } from '../services/prompt-service';\nimport { Prompt } from '../core/types';\n\n/**\n * MCP Server Development specific context\n */\nexport interface MCPServerDevContext extends CombinerContext {\n /** Server configuration */\n serverConfig?: {\n name: string;\n version: string;\n capabilities: string[];\n };\n \n /** Core technologies being used */\n technologies: {\n language: string;\n runtime: string;\n frameworks: string[];\n };\n \n /** MCP Server SDK version */\n sdkVersion: string;\n \n /** Tools to be implemented */\n tools?: {\n name: string;\n description: string;\n parameters?: Record<string, any>;\n }[];\n \n /** Resources to be implemented */\n resources?: {\n protocol: string;\n description: string;\n }[];\n \n /** Deployment target environment */\n deploymentTarget?: 'docker' | 'kubernetes' | 'serverless' | 'standalone';\n \n /** Additional MCP-specific context */\n {{additional_mcp_context}}\n}\n\n/**\n * Specialized result for MCP Server development combinations\n */\nexport interface MCPServerDevResult extends CombinedPromptResult {\n /** Generated interface definitions */\n interfaces?: string;\n \n /** Generated MCP tools implementation */\n toolsImplementation?: string;\n \n /** Generated MCP resources implementation */\n resourcesImplementation?: string;\n \n /** Server configuration */\n serverConfiguration?: string;\n \n /** Client integration examples */\n clientExamples?: string;\n \n /** Testing approach */\n testingApproach?: string;\n \n /** Dockerfile and Docker Compose configuration */\n dockerConfiguration?: string;\n \n /** Additional MCP-specific results */\n {{additional_mcp_results}}\n}\n\n/**\n * Implementation of MCPServerDevPromptCombiner\n */\nexport class MCPServerDevPromptCombiner implements PromptCombiner {\n constructor(private promptService: PromptService) {}\n \n /**\n * Combines MCP server development prompts\n * @param promptIds Array of prompt IDs to combine\n * @param context Optional MCP server development context\n * @returns Combined MCP server development result\n */\n async combinePrompts(promptIds: string[], context?: MCPServerDevContext): Promise<MCPServerDevResult> {\n // Implementation would include:\n // 1. Validating the prompts are compatible for MCP development\n // 2. Organizing prompts into logical sections (interfaces, tools, resources, etc.)\n // 3. Resolving dependencies between prompts\n // 4. Applying variables with MCP-specific knowledge\n // 5. 
Generating a comprehensive server implementation guide\n \n // This is a template structure - in a real implementation, this would contain\n // the actual logic for combining MCP server development prompts\n \n // For now, we'll outline the structure of how the implementation would work\n \n // Step 1: Load and categorize all prompts\n const prompts = await Promise.all(promptIds.map(id => this.promptService.getPrompt(id)));\n \n const interfacePrompts = prompts.filter(p => p.tags?.includes('interfaces'));\n const toolPrompts = prompts.filter(p => p.tags?.includes('tools'));\n const resourcePrompts = prompts.filter(p => p.tags?.includes('resources'));\n const configPrompts = prompts.filter(p => p.tags?.includes('configuration'));\n const deploymentPrompts = prompts.filter(p => p.tags?.includes('deployment'));\n \n // Step 2: Apply variables to each prompt category\n const variables = context?.variables || {};\n \n // Combine interface definitions\n const interfaces = await this.combineCategory(interfacePrompts, variables);\n \n // Combine tool implementations\n const toolsImplementation = await this.combineCategory(toolPrompts, variables);\n \n // Combine resource implementations\n const resourcesImplementation = await this.combineCategory(resourcePrompts, variables);\n \n // Combine server configuration\n const serverConfiguration = await this.combineCategory(configPrompts, variables);\n \n // Combine deployment configuration\n const dockerConfiguration = await this.combineCategory(deploymentPrompts, variables);\n \n // Step 3: Create combined content with logical sections\n const combinedContent = `\n# MCP Server Implementation for ${variables.project_name || 'Your Project'}\n\n## Overview\n\nThis guide provides a comprehensive implementation plan for an MCP server using ${variables.language || 'TypeScript'} and the MCP SDK version ${context?.sdkVersion || 'latest'}.\n\n## Interface Definitions\n\n${interfaces.content}\n\n## Tools Implementation\n\n${toolsImplementation.content}\n\n## Resources Implementation\n\n${resourcesImplementation.content}\n\n## Server Configuration\n\n${serverConfiguration.content}\n\n## Deployment Configuration\n\n${dockerConfiguration.content}\n\n## Implementation Steps\n\n1. Set up the project structure\n2. Implement the interfaces\n3. Implement the MCP tools\n4. Implement the MCP resources\n5. Configure the server\n6. Set up deployment\n7. Implement tests\n8. 
Document the server\n `;\n \n // Step 4: Return the comprehensive result\n return {\n content: combinedContent,\n components: [\n ...interfaces.components,\n ...toolsImplementation.components,\n ...resourcesImplementation.components,\n ...serverConfiguration.components,\n ...dockerConfiguration.components\n ],\n appliedVariables: variables,\n interfaces: interfaces.content,\n toolsImplementation: toolsImplementation.content,\n resourcesImplementation: resourcesImplementation.content,\n serverConfiguration: serverConfiguration.content,\n dockerConfiguration: dockerConfiguration.content,\n // Add suggestion for what to implement first\n nextSteps: [\n { action: 'implement_interfaces', description: 'Start by implementing the core interfaces' },\n { action: 'implement_tools', description: 'Implement the MCP tools using the SDK' },\n { action: 'implement_resources', description: 'Implement the MCP resources' },\n { action: 'configure_server', description: 'Set up the server configuration' },\n { action: 'setup_deployment', description: 'Configure Docker and deployment' }\n ]\n };\n }\n \n /**\n * Helper method to combine prompts in a specific category\n * @param prompts Prompts in the category\n * @param variables Variables to apply\n * @returns Combined result for the category\n */\n private async combineCategory(prompts: Prompt[], variables: Record<string, any>): Promise<CombinedPromptResult> {\n // Implementation would combine prompts within a category\n // This is a simplified placeholder\n let content = '';\n const components: {id: string; name: string; contribution: string}[] = [];\n \n for (const prompt of prompts) {\n const result = await this.promptService.applyTemplate(prompt.id, variables);\n content += result.content + '\\n\\n';\n components.push({\n id: prompt.id,\n name: prompt.name,\n contribution: result.content\n });\n }\n \n return {\n content: content.trim(),\n components,\n appliedVariables: variables\n };\n }\n \n /**\n * Gets MCP server development prompt suggestions\n * @param category Optional category to filter by\n * @param context Current MCP context to inform suggestions\n * @returns Array of prompt suggestions for MCP development\n */\n async getPromptSuggestions(category?: string, context?: MCPServerDevContext): Promise<PromptSuggestion[]> {\n // Implementation would suggest prompts based on the current MCP development context\n // For example, if building a tool-heavy server, suggest more tool-related prompts\n // This is a placeholder for demonstration\n \n // In a real implementation, this would query the prompt service for relevant prompts\n // based on the specific MCP development needs\n \n return [\n {\n id: 'consolidated-interfaces-template',\n name: 'Consolidated TypeScript Interfaces',\n relevance: 95,\n compatibleWith: ['mcp-server-tools-implementation', 'docker-containerization-guide'],\n reason: 'Provides the interface foundation for your MCP server'\n },\n {\n id: 'mcp-server-tools-implementation',\n name: 'MCP Server Tools Implementation',\n relevance: 90,\n compatibleWith: ['consolidated-interfaces-template', 'mcp-server-resources-implementation'],\n reason: `${context?.tools?.length || 0} tools need implementation in your server`\n },\n {\n id: 'mcp-server-resources-implementation',\n name: 'MCP Server Resources Implementation',\n relevance: 85,\n compatibleWith: ['consolidated-interfaces-template', 'mcp-server-tools-implementation'],\n reason: `${context?.resources?.length || 0} resources need implementation in your server`\n },\n {\n id: 
'docker-containerization-guide',\n name: 'Docker Containerization Guide',\n relevance: context?.deploymentTarget === 'docker' ? 100 : 70,\n compatibleWith: ['consolidated-interfaces-template'],\n reason: 'Provides Docker deployment configuration for your MCP server'\n },\n {\n id: 'development-system-prompt',\n name: 'Development System Prompt',\n relevance: 60,\n compatibleWith: [],\n reason: 'Helps with general development assistance for your MCP server'\n }\n ];\n }\n \n /**\n * Validates if the prompts can be combined for MCP server development\n * @param promptIds Array of prompt IDs to validate\n * @returns Validation result with any issues specific to MCP development\n */\n async validateCombination(promptIds: string[]): Promise<CombinationValidationResult> {\n // Implementation would validate that the prompts make sense for MCP development\n // For example, ensuring there are no conflicting tool definitions\n // This is a placeholder for demonstration\n \n const prompts = await Promise.all(promptIds.map(id => this.promptService.getPrompt(id)));\n \n // Check for interface prompt\n const hasInterface = prompts.some(p => p.tags?.includes('interfaces'));\n if (!hasInterface) {\n return {\n isValid: false,\n issues: [{\n promptId: '',\n issue: 'Missing interface definition prompt',\n severity: 'error',\n suggestion: 'Add a prompt with interface definitions, such as consolidated-interfaces-template'\n }],\n suggestions: [{\n promptIds: [...promptIds, 'consolidated-interfaces-template'],\n reason: 'Adding interface definitions is essential for MCP server development'\n }]\n };\n }\n \n // In a real implementation, would do more validation specific to MCP development\n \n return {\n isValid: true\n };\n }\n \n /**\n * Creates a saved MCP server development workflow\n * @param name Name for the new workflow\n * @param promptIds Component prompt IDs\n * @param config Configuration for the combination\n * @returns The created MCP workflow\n */\n async saveWorkflow(name: string, promptIds: string[], config: WorkflowConfig): Promise<SavedWorkflow> {\n // Implementation would save an MCP development workflow\n // This is a placeholder for demonstration\n \n return {\n id: `mcp-dev-workflow-${Date.now()}`,\n name,\n promptIds,\n config,\n createdAt: new Date().toISOString(),\n updatedAt: new Date().toISOString(),\n version: 1,\n category: 'mcp-development',\n tags: ['mcp', 'development', 'server']\n };\n }\n \n /**\n * Loads a previously saved MCP server development workflow\n * @param workflowId ID of the saved workflow\n * @returns The loaded MCP workflow\n */\n async loadWorkflow(workflowId: string): Promise<SavedWorkflow> {\n // Implementation would load an MCP development workflow\n // This is a placeholder for demonstration\n \n throw new Error(`Workflow ${workflowId} not found or not implemented yet`);\n }\n}\n\n/**\n * Usage Examples\n * \n * ```typescript\n * // Creating a combiner\n * const promptService = new PromptService(storageAdapter);\n * const mcpCombiner = new MCPServerDevPromptCombiner(promptService);\n * \n * // Getting prompt suggestions for MCP development\n * const suggestions = await mcpCombiner.getPromptSuggestions('tools', {\n * technologies: {\n * language: 'TypeScript',\n * runtime: 'Node.js',\n * frameworks: ['Express']\n * },\n * sdkVersion: '1.6.0',\n * tools: [\n * { name: 'get_document', description: 'Retrieve a document by ID' },\n * { name: 'search_documents', description: 'Search for documents' }\n * ],\n * resources: [\n * { protocol: 'document', description: 
'Document resource protocol' }\n * ],\n * deploymentTarget: 'docker'\n * });\n * \n * // Combining prompts for MCP development\n * const result = await mcpCombiner.combinePrompts([\n * 'consolidated-interfaces-template',\n * 'mcp-server-tools-implementation',\n * 'docker-containerization-guide'\n * ], {\n * variables: {\n * project_name: 'Document Management MCP Server',\n * language: 'TypeScript',\n * primary_entity: 'Document',\n * node_version: '20'\n * },\n * technologies: {\n * language: 'TypeScript',\n * runtime: 'Node.js',\n * frameworks: ['Express']\n * },\n * sdkVersion: '1.6.0',\n * deploymentTarget: 'docker'\n * });\n * \n * // Using the specialized result properties\n * console.log(result.interfaces); // Get just the interface definitions\n * console.log(result.toolsImplementation); // Get just the tools implementation\n * console.log(result.dockerConfiguration); // Get just the Docker configuration\n * ```\n */\n\n// ============================\n// Extension Guidelines\n// ============================\n\n/**\n * When extending MCPServerDevPromptCombiner, consider:\n * \n * 1. Adding support for specific MCP server types (e.g., FileSystem, GitHub, Memory)\n * 2. Enhancing the context with more MCP-specific properties\n * 3. Improving suggestion logic based on the development context\n * 4. Adding template validation specific to MCP compatibility\n * 5. {{additional_extension_guidelines}}\n */",
"variables": [
"project_name",
"additional_mcp_context",
"additional_mcp_results",
"additional_extension_guidelines"
],
"metadata": {
"source": "/home/sparrow/projects/mcp-prompts/prompts/mcp-server-dev-prompt-combiner.json",
"imported": true
}
}
```
--------------------------------------------------------------------------------
/data/prompts/templates/mcp-server-dev-prompt-combiner.json:
--------------------------------------------------------------------------------
```json
{
"id": "mcp-server-dev-prompt-combiner",
"name": "MCP Server Development Prompt Combiner",
"description": "A specialized prompt combiner for MCP server development that integrates interface definitions, implementation patterns, and best practices",
"content": "/**\n * MCPServerDevPromptCombiner for {{project_name}}\n * \n * A specialized implementation of the PromptCombiner interface\n * focused on combining prompts for MCP server development workflows.\n */\n\nimport { PromptCombiner, CombinerContext, CombinedPromptResult, PromptSuggestion, CombinationValidationResult, WorkflowConfig, SavedWorkflow } from './prompt-combiner-interface';\nimport { PromptService } from '../services/prompt-service';\nimport { Prompt } from '../core/types';\n\n/**\n * MCP Server Development specific context\n */\nexport interface MCPServerDevContext extends CombinerContext {\n /** Server configuration */\n serverConfig?: {\n name: string;\n version: string;\n capabilities: string[];\n };\n \n /** Core technologies being used */\n technologies: {\n language: string;\n runtime: string;\n frameworks: string[];\n };\n \n /** MCP Server SDK version */\n sdkVersion: string;\n \n /** Tools to be implemented */\n tools?: {\n name: string;\n description: string;\n parameters?: Record<string, any>;\n }[];\n \n /** Resources to be implemented */\n resources?: {\n protocol: string;\n description: string;\n }[];\n \n /** Deployment target environment */\n deploymentTarget?: 'docker' | 'kubernetes' | 'serverless' | 'standalone';\n \n /** Additional MCP-specific context */\n {{additional_mcp_context}}\n}\n\n/**\n * Specialized result for MCP Server development combinations\n */\nexport interface MCPServerDevResult extends CombinedPromptResult {\n /** Generated interface definitions */\n interfaces?: string;\n \n /** Generated MCP tools implementation */\n toolsImplementation?: string;\n \n /** Generated MCP resources implementation */\n resourcesImplementation?: string;\n \n /** Server configuration */\n serverConfiguration?: string;\n \n /** Client integration examples */\n clientExamples?: string;\n \n /** Testing approach */\n testingApproach?: string;\n \n /** Dockerfile and Docker Compose configuration */\n dockerConfiguration?: string;\n \n /** Additional MCP-specific results */\n {{additional_mcp_results}}\n}\n\n/**\n * Implementation of MCPServerDevPromptCombiner\n */\nexport class MCPServerDevPromptCombiner implements PromptCombiner {\n constructor(private promptService: PromptService) {}\n \n /**\n * Combines MCP server development prompts\n * @param promptIds Array of prompt IDs to combine\n * @param context Optional MCP server development context\n * @returns Combined MCP server development result\n */\n async combinePrompts(promptIds: string[], context?: MCPServerDevContext): Promise<MCPServerDevResult> {\n // Implementation would include:\n // 1. Validating the prompts are compatible for MCP development\n // 2. Organizing prompts into logical sections (interfaces, tools, resources, etc.)\n // 3. Resolving dependencies between prompts\n // 4. Applying variables with MCP-specific knowledge\n // 5. 
Generating a comprehensive server implementation guide\n \n // This is a template structure - in a real implementation, this would contain\n // the actual logic for combining MCP server development prompts\n \n // For now, we'll outline the structure of how the implementation would work\n \n // Step 1: Load and categorize all prompts\n const prompts = await Promise.all(promptIds.map(id => this.promptService.getPrompt(id)));\n \n const interfacePrompts = prompts.filter(p => p.tags?.includes('interfaces'));\n const toolPrompts = prompts.filter(p => p.tags?.includes('tools'));\n const resourcePrompts = prompts.filter(p => p.tags?.includes('resources'));\n const configPrompts = prompts.filter(p => p.tags?.includes('configuration'));\n const deploymentPrompts = prompts.filter(p => p.tags?.includes('deployment'));\n \n // Step 2: Apply variables to each prompt category\n const variables = context?.variables || {};\n \n // Combine interface definitions\n const interfaces = await this.combineCategory(interfacePrompts, variables);\n \n // Combine tool implementations\n const toolsImplementation = await this.combineCategory(toolPrompts, variables);\n \n // Combine resource implementations\n const resourcesImplementation = await this.combineCategory(resourcePrompts, variables);\n \n // Combine server configuration\n const serverConfiguration = await this.combineCategory(configPrompts, variables);\n \n // Combine deployment configuration\n const dockerConfiguration = await this.combineCategory(deploymentPrompts, variables);\n \n // Step 3: Create combined content with logical sections\n const combinedContent = `\n# MCP Server Implementation for ${variables.project_name || 'Your Project'}\n\n## Overview\n\nThis guide provides a comprehensive implementation plan for an MCP server using ${variables.language || 'TypeScript'} and the MCP SDK version ${context?.sdkVersion || 'latest'}.\n\n## Interface Definitions\n\n${interfaces.content}\n\n## Tools Implementation\n\n${toolsImplementation.content}\n\n## Resources Implementation\n\n${resourcesImplementation.content}\n\n## Server Configuration\n\n${serverConfiguration.content}\n\n## Deployment Configuration\n\n${dockerConfiguration.content}\n\n## Implementation Steps\n\n1. Set up the project structure\n2. Implement the interfaces\n3. Implement the MCP tools\n4. Implement the MCP resources\n5. Configure the server\n6. Set up deployment\n7. Implement tests\n8. 
Document the server\n `;\n \n // Step 4: Return the comprehensive result\n return {\n content: combinedContent,\n components: [\n ...interfaces.components,\n ...toolsImplementation.components,\n ...resourcesImplementation.components,\n ...serverConfiguration.components,\n ...dockerConfiguration.components\n ],\n appliedVariables: variables,\n interfaces: interfaces.content,\n toolsImplementation: toolsImplementation.content,\n resourcesImplementation: resourcesImplementation.content,\n serverConfiguration: serverConfiguration.content,\n dockerConfiguration: dockerConfiguration.content,\n // Add suggestion for what to implement first\n nextSteps: [\n { action: 'implement_interfaces', description: 'Start by implementing the core interfaces' },\n { action: 'implement_tools', description: 'Implement the MCP tools using the SDK' },\n { action: 'implement_resources', description: 'Implement the MCP resources' },\n { action: 'configure_server', description: 'Set up the server configuration' },\n { action: 'setup_deployment', description: 'Configure Docker and deployment' }\n ]\n };\n }\n \n /**\n * Helper method to combine prompts in a specific category\n * @param prompts Prompts in the category\n * @param variables Variables to apply\n * @returns Combined result for the category\n */\n private async combineCategory(prompts: Prompt[], variables: Record<string, any>): Promise<CombinedPromptResult> {\n // Implementation would combine prompts within a category\n // This is a simplified placeholder\n let content = '';\n const components: {id: string; name: string; contribution: string}[] = [];\n \n for (const prompt of prompts) {\n const result = await this.promptService.applyTemplate(prompt.id, variables);\n content += result.content + '\\n\\n';\n components.push({\n id: prompt.id,\n name: prompt.name,\n contribution: result.content\n });\n }\n \n return {\n content: content.trim(),\n components,\n appliedVariables: variables\n };\n }\n \n /**\n * Gets MCP server development prompt suggestions\n * @param category Optional category to filter by\n * @param context Current MCP context to inform suggestions\n * @returns Array of prompt suggestions for MCP development\n */\n async getPromptSuggestions(category?: string, context?: MCPServerDevContext): Promise<PromptSuggestion[]> {\n // Implementation would suggest prompts based on the current MCP development context\n // For example, if building a tool-heavy server, suggest more tool-related prompts\n // This is a placeholder for demonstration\n \n // In a real implementation, this would query the prompt service for relevant prompts\n // based on the specific MCP development needs\n \n return [\n {\n id: 'consolidated-interfaces-template',\n name: 'Consolidated TypeScript Interfaces',\n relevance: 95,\n compatibleWith: ['mcp-server-tools-implementation', 'docker-containerization-guide'],\n reason: 'Provides the interface foundation for your MCP server'\n },\n {\n id: 'mcp-server-tools-implementation',\n name: 'MCP Server Tools Implementation',\n relevance: 90,\n compatibleWith: ['consolidated-interfaces-template', 'mcp-server-resources-implementation'],\n reason: `${context?.tools?.length || 0} tools need implementation in your server`\n },\n {\n id: 'mcp-server-resources-implementation',\n name: 'MCP Server Resources Implementation',\n relevance: 85,\n compatibleWith: ['consolidated-interfaces-template', 'mcp-server-tools-implementation'],\n reason: `${context?.resources?.length || 0} resources need implementation in your server`\n },\n {\n id: 
'docker-containerization-guide',\n name: 'Docker Containerization Guide',\n relevance: context?.deploymentTarget === 'docker' ? 100 : 70,\n compatibleWith: ['consolidated-interfaces-template'],\n reason: 'Provides Docker deployment configuration for your MCP server'\n },\n {\n id: 'development-system-prompt',\n name: 'Development System Prompt',\n relevance: 60,\n compatibleWith: [],\n reason: 'Helps with general development assistance for your MCP server'\n }\n ];\n }\n \n /**\n * Validates if the prompts can be combined for MCP server development\n * @param promptIds Array of prompt IDs to validate\n * @returns Validation result with any issues specific to MCP development\n */\n async validateCombination(promptIds: string[]): Promise<CombinationValidationResult> {\n // Implementation would validate that the prompts make sense for MCP development\n // For example, ensuring there are no conflicting tool definitions\n // This is a placeholder for demonstration\n \n const prompts = await Promise.all(promptIds.map(id => this.promptService.getPrompt(id)));\n \n // Check for interface prompt\n const hasInterface = prompts.some(p => p.tags?.includes('interfaces'));\n if (!hasInterface) {\n return {\n isValid: false,\n issues: [{\n promptId: '',\n issue: 'Missing interface definition prompt',\n severity: 'error',\n suggestion: 'Add a prompt with interface definitions, such as consolidated-interfaces-template'\n }],\n suggestions: [{\n promptIds: [...promptIds, 'consolidated-interfaces-template'],\n reason: 'Adding interface definitions is essential for MCP server development'\n }]\n };\n }\n \n // In a real implementation, would do more validation specific to MCP development\n \n return {\n isValid: true\n };\n }\n \n /**\n * Creates a saved MCP server development workflow\n * @param name Name for the new workflow\n * @param promptIds Component prompt IDs\n * @param config Configuration for the combination\n * @returns The created MCP workflow\n */\n async saveWorkflow(name: string, promptIds: string[], config: WorkflowConfig): Promise<SavedWorkflow> {\n // Implementation would save an MCP development workflow\n // This is a placeholder for demonstration\n \n return {\n id: `mcp-dev-workflow-${Date.now()}`,\n name,\n promptIds,\n config,\n createdAt: new Date().toISOString(),\n updatedAt: new Date().toISOString(),\n version: 1,\n category: 'mcp-development',\n tags: ['mcp', 'development', 'server']\n };\n }\n \n /**\n * Loads a previously saved MCP server development workflow\n * @param workflowId ID of the saved workflow\n * @returns The loaded MCP workflow\n */\n async loadWorkflow(workflowId: string): Promise<SavedWorkflow> {\n // Implementation would load an MCP development workflow\n // This is a placeholder for demonstration\n \n throw new Error(`Workflow ${workflowId} not found or not implemented yet`);\n }\n}\n\n/**\n * Usage Examples\n * \n * ```typescript\n * // Creating a combiner\n * const promptService = new PromptService(storageAdapter);\n * const mcpCombiner = new MCPServerDevPromptCombiner(promptService);\n * \n * // Getting prompt suggestions for MCP development\n * const suggestions = await mcpCombiner.getPromptSuggestions('tools', {\n * technologies: {\n * language: 'TypeScript',\n * runtime: 'Node.js',\n * frameworks: ['Express']\n * },\n * sdkVersion: '1.6.0',\n * tools: [\n * { name: 'get_document', description: 'Retrieve a document by ID' },\n * { name: 'search_documents', description: 'Search for documents' }\n * ],\n * resources: [\n * { protocol: 'document', description: 
'Document resource protocol' }\n * ],\n * deploymentTarget: 'docker'\n * });\n * \n * // Combining prompts for MCP development\n * const result = await mcpCombiner.combinePrompts([\n * 'consolidated-interfaces-template',\n * 'mcp-server-tools-implementation',\n * 'docker-containerization-guide'\n * ], {\n * variables: {\n * project_name: 'Document Management MCP Server',\n * language: 'TypeScript',\n * primary_entity: 'Document',\n * node_version: '20'\n * },\n * technologies: {\n * language: 'TypeScript',\n * runtime: 'Node.js',\n * frameworks: ['Express']\n * },\n * sdkVersion: '1.6.0',\n * deploymentTarget: 'docker'\n * });\n * \n * // Using the specialized result properties\n * console.log(result.interfaces); // Get just the interface definitions\n * console.log(result.toolsImplementation); // Get just the tools implementation\n * console.log(result.dockerConfiguration); // Get just the Docker configuration\n * ```\n */\n\n// ============================\n// Extension Guidelines\n// ============================\n\n/**\n * When extending MCPServerDevPromptCombiner, consider:\n * \n * 1. Adding support for specific MCP server types (e.g., FileSystem, GitHub, Memory)\n * 2. Enhancing the context with more MCP-specific properties\n * 3. Improving suggestion logic based on the development context\n * 4. Adding template validation specific to MCP compatibility\n * 5. {{additional_extension_guidelines}}\n */",
"isTemplate": true,
"variables": [
"project_name",
"additional_mcp_context",
"additional_mcp_results",
"additional_extension_guidelines"
],
"tags": [
"development",
"mcp",
"server",
"prompt-engineering",
"integration"
],
"category": "development",
"createdAt": "2024-08-08T17:15:00.000Z",
"updatedAt": "2024-08-08T17:15:00.000Z",
"version": 1
}
```