This is page 7 of 24. Use http://codebase.md/sparesparrow/mcp-project-orchestrator?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .cursorrules
├── .env.example
├── .github
│ └── workflows
│ ├── build.yml
│ ├── ci-cd.yml
│ ├── ci.yml
│ ├── deploy.yml
│ ├── ecosystem-monitor.yml
│ ├── fan-out-orchestrator.yml
│ └── release.yml
├── .gitignore
├── .pre-commit-config.yaml
├── AUTOMOTIVE_CAMERA_SYSTEM_SUMMARY.md
├── automotive-camera-system
│ ├── docs
│ │ └── IMPLEMENTACE_CS.md
│ └── README.md
├── AWS_MCP_IMPLEMENTATION_SUMMARY.md
├── AWS_MCP_QUICKSTART.md
├── AWS_SIP_TRUNK_DEPLOYMENT_COMPLETE.md
├── aws-sip-trunk
│ ├── .gitignore
│ ├── config
│ │ ├── extensions.conf.j2
│ │ └── pjsip.conf.j2
│ ├── DEPLOYMENT_SUMMARY.md
│ ├── docs
│ │ ├── DEPLOYMENT.md
│ │ └── TROUBLESHOOTING.md
│ ├── PROJECT_INDEX.md
│ ├── pyproject.toml
│ ├── QUICKSTART.md
│ ├── README.md
│ ├── scripts
│ │ ├── deploy-asterisk-aws.sh
│ │ └── user-data.sh
│ ├── terraform
│ │ ├── ec2.tf
│ │ ├── main.tf
│ │ ├── monitoring.tf
│ │ ├── networking.tf
│ │ ├── outputs.tf
│ │ ├── storage.tf
│ │ ├── terraform.tfvars.example
│ │ └── variables.tf
│ ├── tests
│ │ └── test_sip_connectivity.py
│ └── VERIFICATION_CHECKLIST.md
├── CLAUDE.md
├── component_templates.json
├── conanfile.py
├── config
│ ├── default.json
│ └── project_orchestration.json
├── Containerfile
├── cursor-templates
│ └── openssl
│ ├── linux-dev.mdc.jinja2
│ └── shared.mdc.jinja2
├── data
│ └── prompts
│ └── templates
│ ├── advanced-multi-server-template.json
│ ├── analysis-assistant.json
│ ├── analyze-mermaid-diagram.json
│ ├── architecture-design-assistant.json
│ ├── code-diagram-documentation-creator.json
│ ├── code-refactoring-assistant.json
│ ├── code-review-assistant.json
│ ├── collaborative-development.json
│ ├── consolidated-interfaces-template.json
│ ├── could-you-interpret-the-assumed-applicat.json
│ ├── data-analysis-template.json
│ ├── database-query-assistant.json
│ ├── debugging-assistant.json
│ ├── development-system-prompt-zcna0.json
│ ├── development-system-prompt.json
│ ├── development-workflow.json
│ ├── docker-compose-prompt-combiner.json
│ ├── docker-containerization-guide.json
│ ├── docker-mcp-servers-orchestration.json
│ ├── foresight-assistant.json
│ ├── generate-different-types-of-questions-ab.json
│ ├── generate-mermaid-diagram.json
│ ├── image-1-describe-the-icon-in-one-sen.json
│ ├── initialize-project-setup-for-a-new-micro.json
│ ├── install-dependencies-build-run-test.json
│ ├── mcp-code-generator.json
│ ├── mcp-integration-assistant.json
│ ├── mcp-resources-explorer.json
│ ├── mcp-resources-integration.json
│ ├── mcp-server-configurator.json
│ ├── mcp-server-dev-prompt-combiner.json
│ ├── mcp-server-integration-template.json
│ ├── mcp-template-system.json
│ ├── mermaid-analysis-expert.json
│ ├── mermaid-class-diagram-generator.json
│ ├── mermaid-diagram-generator.json
│ ├── mermaid-diagram-modifier.json
│ ├── modify-mermaid-diagram.json
│ ├── monorepo-migration-guide.json
│ ├── multi-resource-context.json
│ ├── project-analysis-assistant.json
│ ├── prompt-combiner-interface.json
│ ├── prompt-templates.json
│ ├── repository-explorer.json
│ ├── research-assistant.json
│ ├── sequential-data-analysis.json
│ ├── solid-code-analysis-visualizer.json
│ ├── task-list-helper-8ithy.json
│ ├── template-based-mcp-integration.json
│ ├── templates.json
│ ├── test-prompt.json
│ └── you-are-limited-to-respond-yes-or-no-onl.json
├── docs
│ ├── AWS_MCP.md
│ ├── AWS.md
│ ├── CONAN.md
│ └── integration.md
├── elevenlabs-agents
│ ├── agent-prompts.json
│ └── README.md
├── IMPLEMENTATION_STATUS.md
├── integration_plan.md
├── LICENSE
├── MANIFEST.in
├── mcp-project-orchestrator
│ └── openssl
│ ├── .github
│ │ └── workflows
│ │ └── validate-cursor-config.yml
│ ├── conanfile.py
│ ├── CURSOR_DEPLOYMENT_POLISH.md
│ ├── cursor-rules
│ │ ├── mcp.json.jinja2
│ │ ├── prompts
│ │ │ ├── fips-compliance.md.jinja2
│ │ │ ├── openssl-coding-standards.md.jinja2
│ │ │ └── pr-review.md.jinja2
│ │ └── rules
│ │ ├── ci-linux.mdc.jinja2
│ │ ├── linux-dev.mdc.jinja2
│ │ ├── macos-dev.mdc.jinja2
│ │ ├── shared.mdc.jinja2
│ │ └── windows-dev.mdc.jinja2
│ ├── docs
│ │ └── cursor-configuration-management.md
│ ├── examples
│ │ └── example-workspace
│ │ ├── .cursor
│ │ │ ├── mcp.json
│ │ │ └── rules
│ │ │ ├── linux-dev.mdc
│ │ │ └── shared.mdc
│ │ ├── .gitignore
│ │ ├── CMakeLists.txt
│ │ ├── conanfile.py
│ │ ├── profiles
│ │ │ ├── linux-gcc-debug.profile
│ │ │ └── linux-gcc-release.profile
│ │ ├── README.md
│ │ └── src
│ │ ├── crypto_utils.cpp
│ │ ├── crypto_utils.h
│ │ └── main.cpp
│ ├── IMPLEMENTATION_SUMMARY.md
│ ├── mcp_orchestrator
│ │ ├── __init__.py
│ │ ├── cli.py
│ │ ├── conan_integration.py
│ │ ├── cursor_config.py
│ │ ├── cursor_deployer.py
│ │ ├── deploy_cursor.py
│ │ ├── env_config.py
│ │ ├── platform_detector.py
│ │ └── yaml_validator.py
│ ├── openssl-cursor-example-workspace-20251014_121133.zip
│ ├── pyproject.toml
│ ├── README.md
│ ├── requirements.txt
│ ├── scripts
│ │ └── create_example_workspace.py
│ ├── setup.py
│ ├── test_deployment.py
│ └── tests
│ ├── __init__.py
│ ├── test_cursor_deployer.py
│ └── test_template_validation.py
├── printcast-agent
│ ├── .env.example
│ ├── config
│ │ └── asterisk
│ │ └── extensions.conf
│ ├── Containerfile
│ ├── docker-compose.yml
│ ├── pyproject.toml
│ ├── README.md
│ ├── scripts
│ │ └── docker-entrypoint.sh
│ ├── src
│ │ ├── integrations
│ │ │ ├── __init__.py
│ │ │ ├── asterisk.py
│ │ │ ├── content.py
│ │ │ ├── delivery.py
│ │ │ ├── elevenlabs.py
│ │ │ └── printing.py
│ │ ├── mcp_server
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── server.py
│ │ └── orchestration
│ │ ├── __init__.py
│ │ └── workflow.py
│ └── tests
│ └── test_mcp_server.py
├── project_orchestration.json
├── project_templates.json
├── pyproject.toml
├── README.md
├── REFACTORING_COMPLETED.md
├── REFACTORING_RECOMMENDATIONS.md
├── requirements.txt
├── scripts
│ ├── archive
│ │ ├── init_claude_test.sh
│ │ ├── init_postgres.sh
│ │ ├── start_mcp_servers.sh
│ │ └── test_claude_desktop.sh
│ ├── consolidate_mermaid.py
│ ├── consolidate_prompts.py
│ ├── consolidate_resources.py
│ ├── consolidate_templates.py
│ ├── INSTRUCTIONS.md
│ ├── README.md
│ ├── setup_aws_mcp.sh
│ ├── setup_mcp.sh
│ ├── setup_orchestrator.sh
│ ├── setup_project.py
│ └── test_mcp.sh
├── src
│ └── mcp_project_orchestrator
│ ├── __init__.py
│ ├── __main__.py
│ ├── aws_mcp.py
│ ├── cli
│ │ └── __init__.py
│ ├── cli.py
│ ├── commands
│ │ └── openssl_cli.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── config.py
│ │ ├── exceptions.py
│ │ ├── fastmcp.py
│ │ ├── logging.py
│ │ └── managers.py
│ ├── cursor_deployer.py
│ ├── ecosystem_monitor.py
│ ├── fan_out_orchestrator.py
│ ├── fastmcp.py
│ ├── mcp-py
│ │ ├── AggregateVersions.py
│ │ ├── CustomBashTool.py
│ │ ├── FileAnnotator.py
│ │ ├── mcp-client.py
│ │ ├── mcp-server.py
│ │ ├── MermaidDiagramGenerator.py
│ │ ├── NamingAgent.py
│ │ └── solid-analyzer-agent.py
│ ├── mermaid
│ │ ├── __init__.py
│ │ ├── generator.py
│ │ ├── mermaid_orchestrator.py
│ │ ├── renderer.py
│ │ ├── templates
│ │ │ ├── AbstractFactory-diagram.json
│ │ │ ├── Adapter-diagram.json
│ │ │ ├── Analyze_Mermaid_Diagram.json
│ │ │ ├── Builder-diagram.json
│ │ │ ├── Chain-diagram.json
│ │ │ ├── Code_Diagram_Documentation_Creator.json
│ │ │ ├── Command-diagram.json
│ │ │ ├── Decorator-diagram.json
│ │ │ ├── Facade-diagram.json
│ │ │ ├── Factory-diagram.json
│ │ │ ├── flowchart
│ │ │ │ ├── AbstractFactory-diagram.json
│ │ │ │ ├── Adapter-diagram.json
│ │ │ │ ├── Analyze_Mermaid_Diagram.json
│ │ │ │ ├── Builder-diagram.json
│ │ │ │ ├── Chain-diagram.json
│ │ │ │ ├── Code_Diagram_Documentation_Creator.json
│ │ │ │ ├── Command-diagram.json
│ │ │ │ ├── Decorator-diagram.json
│ │ │ │ ├── Facade-diagram.json
│ │ │ │ ├── Factory-diagram.json
│ │ │ │ ├── Generate_Mermaid_Diagram.json
│ │ │ │ ├── generated_diagram.json
│ │ │ │ ├── integration.json
│ │ │ │ ├── Iterator-diagram.json
│ │ │ │ ├── Mediator-diagram.json
│ │ │ │ ├── Memento-diagram.json
│ │ │ │ ├── Mermaid_Analysis_Expert.json
│ │ │ │ ├── Mermaid_Class_Diagram_Generator.json
│ │ │ │ ├── Mermaid_Diagram_Generator.json
│ │ │ │ ├── Mermaid_Diagram_Modifier.json
│ │ │ │ ├── Modify_Mermaid_Diagram.json
│ │ │ │ ├── Observer-diagram.json
│ │ │ │ ├── Prototype-diagram.json
│ │ │ │ ├── Proxy-diagram.json
│ │ │ │ ├── README.json
│ │ │ │ ├── Singleton-diagram.json
│ │ │ │ ├── State-diagram.json
│ │ │ │ ├── Strategy-diagram.json
│ │ │ │ ├── TemplateMethod-diagram.json
│ │ │ │ ├── theme_dark.json
│ │ │ │ ├── theme_default.json
│ │ │ │ ├── theme_pastel.json
│ │ │ │ ├── theme_vibrant.json
│ │ │ │ └── Visitor-diagram.json
│ │ │ ├── Generate_Mermaid_Diagram.json
│ │ │ ├── generated_diagram.json
│ │ │ ├── index.json
│ │ │ ├── integration.json
│ │ │ ├── Iterator-diagram.json
│ │ │ ├── Mediator-diagram.json
│ │ │ ├── Memento-diagram.json
│ │ │ ├── Mermaid_Analysis_Expert.json
│ │ │ ├── Mermaid_Class_Diagram_Generator.json
│ │ │ ├── Mermaid_Diagram_Generator.json
│ │ │ ├── Mermaid_Diagram_Modifier.json
│ │ │ ├── Modify_Mermaid_Diagram.json
│ │ │ ├── Observer-diagram.json
│ │ │ ├── Prototype-diagram.json
│ │ │ ├── Proxy-diagram.json
│ │ │ ├── README.json
│ │ │ ├── Singleton-diagram.json
│ │ │ ├── State-diagram.json
│ │ │ ├── Strategy-diagram.json
│ │ │ ├── TemplateMethod-diagram.json
│ │ │ ├── theme_dark.json
│ │ │ ├── theme_default.json
│ │ │ ├── theme_pastel.json
│ │ │ ├── theme_vibrant.json
│ │ │ └── Visitor-diagram.json
│ │ └── types.py
│ ├── project_orchestration.py
│ ├── prompt_manager
│ │ ├── __init__.py
│ │ ├── loader.py
│ │ ├── manager.py
│ │ └── template.py
│ ├── prompts
│ │ ├── __dirname.json
│ │ ├── __image_1___describe_the_icon_in_one_sen___.json
│ │ ├── __init__.py
│ │ ├── __type.json
│ │ ├── _.json
│ │ ├── _DEFAULT_OPEN_DELIMITER.json
│ │ ├── _emojiRegex.json
│ │ ├── _UUID_CHARS.json
│ │ ├── a.json
│ │ ├── A.json
│ │ ├── Aa.json
│ │ ├── aAnnotationPadding.json
│ │ ├── absoluteThresholdGroup.json
│ │ ├── add.json
│ │ ├── ADDITIONAL_PROPERTY_FLAG.json
│ │ ├── Advanced_Multi-Server_Integration_Template.json
│ │ ├── allOptionsList.json
│ │ ├── analysis
│ │ │ ├── Data_Analysis_Template.json
│ │ │ ├── index.json
│ │ │ ├── Mermaid_Analysis_Expert.json
│ │ │ ├── Sequential_Data_Analysis_with_MCP_Integration.json
│ │ │ └── SOLID_Code_Analysis_Visualizer.json
│ │ ├── Analysis_Assistant.json
│ │ ├── Analyze_Mermaid_Diagram.json
│ │ ├── ANDROID_EVERGREEN_FIRST.json
│ │ ├── ANSI_ESCAPE_BELL.json
│ │ ├── architecture
│ │ │ ├── index.json
│ │ │ └── PromptCombiner_Interface.json
│ │ ├── Architecture_Design_Assistant.json
│ │ ├── argsTag.json
│ │ ├── ARROW.json
│ │ ├── assistant
│ │ │ ├── Analysis_Assistant.json
│ │ │ ├── Architecture_Design_Assistant.json
│ │ │ ├── Code_Refactoring_Assistant.json
│ │ │ ├── Code_Review_Assistant.json
│ │ │ ├── Database_Query_Assistant.json
│ │ │ ├── Debugging_Assistant.json
│ │ │ ├── Foresight_Assistant.json
│ │ │ ├── index.json
│ │ │ ├── MCP_Integration_Assistant.json
│ │ │ ├── Project_Analysis_Assistant.json
│ │ │ └── Research_Assistant.json
│ │ ├── astralRange.json
│ │ ├── at.json
│ │ ├── authorization_endpoint.json
│ │ ├── b.json
│ │ ├── BABELIGNORE_FILENAME.json
│ │ ├── BACKSLASH.json
│ │ ├── backupId.json
│ │ ├── BANG.json
│ │ ├── BASE64_MAP.json
│ │ ├── baseFlags.json
│ │ ├── Basic_Template.json
│ │ ├── bgModel.json
│ │ ├── bignum.json
│ │ ├── blockKeywordsStr.json
│ │ ├── BOMChar.json
│ │ ├── boundary.json
│ │ ├── brackets.json
│ │ ├── BROWSER_VAR.json
│ │ ├── bt.json
│ │ ├── BUILTIN.json
│ │ ├── BULLET.json
│ │ ├── c.json
│ │ ├── C.json
│ │ ├── CACHE_VERSION.json
│ │ ├── cacheControl.json
│ │ ├── cacheProp.json
│ │ ├── category.py
│ │ ├── CHANGE_EVENT.json
│ │ ├── CHAR_CODE_0.json
│ │ ├── chars.json
│ │ ├── cjsPattern.json
│ │ ├── cKeywords.json
│ │ ├── classForPercent.json
│ │ ├── classStr.json
│ │ ├── clientFirstMessageBare.json
│ │ ├── cmd.json
│ │ ├── Code_Diagram_Documentation_Creator.json
│ │ ├── Code_Refactoring_Assistant.json
│ │ ├── Code_Review_Assistant.json
│ │ ├── code.json
│ │ ├── coding
│ │ │ ├── __dirname.json
│ │ │ ├── _.json
│ │ │ ├── _DEFAULT_OPEN_DELIMITER.json
│ │ │ ├── _emojiRegex.json
│ │ │ ├── _UUID_CHARS.json
│ │ │ ├── a.json
│ │ │ ├── A.json
│ │ │ ├── aAnnotationPadding.json
│ │ │ ├── absoluteThresholdGroup.json
│ │ │ ├── add.json
│ │ │ ├── ADDITIONAL_PROPERTY_FLAG.json
│ │ │ ├── allOptionsList.json
│ │ │ ├── ANDROID_EVERGREEN_FIRST.json
│ │ │ ├── ANSI_ESCAPE_BELL.json
│ │ │ ├── argsTag.json
│ │ │ ├── ARROW.json
│ │ │ ├── astralRange.json
│ │ │ ├── at.json
│ │ │ ├── authorization_endpoint.json
│ │ │ ├── BABELIGNORE_FILENAME.json
│ │ │ ├── BACKSLASH.json
│ │ │ ├── BANG.json
│ │ │ ├── BASE64_MAP.json
│ │ │ ├── baseFlags.json
│ │ │ ├── bgModel.json
│ │ │ ├── bignum.json
│ │ │ ├── blockKeywordsStr.json
│ │ │ ├── BOMChar.json
│ │ │ ├── boundary.json
│ │ │ ├── brackets.json
│ │ │ ├── BROWSER_VAR.json
│ │ │ ├── bt.json
│ │ │ ├── BUILTIN.json
│ │ │ ├── BULLET.json
│ │ │ ├── c.json
│ │ │ ├── C.json
│ │ │ ├── CACHE_VERSION.json
│ │ │ ├── cacheControl.json
│ │ │ ├── cacheProp.json
│ │ │ ├── CHANGE_EVENT.json
│ │ │ ├── CHAR_CODE_0.json
│ │ │ ├── chars.json
│ │ │ ├── cjsPattern.json
│ │ │ ├── cKeywords.json
│ │ │ ├── classForPercent.json
│ │ │ ├── classStr.json
│ │ │ ├── clientFirstMessageBare.json
│ │ │ ├── cmd.json
│ │ │ ├── code.json
│ │ │ ├── colorCode.json
│ │ │ ├── comma.json
│ │ │ ├── command.json
│ │ │ ├── configJsContent.json
│ │ │ ├── connectionString.json
│ │ │ ├── cssClassStr.json
│ │ │ ├── currentBoundaryParse.json
│ │ │ ├── d.json
│ │ │ ├── data.json
│ │ │ ├── DATA.json
│ │ │ ├── dataWebpackPrefix.json
│ │ │ ├── debug.json
│ │ │ ├── decodeStateVectorV2.json
│ │ │ ├── DEFAULT_DELIMITER.json
│ │ │ ├── DEFAULT_DIAGRAM_DIRECTION.json
│ │ │ ├── DEFAULT_JS_PATTERN.json
│ │ │ ├── DEFAULT_LOG_TARGET.json
│ │ │ ├── defaultHelpOpt.json
│ │ │ ├── defaultHost.json
│ │ │ ├── deferY18nLookupPrefix.json
│ │ │ ├── DELIM.json
│ │ │ ├── delimiter.json
│ │ │ ├── DEPRECATION.json
│ │ │ ├── destMain.json
│ │ │ ├── DID_NOT_THROW.json
│ │ │ ├── direction.json
│ │ │ ├── displayValue.json
│ │ │ ├── DNS.json
│ │ │ ├── doc.json
│ │ │ ├── DOCUMENTATION_NOTE.json
│ │ │ ├── DOT.json
│ │ │ ├── DOTS.json
│ │ │ ├── dummyCompoundId.json
│ │ │ ├── e.json
│ │ │ ├── E.json
│ │ │ ├── earlyHintsLink.json
│ │ │ ├── elide.json
│ │ │ ├── EMPTY.json
│ │ │ ├── end.json
│ │ │ ├── endpoint.json
│ │ │ ├── environment.json
│ │ │ ├── ERR_CODE.json
│ │ │ ├── errMessage.json
│ │ │ ├── errMsg.json
│ │ │ ├── ERROR_MESSAGE.json
│ │ │ ├── error.json
│ │ │ ├── ERROR.json
│ │ │ ├── ERRORCLASS.json
│ │ │ ├── errorMessage.json
│ │ │ ├── es6Default.json
│ │ │ ├── ESC.json
│ │ │ ├── Escapable.json
│ │ │ ├── escapedChar.json
│ │ │ ├── escapeFuncStr.json
│ │ │ ├── escSlash.json
│ │ │ ├── ev.json
│ │ │ ├── event.json
│ │ │ ├── execaMessage.json
│ │ │ ├── EXPECTED_LABEL.json
│ │ │ ├── expected.json
│ │ │ ├── expectedString.json
│ │ │ ├── expression1.json
│ │ │ ├── EXTENSION.json
│ │ │ ├── f.json
│ │ │ ├── FAIL_TEXT.json
│ │ │ ├── FILE_BROWSER_FACTORY.json
│ │ │ ├── fill.json
│ │ │ ├── findPackageJson.json
│ │ │ ├── fnKey.json
│ │ │ ├── FORMAT.json
│ │ │ ├── formatted.json
│ │ │ ├── from.json
│ │ │ ├── fullpaths.json
│ │ │ ├── FUNC_ERROR_TEXT.json
│ │ │ ├── GenStateSuspendedStart.json
│ │ │ ├── GENSYNC_EXPECTED_START.json
│ │ │ ├── gutter.json
│ │ │ ├── h.json
│ │ │ ├── handlerFuncName.json
│ │ │ ├── HASH_UNDEFINED.json
│ │ │ ├── head.json
│ │ │ ├── helpMessage.json
│ │ │ ├── HINT_ARG.json
│ │ │ ├── HOOK_RETURNED_NOTHING_ERROR_MESSAGE.json
│ │ │ ├── i.json
│ │ │ ├── id.json
│ │ │ ├── identifier.json
│ │ │ ├── Identifier.json
│ │ │ ├── INDENT.json
│ │ │ ├── indentation.json
│ │ │ ├── index.json
│ │ │ ├── INDIRECTION_FRAGMENT.json
│ │ │ ├── input.json
│ │ │ ├── inputText.json
│ │ │ ├── insert.json
│ │ │ ├── insertPromptQuery.json
│ │ │ ├── INSPECT_MAX_BYTES.json
│ │ │ ├── intToCharMap.json
│ │ │ ├── IS_ITERABLE_SENTINEL.json
│ │ │ ├── IS_KEYED_SENTINEL.json
│ │ │ ├── isConfigType.json
│ │ │ ├── isoSentinel.json
│ │ │ ├── isSourceNode.json
│ │ │ ├── j.json
│ │ │ ├── JAKE_CMD.json
│ │ │ ├── JEST_GLOBAL_NAME.json
│ │ │ ├── JEST_GLOBALS_MODULE_NAME.json
│ │ │ ├── JSON_SYNTAX_CHAR.json
│ │ │ ├── json.json
│ │ │ ├── jsonType.json
│ │ │ ├── jupyter_namespaceObject.json
│ │ │ ├── JUPYTERLAB_DOCMANAGER_PLUGIN_ID.json
│ │ │ ├── k.json
│ │ │ ├── KERNEL_STATUS_ERROR_CLASS.json
│ │ │ ├── key.json
│ │ │ ├── l.json
│ │ │ ├── labelId.json
│ │ │ ├── LATEST_PROTOCOL_VERSION.json
│ │ │ ├── LETTERDASHNUMBER.json
│ │ │ ├── LF.json
│ │ │ ├── LIMIT_REPLACE_NODE.json
│ │ │ ├── logTime.json
│ │ │ ├── lstatkey.json
│ │ │ ├── lt.json
│ │ │ ├── m.json
│ │ │ ├── maliciousPayload.json
│ │ │ ├── mask.json
│ │ │ ├── match.json
│ │ │ ├── matchingDelim.json
│ │ │ ├── MAXIMUM_MESSAGE_SIZE.json
│ │ │ ├── mdcContent.json
│ │ │ ├── MERMAID_DOM_ID_PREFIX.json
│ │ │ ├── message.json
│ │ │ ├── messages.json
│ │ │ ├── meth.json
│ │ │ ├── minimatch.json
│ │ │ ├── MOCK_CONSTRUCTOR_NAME.json
│ │ │ ├── MOCKS_PATTERN.json
│ │ │ ├── moduleDirectory.json
│ │ │ ├── msg.json
│ │ │ ├── mtr.json
│ │ │ ├── multipartType.json
│ │ │ ├── n.json
│ │ │ ├── N.json
│ │ │ ├── name.json
│ │ │ ├── NATIVE_PLATFORM.json
│ │ │ ├── newUrl.json
│ │ │ ├── NM.json
│ │ │ ├── NO_ARGUMENTS.json
│ │ │ ├── NO_DIFF_MESSAGE.json
│ │ │ ├── NODE_MODULES.json
│ │ │ ├── nodeInternalPrefix.json
│ │ │ ├── nonASCIIidentifierStartChars.json
│ │ │ ├── nonKey.json
│ │ │ ├── NOT_A_DOT.json
│ │ │ ├── notCharacterOrDash.json
│ │ │ ├── notebookURL.json
│ │ │ ├── notSelector.json
│ │ │ ├── nullTag.json
│ │ │ ├── num.json
│ │ │ ├── NUMBER.json
│ │ │ ├── o.json
│ │ │ ├── O.json
│ │ │ ├── octChar.json
│ │ │ ├── octetStreamType.json
│ │ │ ├── operators.json
│ │ │ ├── out.json
│ │ │ ├── OUTSIDE_JEST_VM_PROTOCOL.json
│ │ │ ├── override.json
│ │ │ ├── p.json
│ │ │ ├── PACKAGE_FILENAME.json
│ │ │ ├── PACKAGE_JSON.json
│ │ │ ├── packageVersion.json
│ │ │ ├── paddedNumber.json
│ │ │ ├── page.json
│ │ │ ├── parseClass.json
│ │ │ ├── path.json
│ │ │ ├── pathExt.json
│ │ │ ├── pattern.json
│ │ │ ├── PatternBoolean.json
│ │ │ ├── pBuiltins.json
│ │ │ ├── pFloatForm.json
│ │ │ ├── pkg.json
│ │ │ ├── PLUGIN_ID_DOC_MANAGER.json
│ │ │ ├── plusChar.json
│ │ │ ├── PN_CHARS.json
│ │ │ ├── point.json
│ │ │ ├── prefix.json
│ │ │ ├── PRETTY_PLACEHOLDER.json
│ │ │ ├── property_prefix.json
│ │ │ ├── pubkey256.json
│ │ │ ├── Q.json
│ │ │ ├── qmark.json
│ │ │ ├── QO.json
│ │ │ ├── query.json
│ │ │ ├── querystringType.json
│ │ │ ├── queryText.json
│ │ │ ├── r.json
│ │ │ ├── R.json
│ │ │ ├── rangeStart.json
│ │ │ ├── re.json
│ │ │ ├── reI.json
│ │ │ ├── REQUIRED_FIELD_SYMBOL.json
│ │ │ ├── reserve.json
│ │ │ ├── resolvedDestination.json
│ │ │ ├── resolverDir.json
│ │ │ ├── responseType.json
│ │ │ ├── result.json
│ │ │ ├── ROOT_DESCRIBE_BLOCK_NAME.json
│ │ │ ├── ROOT_NAMESPACE_NAME.json
│ │ │ ├── ROOT_TASK_NAME.json
│ │ │ ├── route.json
│ │ │ ├── RUNNING_TEXT.json
│ │ │ ├── s.json
│ │ │ ├── SCHEMA_PATH.json
│ │ │ ├── se.json
│ │ │ ├── SEARCHABLE_CLASS.json
│ │ │ ├── secret.json
│ │ │ ├── selector.json
│ │ │ ├── SEMVER_SPEC_VERSION.json
│ │ │ ├── sensitiveHeaders.json
│ │ │ ├── sep.json
│ │ │ ├── separator.json
│ │ │ ├── SHAPE_STATE.json
│ │ │ ├── shape.json
│ │ │ ├── SHARED.json
│ │ │ ├── short.json
│ │ │ ├── side.json
│ │ │ ├── SNAPSHOT_VERSION.json
│ │ │ ├── SOURCE_MAPPING_PREFIX.json
│ │ │ ├── source.json
│ │ │ ├── sourceMapContent.json
│ │ │ ├── SPACE_SYMBOL.json
│ │ │ ├── SPACE.json
│ │ │ ├── sqlKeywords.json
│ │ │ ├── sranges.json
│ │ │ ├── st.json
│ │ │ ├── ST.json
│ │ │ ├── stack.json
│ │ │ ├── START_HIDING.json
│ │ │ ├── START_OF_LINE.json
│ │ │ ├── startNoTraversal.json
│ │ │ ├── STATES.json
│ │ │ ├── stats.json
│ │ │ ├── statSync.json
│ │ │ ├── storageStatus.json
│ │ │ ├── storageType.json
│ │ │ ├── str.json
│ │ │ ├── stringifiedObject.json
│ │ │ ├── stringPath.json
│ │ │ ├── stringResult.json
│ │ │ ├── stringTag.json
│ │ │ ├── strValue.json
│ │ │ ├── style.json
│ │ │ ├── SUB_NAME.json
│ │ │ ├── subkey.json
│ │ │ ├── SUBPROTOCOL.json
│ │ │ ├── SUITE_NAME.json
│ │ │ ├── symbolPattern.json
│ │ │ ├── symbolTag.json
│ │ │ ├── t.json
│ │ │ ├── T.json
│ │ │ ├── templateDir.json
│ │ │ ├── tempName.json
│ │ │ ├── text.json
│ │ │ ├── time.json
│ │ │ ├── titleSeparator.json
│ │ │ ├── tmpl.json
│ │ │ ├── tn.json
│ │ │ ├── toValue.json
│ │ │ ├── transform.json
│ │ │ ├── trustProxyDefaultSymbol.json
│ │ │ ├── typeArgumentsKey.json
│ │ │ ├── typeKey.json
│ │ │ ├── typeMessage.json
│ │ │ ├── typesRegistryPackageName.json
│ │ │ ├── u.json
│ │ │ ├── UNDEFINED.json
│ │ │ ├── unit.json
│ │ │ ├── UNMATCHED_SURROGATE_PAIR_REPLACE.json
│ │ │ ├── ur.json
│ │ │ ├── USAGE.json
│ │ │ ├── value.json
│ │ │ ├── Vr.json
│ │ │ ├── watchmanURL.json
│ │ │ ├── webkit.json
│ │ │ ├── xhtml.json
│ │ │ ├── XP_DEFAULT_PATHEXT.json
│ │ │ └── y.json
│ │ ├── Collaborative_Development_with_MCP_Integration.json
│ │ ├── colorCode.json
│ │ ├── comma.json
│ │ ├── command.json
│ │ ├── completionShTemplate.json
│ │ ├── configJsContent.json
│ │ ├── connectionString.json
│ │ ├── Consolidated_TypeScript_Interfaces_Template.json
│ │ ├── Could_you_interpret_the_assumed_applicat___.json
│ │ ├── cssClassStr.json
│ │ ├── currentBoundaryParse.json
│ │ ├── d.json
│ │ ├── Data_Analysis_Template.json
│ │ ├── data.json
│ │ ├── DATA.json
│ │ ├── Database_Query_Assistant.json
│ │ ├── dataWebpackPrefix.json
│ │ ├── debug.json
│ │ ├── Debugging_Assistant.json
│ │ ├── decodeStateVectorV2.json
│ │ ├── DEFAULT_DELIMITER.json
│ │ ├── DEFAULT_DIAGRAM_DIRECTION.json
│ │ ├── DEFAULT_INDENT.json
│ │ ├── DEFAULT_JS_PATTERN.json
│ │ ├── DEFAULT_LOG_TARGET.json
│ │ ├── defaultHelpOpt.json
│ │ ├── defaultHost.json
│ │ ├── deferY18nLookupPrefix.json
│ │ ├── DELIM.json
│ │ ├── delimiter.json
│ │ ├── DEPRECATION.json
│ │ ├── DESCENDING.json
│ │ ├── destMain.json
│ │ ├── development
│ │ │ ├── Collaborative_Development_with_MCP_Integration.json
│ │ │ ├── Consolidated_TypeScript_Interfaces_Template.json
│ │ │ ├── Development_Workflow.json
│ │ │ ├── index.json
│ │ │ ├── MCP_Server_Development_Prompt_Combiner.json
│ │ │ └── Monorepo_Migration_and_Code_Organization_Guide.json
│ │ ├── Development_System_Prompt.json
│ │ ├── Development_Workflow.json
│ │ ├── devops
│ │ │ ├── Docker_Compose_Prompt_Combiner.json
│ │ │ ├── Docker_Containerization_Guide.json
│ │ │ └── index.json
│ │ ├── DID_NOT_THROW.json
│ │ ├── direction.json
│ │ ├── displayValue.json
│ │ ├── DNS.json
│ │ ├── doc.json
│ │ ├── Docker_Compose_Prompt_Combiner.json
│ │ ├── Docker_Containerization_Guide.json
│ │ ├── Docker_MCP_Servers_Orchestration_Guide.json
│ │ ├── DOCUMENTATION_NOTE.json
│ │ ├── DOT.json
│ │ ├── DOTS.json
│ │ ├── dummyCompoundId.json
│ │ ├── e.json
│ │ ├── E.json
│ │ ├── earlyHintsLink.json
│ │ ├── elide.json
│ │ ├── EMPTY.json
│ │ ├── encoded.json
│ │ ├── end.json
│ │ ├── endpoint.json
│ │ ├── environment.json
│ │ ├── ERR_CODE.json
│ │ ├── errMessage.json
│ │ ├── errMsg.json
│ │ ├── ERROR_MESSAGE.json
│ │ ├── error.json
│ │ ├── ERROR.json
│ │ ├── ERRORCLASS.json
│ │ ├── errorMessage.json
│ │ ├── es6Default.json
│ │ ├── ESC.json
│ │ ├── Escapable.json
│ │ ├── escapedChar.json
│ │ ├── escapeFuncStr.json
│ │ ├── escSlash.json
│ │ ├── ev.json
│ │ ├── event.json
│ │ ├── execaMessage.json
│ │ ├── EXPECTED_LABEL.json
│ │ ├── expected.json
│ │ ├── expectedString.json
│ │ ├── expression1.json
│ │ ├── EXTENSION.json
│ │ ├── f.json
│ │ ├── FAIL_TEXT.json
│ │ ├── FILE_BROWSER_FACTORY.json
│ │ ├── fill.json
│ │ ├── findPackageJson.json
│ │ ├── fnKey.json
│ │ ├── Foresight_Assistant.json
│ │ ├── FORMAT.json
│ │ ├── formatted.json
│ │ ├── from.json
│ │ ├── fullpaths.json
│ │ ├── FUNC_ERROR_TEXT.json
│ │ ├── general
│ │ │ └── index.json
│ │ ├── Generate_different_types_of_questions_ab___.json
│ │ ├── Generate_Mermaid_Diagram.json
│ │ ├── GenStateSuspendedStart.json
│ │ ├── GENSYNC_EXPECTED_START.json
│ │ ├── GitHub_Repository_Explorer.json
│ │ ├── gutter.json
│ │ ├── h.json
│ │ ├── handlerFuncName.json
│ │ ├── HASH_UNDEFINED.json
│ │ ├── head.json
│ │ ├── helpMessage.json
│ │ ├── HINT_ARG.json
│ │ ├── HOOK_RETURNED_NOTHING_ERROR_MESSAGE.json
│ │ ├── i.json
│ │ ├── id.json
│ │ ├── identifier.json
│ │ ├── Identifier.json
│ │ ├── INDENT.json
│ │ ├── indentation.json
│ │ ├── index.json
│ │ ├── INDIRECTION_FRAGMENT.json
│ │ ├── Initialize_project_setup_for_a_new_micro___.json
│ │ ├── input.json
│ │ ├── inputText.json
│ │ ├── insert.json
│ │ ├── insertPromptQuery.json
│ │ ├── INSPECT_MAX_BYTES.json
│ │ ├── install_dependencies__build__run__test____.json
│ │ ├── intToCharMap.json
│ │ ├── IS_ITERABLE_SENTINEL.json
│ │ ├── IS_KEYED_SENTINEL.json
│ │ ├── isConfigType.json
│ │ ├── isoSentinel.json
│ │ ├── isSourceNode.json
│ │ ├── j.json
│ │ ├── J.json
│ │ ├── JAKE_CMD.json
│ │ ├── JEST_GLOBAL_NAME.json
│ │ ├── JEST_GLOBALS_MODULE_NAME.json
│ │ ├── JSON_SYNTAX_CHAR.json
│ │ ├── json.json
│ │ ├── jsonType.json
│ │ ├── jupyter_namespaceObject.json
│ │ ├── JUPYTERLAB_DOCMANAGER_PLUGIN_ID.json
│ │ ├── k.json
│ │ ├── KERNEL_STATUS_ERROR_CLASS.json
│ │ ├── key.json
│ │ ├── l.json
│ │ ├── labelId.json
│ │ ├── LATEST_PROTOCOL_VERSION.json
│ │ ├── LETTERDASHNUMBER.json
│ │ ├── LF.json
│ │ ├── LIMIT_REPLACE_NODE.json
│ │ ├── LINE_FEED.json
│ │ ├── logTime.json
│ │ ├── lstatkey.json
│ │ ├── lt.json
│ │ ├── m.json
│ │ ├── maliciousPayload.json
│ │ ├── manager.py
│ │ ├── marker.json
│ │ ├── mask.json
│ │ ├── match.json
│ │ ├── matchingDelim.json
│ │ ├── MAXIMUM_MESSAGE_SIZE.json
│ │ ├── MCP_Integration_Assistant.json
│ │ ├── MCP_Resources_Explorer.json
│ │ ├── MCP_Resources_Integration_Guide.json
│ │ ├── MCP_Server_Development_Prompt_Combiner.json
│ │ ├── MCP_Server_Integration_Guide.json
│ │ ├── mcp-code-generator.json
│ │ ├── mdcContent.json
│ │ ├── Mermaid_Analysis_Expert.json
│ │ ├── Mermaid_Class_Diagram_Generator.json
│ │ ├── Mermaid_Diagram_Generator.json
│ │ ├── Mermaid_Diagram_Modifier.json
│ │ ├── MERMAID_DOM_ID_PREFIX.json
│ │ ├── message.json
│ │ ├── messages.json
│ │ ├── meth.json
│ │ ├── minimatch.json
│ │ ├── MOBILE_QUERY.json
│ │ ├── MOCK_CONSTRUCTOR_NAME.json
│ │ ├── MOCKS_PATTERN.json
│ │ ├── Modify_Mermaid_Diagram.json
│ │ ├── moduleDirectory.json
│ │ ├── Monorepo_Migration_and_Code_Organization_Guide.json
│ │ ├── msg.json
│ │ ├── mtr.json
│ │ ├── Multi-Resource_Context_Assistant.json
│ │ ├── multipartType.json
│ │ ├── n.json
│ │ ├── N.json
│ │ ├── name.json
│ │ ├── NATIVE_PLATFORM.json
│ │ ├── newUrl.json
│ │ ├── NM.json
│ │ ├── NO_ARGUMENTS.json
│ │ ├── NO_DIFF_MESSAGE.json
│ │ ├── NODE_MODULES.json
│ │ ├── nodeInternalPrefix.json
│ │ ├── nonASCIIidentifierStartChars.json
│ │ ├── nonKey.json
│ │ ├── NOT_A_DOT.json
│ │ ├── notCharacterOrDash.json
│ │ ├── notebookURL.json
│ │ ├── notSelector.json
│ │ ├── nullTag.json
│ │ ├── num.json
│ │ ├── NUMBER.json
│ │ ├── o.json
│ │ ├── O.json
│ │ ├── octChar.json
│ │ ├── octetStreamType.json
│ │ ├── operators.json
│ │ ├── other
│ │ │ ├── __image_1___describe_the_icon_in_one_sen___.json
│ │ │ ├── __type.json
│ │ │ ├── Advanced_Multi-Server_Integration_Template.json
│ │ │ ├── Analyze_Mermaid_Diagram.json
│ │ │ ├── Basic_Template.json
│ │ │ ├── Code_Diagram_Documentation_Creator.json
│ │ │ ├── Collaborative_Development_with_MCP_Integration.json
│ │ │ ├── completionShTemplate.json
│ │ │ ├── Could_you_interpret_the_assumed_applicat___.json
│ │ │ ├── DEFAULT_INDENT.json
│ │ │ ├── Docker_MCP_Servers_Orchestration_Guide.json
│ │ │ ├── Generate_different_types_of_questions_ab___.json
│ │ │ ├── Generate_Mermaid_Diagram.json
│ │ │ ├── GitHub_Repository_Explorer.json
│ │ │ ├── index.json
│ │ │ ├── Initialize_project_setup_for_a_new_micro___.json
│ │ │ ├── install_dependencies__build__run__test____.json
│ │ │ ├── LINE_FEED.json
│ │ │ ├── MCP_Resources_Explorer.json
│ │ │ ├── MCP_Resources_Integration_Guide.json
│ │ │ ├── MCP_Server_Integration_Guide.json
│ │ │ ├── mcp-code-generator.json
│ │ │ ├── Mermaid_Class_Diagram_Generator.json
│ │ │ ├── Mermaid_Diagram_Generator.json
│ │ │ ├── Mermaid_Diagram_Modifier.json
│ │ │ ├── Modify_Mermaid_Diagram.json
│ │ │ ├── Multi-Resource_Context_Assistant.json
│ │ │ ├── output.json
│ │ │ ├── sseUrl.json
│ │ │ ├── string.json
│ │ │ ├── Task_List_Helper.json
│ │ │ ├── Template-Based_MCP_Integration.json
│ │ │ ├── Test_Prompt.json
│ │ │ ├── type.json
│ │ │ ├── VERSION.json
│ │ │ ├── WIN_SLASH.json
│ │ │ └── You_are_limited_to_respond_Yes_or_No_onl___.json
│ │ ├── out.json
│ │ ├── output.json
│ │ ├── OUTSIDE_JEST_VM_PROTOCOL.json
│ │ ├── override.json
│ │ ├── p.json
│ │ ├── PACKAGE_FILENAME.json
│ │ ├── PACKAGE_JSON.json
│ │ ├── packageVersion.json
│ │ ├── paddedNumber.json
│ │ ├── page.json
│ │ ├── parseClass.json
│ │ ├── PATH_NODE_MODULES.json
│ │ ├── path.json
│ │ ├── pathExt.json
│ │ ├── pattern.json
│ │ ├── PatternBoolean.json
│ │ ├── pBuiltins.json
│ │ ├── pFloatForm.json
│ │ ├── pkg.json
│ │ ├── PLUGIN_ID_DOC_MANAGER.json
│ │ ├── plusChar.json
│ │ ├── PN_CHARS.json
│ │ ├── point.json
│ │ ├── prefix.json
│ │ ├── PRETTY_PLACEHOLDER.json
│ │ ├── Project_Analysis_Assistant.json
│ │ ├── ProjectsUpdatedInBackgroundEvent.json
│ │ ├── PromptCombiner_Interface.json
│ │ ├── promptId.json
│ │ ├── property_prefix.json
│ │ ├── pubkey256.json
│ │ ├── Q.json
│ │ ├── qmark.json
│ │ ├── QO.json
│ │ ├── query.json
│ │ ├── querystringType.json
│ │ ├── queryText.json
│ │ ├── r.json
│ │ ├── R.json
│ │ ├── rangeStart.json
│ │ ├── re.json
│ │ ├── reI.json
│ │ ├── REQUIRED_FIELD_SYMBOL.json
│ │ ├── Research_Assistant.json
│ │ ├── reserve.json
│ │ ├── resolvedDestination.json
│ │ ├── resolverDir.json
│ │ ├── responseType.json
│ │ ├── result.json
│ │ ├── ROOT_DESCRIBE_BLOCK_NAME.json
│ │ ├── ROOT_NAMESPACE_NAME.json
│ │ ├── ROOT_TASK_NAME.json
│ │ ├── route.json
│ │ ├── RUNNING_TEXT.json
│ │ ├── RXstyle.json
│ │ ├── s.json
│ │ ├── SCHEMA_PATH.json
│ │ ├── schemaQuery.json
│ │ ├── se.json
│ │ ├── SEARCHABLE_CLASS.json
│ │ ├── secret.json
│ │ ├── selector.json
│ │ ├── SEMVER_SPEC_VERSION.json
│ │ ├── sensitiveHeaders.json
│ │ ├── sep.json
│ │ ├── separator.json
│ │ ├── Sequential_Data_Analysis_with_MCP_Integration.json
│ │ ├── SHAPE_STATE.json
│ │ ├── shape.json
│ │ ├── SHARED.json
│ │ ├── short.json
│ │ ├── side.json
│ │ ├── SNAPSHOT_VERSION.json
│ │ ├── SOLID_Code_Analysis_Visualizer.json
│ │ ├── SOURCE_MAPPING_PREFIX.json
│ │ ├── source.json
│ │ ├── sourceMapContent.json
│ │ ├── SPACE_SYMBOL.json
│ │ ├── SPACE.json
│ │ ├── sqlKeywords.json
│ │ ├── sranges.json
│ │ ├── sseUrl.json
│ │ ├── st.json
│ │ ├── ST.json
│ │ ├── stack.json
│ │ ├── START_HIDING.json
│ │ ├── START_OF_LINE.json
│ │ ├── startNoTraversal.json
│ │ ├── STATES.json
│ │ ├── stats.json
│ │ ├── statSync.json
│ │ ├── status.json
│ │ ├── storageStatus.json
│ │ ├── storageType.json
│ │ ├── str.json
│ │ ├── string.json
│ │ ├── stringifiedObject.json
│ │ ├── stringPath.json
│ │ ├── stringResult.json
│ │ ├── stringTag.json
│ │ ├── strValue.json
│ │ ├── style.json
│ │ ├── SUB_NAME.json
│ │ ├── subkey.json
│ │ ├── SUBPROTOCOL.json
│ │ ├── SUITE_NAME.json
│ │ ├── symbolPattern.json
│ │ ├── symbolTag.json
│ │ ├── system
│ │ │ ├── Aa.json
│ │ │ ├── b.json
│ │ │ ├── Development_System_Prompt.json
│ │ │ ├── index.json
│ │ │ ├── marker.json
│ │ │ ├── PATH_NODE_MODULES.json
│ │ │ ├── ProjectsUpdatedInBackgroundEvent.json
│ │ │ ├── RXstyle.json
│ │ │ ├── status.json
│ │ │ └── versionMajorMinor.json
│ │ ├── t.json
│ │ ├── T.json
│ │ ├── Task_List_Helper.json
│ │ ├── Template-Based_MCP_Integration.json
│ │ ├── template.py
│ │ ├── templateDir.json
│ │ ├── tempName.json
│ │ ├── Test_Prompt.json
│ │ ├── text.json
│ │ ├── time.json
│ │ ├── titleSeparator.json
│ │ ├── tmpl.json
│ │ ├── tn.json
│ │ ├── TOPBAR_FACTORY.json
│ │ ├── toValue.json
│ │ ├── transform.json
│ │ ├── trustProxyDefaultSymbol.json
│ │ ├── txt.json
│ │ ├── type.json
│ │ ├── typeArgumentsKey.json
│ │ ├── typeKey.json
│ │ ├── typeMessage.json
│ │ ├── typesRegistryPackageName.json
│ │ ├── u.json
│ │ ├── UNDEFINED.json
│ │ ├── unit.json
│ │ ├── UNMATCHED_SURROGATE_PAIR_REPLACE.json
│ │ ├── ur.json
│ │ ├── usage.json
│ │ ├── USAGE.json
│ │ ├── user
│ │ │ ├── backupId.json
│ │ │ ├── DESCENDING.json
│ │ │ ├── encoded.json
│ │ │ ├── index.json
│ │ │ ├── J.json
│ │ │ ├── MOBILE_QUERY.json
│ │ │ ├── promptId.json
│ │ │ ├── schemaQuery.json
│ │ │ ├── TOPBAR_FACTORY.json
│ │ │ ├── txt.json
│ │ │ └── usage.json
│ │ ├── value.json
│ │ ├── VERSION.json
│ │ ├── version.py
│ │ ├── versionMajorMinor.json
│ │ ├── Vr.json
│ │ ├── watchmanURL.json
│ │ ├── webkit.json
│ │ ├── WIN_SLASH.json
│ │ ├── xhtml.json
│ │ ├── XP_DEFAULT_PATHEXT.json
│ │ ├── y.json
│ │ └── You_are_limited_to_respond_Yes_or_No_onl___.json
│ ├── resources
│ │ ├── __init__.py
│ │ ├── code_examples
│ │ │ └── index.json
│ │ ├── config
│ │ │ └── index.json
│ │ ├── documentation
│ │ │ └── index.json
│ │ ├── images
│ │ │ └── index.json
│ │ ├── index.json
│ │ └── other
│ │ └── index.json
│ ├── server.py
│ ├── templates
│ │ ├── __init__.py
│ │ ├── AbstractFactory.json
│ │ ├── Adapter.json
│ │ ├── base.py
│ │ ├── Builder.json
│ │ ├── Chain.json
│ │ ├── Command.json
│ │ ├── component
│ │ │ ├── AbstractFactory.json
│ │ │ ├── Adapter.json
│ │ │ ├── Builder.json
│ │ │ ├── Chain.json
│ │ │ ├── Command.json
│ │ │ ├── Decorator.json
│ │ │ ├── Facade.json
│ │ │ ├── Factory.json
│ │ │ ├── Iterator.json
│ │ │ ├── Mediator.json
│ │ │ ├── Memento.json
│ │ │ ├── Observer.json
│ │ │ ├── Prototype.json
│ │ │ ├── Proxy.json
│ │ │ ├── Singleton.json
│ │ │ ├── State.json
│ │ │ ├── Strategy.json
│ │ │ ├── TemplateMethod.json
│ │ │ └── Visitor.json
│ │ ├── component.py
│ │ ├── Decorator.json
│ │ ├── Facade.json
│ │ ├── Factory.json
│ │ ├── index.json
│ │ ├── Iterator.json
│ │ ├── manager.py
│ │ ├── Mediator.json
│ │ ├── Memento.json
│ │ ├── Observer.json
│ │ ├── project.py
│ │ ├── Prototype.json
│ │ ├── Proxy.json
│ │ ├── renderer.py
│ │ ├── Singleton.json
│ │ ├── State.json
│ │ ├── Strategy.json
│ │ ├── template_manager.py
│ │ ├── TemplateMethod.json
│ │ ├── types.py
│ │ └── Visitor.json
│ └── utils
│ └── __init__.py
├── SUMMARY.md
├── TASK_COMPLETION_SUMMARY.md
├── templates
│ └── openssl
│ ├── files
│ │ ├── CMakeLists.txt.jinja2
│ │ ├── conanfile.py.jinja2
│ │ ├── main.cpp.jinja2
│ │ └── README.md.jinja2
│ ├── openssl-consumer.json
│ └── template.json
├── test_openssl_integration.sh
├── test_package
│ └── conanfile.py
└── tests
├── __init__.py
├── conftest.py
├── integration
│ ├── test_core_integration.py
│ ├── test_mermaid_integration.py
│ ├── test_prompt_manager_integration.py
│ └── test_server_integration.py
├── test_aws_mcp.py
├── test_base_classes.py
├── test_config.py
├── test_exceptions.py
├── test_mermaid.py
├── test_prompts.py
└── test_templates.py
```
# Files
--------------------------------------------------------------------------------
/conanfile.py:
--------------------------------------------------------------------------------
```python
1 | from conan import ConanFile
2 | from conan.tools.files import copy, save
3 | import os
4 |
5 |
class MCPProjectOrchestratorConan(ConanFile):
    """Conan recipe packaging the pure-Python MCP Project Orchestrator."""

    name = "mcp-project-orchestrator"
    version = "0.1.0"
    license = "MIT"
    url = "https://github.com/sparesparrow/mcp-project-orchestrator"
    description = (
        "Main Conan manager and Python environment source for orchestrating MCP development flow."
    )
    topics = ("mcp", "orchestrator", "conan", "python", "templates", "prompts", "mermaid")

    # Pure Python application; no C/C++ settings required
    settings = None
    package_type = "application"

    exports_sources = (
        "src/*",
        "pyproject.toml",
        "README.md",
        "LICENSE",
        "project_orchestration.json",
        "project_templates.json",
        "component_templates.json",
        "config/*",
        "data/*",
        "docs/*",
    )

    def package(self):
        """Package the Python sources and supporting resources.

        The source tree is shipped under a 'python' folder and exposed via
        PYTHONPATH through the run environment so consumers can import the
        package or execute the CLI through the generated launcher script.
        """
        pkg = self.package_folder
        src = self.source_folder

        # Each export pattern mapped to its destination folder inside the
        # package: license text, docs, importable sources, runtime assets.
        layout = (
            ("LICENSE", ("licenses",)),
            ("README.md", ("res",)),
            ("docs/*", ("res", "docs")),
            ("src/*", ("python",)),
            ("project_orchestration.json", ("assets",)),
            ("project_templates.json", ("assets",)),
            ("component_templates.json", ("assets",)),
            ("config/*", ("assets", "config")),
            ("data/*", ("assets", "data")),
        )
        for pattern, dst_parts in layout:
            copy(self, pattern, dst=os.path.join(pkg, *dst_parts), src=src)

        # Emit an executable launcher that starts the server via the CLI.
        bin_dir = os.path.join(pkg, "bin")
        os.makedirs(bin_dir, exist_ok=True)
        launcher = """#!/usr/bin/env bash
set -euo pipefail
exec python -m mcp_project_orchestrator.fastmcp "$@"
"""
        launcher_path = os.path.join(bin_dir, "mcp-orchestrator")
        save(self, launcher_path, launcher)
        os.chmod(launcher_path, 0o755)

    def package_info(self):
        """Expose run-time environment so consumers can import and run tools.

        - Adds the packaged sources to PYTHONPATH
        - Adds the 'bin' directory to PATH for the 'mcp-orchestrator' launcher
        """
        # Made available in consumers' run environment when using VirtualRunEnv.
        self.runenv_info.append_path("PYTHONPATH", os.path.join(self.package_folder, "python"))
        self.runenv_info.append_path("PATH", os.path.join(self.package_folder, "bin"))
77 |
78 |
```
--------------------------------------------------------------------------------
/mcp-project-orchestrator/openssl/.github/workflows/validate-cursor-config.yml:
--------------------------------------------------------------------------------
```yaml
# CI workflow: validates the Cursor AI configuration shipped with the
# OpenSSL package — YAML frontmatter, Jinja2 template rendering, the MCP
# config schema, CLI entry points, and required environment variables.
name: Validate Cursor Configuration

# Only run when the OpenSSL package or the Cursor rules change.
on:
  push:
    branches: [ main, develop ]
    paths:
      - 'mcp-project-orchestrator/openssl/**'
      - '.cursor/**'
  pull_request:
    branches: [ main ]
    paths:
      - 'mcp-project-orchestrator/openssl/**'
      - '.cursor/**'

jobs:
  validate-cursor-config:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'

      # Editable install so the mcp_orchestrator modules used below resolve
      # against the repository checkout.
      - name: Install dependencies
        run: |
          cd mcp-project-orchestrator/openssl
          pip install -e .
          pip install pyyaml

      # Best-effort: a missing .cursor/rules directory is not a failure.
      - name: Validate YAML frontmatter
        run: |
          cd mcp-project-orchestrator/openssl
          python -m mcp_orchestrator.yaml_validator .cursor/rules/ || echo "No .cursor/rules directory found"

      - name: Validate template rendering
        run: |
          cd mcp-project-orchestrator/openssl
          python -m pytest tests/test_template_validation.py -v

      # Renders the MCP config template in-process and asserts the JSON shape.
      - name: Validate MCP configuration schema
        run: |
          cd mcp-project-orchestrator/openssl
          python -c "
          import json
          from pathlib import Path
          from mcp_orchestrator.cursor_deployer import CursorConfigDeployer

          # Test template rendering
          repo_root = Path('.')
          package_root = Path('.')
          deployer = CursorConfigDeployer(repo_root, package_root)

          platform_info = deployer.detect_platform()
          platform_info['repo_root'] = str(repo_root)

          # Render MCP config template
          content = deployer._render_template_content('cursor-rules/mcp.json.jinja2', platform_info)

          # Validate JSON
          config = json.loads(content)

          # Check required fields
          required_fields = ['mcpServers', 'globalShortcut', 'logging', 'features', 'platform']
          for field in required_fields:
              assert field in config, f'Missing required field: {field}'

          # Validate mcpServers
          assert isinstance(config['mcpServers'], dict)
          for server_name, server_config in config['mcpServers'].items():
              assert 'command' in server_config
              assert 'args' in server_config
              assert 'env' in server_config

          print('✅ MCP configuration schema validation passed')
          "

      # Smoke test: both CLI entry points at least respond to --help.
      - name: Test CLI commands
        run: |
          cd mcp-project-orchestrator/openssl
          python -m mcp_orchestrator.cli --help
          python -m mcp_orchestrator.deploy_cursor --help

      - name: Test environment variable validation
        run: |
          cd mcp-project-orchestrator/openssl
          python -c "
          from mcp_orchestrator.env_config import EnvironmentConfig
          env_config = EnvironmentConfig()

          # Test validation
          is_valid, missing = env_config.validate_required('openssl')
          print(f'Required variables valid: {is_valid}')
          print(f'Missing variables: {missing}')

          # Test error messages
          errors = env_config.get_validation_errors('openssl')
          print(f'Validation errors: {len(errors)}')

          print('✅ Environment variable validation passed')
          "
```
--------------------------------------------------------------------------------
/data/prompts/templates/multi-resource-context.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "id": "multi-resource-context",
3 | "name": "Multi-Resource Context Assistant",
4 | "description": "An advanced template that combines multiple resource types for comprehensive context gathering and analysis",
5 | "content": "You are an AI assistant with access to multiple integrated data sources. You have been configured with access to the following resources:\n\n1. **Project Files**: @resource://filesystem/{{project_path}}\n2. **GitHub Repository**: @resource://github/repo/{{owner}}/{{repo_name}}\n3. **Database Schema**: @resource://postgres/schema/{{database_name}}\n4. **Web Information**: @resource://puppeteer/url/{{web_url}}\n\n**Task Context:**\n\nYou are tasked with analyzing and providing insights on the {{project_type}} named '{{project_name}}'. This analysis should integrate information from all available resources to provide a comprehensive understanding.\n\n**Primary Analysis Goals:**\n\n1. **Project Architecture**\n - Analyze the project structure from filesystem\n - Understand the codebase organization from GitHub repository\n - Map database relationships to code structures\n - Identify integration points with external systems\n\n2. **Development Patterns**\n - Identify coding patterns and styles across the project\n - Analyze GitHub commits for development workflow patterns\n - Map database schema design to application architecture\n - Correlate external documentation with implementation details\n\n3. **Integration Points**\n - Identify how different components interface with each other\n - Analyze API endpoints and their connections\n - Map database operations to application functionality\n - Understand external service integration points\n\n4. **Documentation Analysis**\n - Review README and documentation files\n - Compare implementation with documentation\n - Identify gaps between documentation and implementation\n - Suggest documentation improvements\n\n5. 
**Recommendations**\n - Provide integrated recommendations that consider all aspects\n - Suggest architectural improvements\n - Identify potential performance optimizations\n - Recommend security enhancements\n\n**Resource-Specific Focus:**\n\n{{#if filesystem_focus}}\n**Filesystem Focus**: {{filesystem_focus}}\n{{/if}}\n\n{{#if github_focus}}\n**GitHub Focus**: {{github_focus}}\n{{/if}}\n\n{{#if database_focus}}\n**Database Focus**: {{database_focus}}\n{{/if}}\n\n{{#if web_focus}}\n**Web Focus**: {{web_focus}}\n{{/if}}\n\n{{#if additional_context}}\n**Additional Context**:\n{{additional_context}}\n{{/if}}",
6 | "isTemplate": true,
7 | "variables": [
8 | "project_path",
9 | "owner",
10 | "repo_name",
11 | "database_name",
12 | "web_url",
13 | "project_type",
14 | "project_name",
15 | "filesystem_focus",
16 | "github_focus",
17 | "database_focus",
18 | "web_focus",
19 | "additional_context"
20 | ],
21 | "tags": [
22 | "comprehensive",
23 | "multi-resource",
24 | "integrated",
25 | "development",
26 | "analysis",
27 | "resource-enabled",
28 | "full-context"
29 | ],
30 | "metadata": {
31 | "version": "1.0.0",
32 | "author": "MCP Prompts Team",
33 | "requires": ["filesystem", "github", "postgres", "puppeteer"],
34 | "resourcePatterns": [
35 | "filesystem/{{project_path}}",
36 | "github/repo/{{owner}}/{{repo_name}}",
37 | "postgres/schema/{{database_name}}",
38 | "puppeteer/url/{{web_url}}"
39 | ]
40 | }
41 | }
```
--------------------------------------------------------------------------------
/data/prompts/templates/mcp-resources-explorer.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "id": "mcp-resources-explorer",
3 | "name": "MCP Resources Explorer",
4 | "description": "A template for exploring and leveraging resources across multiple MCP servers",
5 | "content": "You are a specialized AI assistant that focuses on working with MCP resources. You have access to multiple MCP servers with different resource capabilities, and your task is to help navigate, discover, and utilize these resources effectively.\n\n### Resource Context:\n{{resource_context}}\n\n### Available MCP Servers with Resources:\n- **filesystem**: Access files and directories on the local system\n- **github**: Browse repositories, issues, and pull requests\n- **postgres**: Query and explore database structures\n- **memory**: Access stored contextual information\n- **{{additional_resource_servers}}**\n\n### Resource Exploration Task:\n{{exploration_task}}\n\n### Resource Integration Guidelines:\n1. Begin by using the `resources/list` method where available to discover available resources\n2. For file-based resources, examine directory structures before diving into specific files\n3. For database resources, understand the schema before executing queries\n4. When working with multiple resources, consider relationships between them\n5. Prioritize resources based on relevance to the current task\n6. {{custom_guidelines}}\n\n### Resource URI Format:\n When referring to resources, use the following format:\n- Filesystem: `@filesystem:/path/to/file`\n- GitHub: `@github:owner/repo/path/to/file`\n- Postgres: `@postgres:database/schema/table`\n- Memory: `@memory:context_id`\n\n### Response Structure:\n1. **Resource Discovery**: List the resources you've identified as relevant\n2. **Resource Analysis**: Examine the contents and relationships between resources\n3. **Resource Integration**: Show how these resources can work together\n4. **Recommendations**: Suggest optimal ways to leverage these resources\n5. **Next Steps**: Identify additional resources that might be helpful\n\nApproach this {{task_type}} exploration systematically, leveraging MCP resource capabilities to provide comprehensive insights.",
6 | "isTemplate": true,
7 | "variables": [
8 | "resource_context",
9 | "exploration_task",
10 | "task_type",
11 | "additional_resource_servers",
12 | "custom_guidelines"
13 | ],
14 | "tags": [
15 | "mcp-resources",
16 | "resource-integration",
17 | "template",
18 | "discovery"
19 | ],
20 | "createdAt": "2025-03-15T14:00:00.000Z",
21 | "updatedAt": "2025-03-15T14:00:00.000Z",
22 | "version": 1,
23 | "metadata": {
24 | "resource_capabilities": [
25 | "list",
26 | "get",
27 | "search",
28 | "query",
29 | "aggregate",
30 | "transform"
31 | ],
32 | "example_variables": {
33 | "resource_context": "A project with source code on GitHub, configuration in local files, and data in a PostgreSQL database",
34 | "exploration_task": "Map the relationships between database tables, code repositories, and configuration files to create a comprehensive system overview",
35 | "task_type": "system architecture analysis",
36 | "additional_resource_servers": "brave-search: Access web resources for documentation and best practices",
37 | "custom_guidelines": "Focus on identifying security-related configurations and data handling patterns across all resources"
38 | },
39 | "recommended_tools": [
40 | "resources/list",
41 | "resources/get",
42 | "resources/search"
43 | ]
44 | }
45 | }
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/templates/types.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Type definitions for the template system.
3 |
4 | This module defines enums and types used in the template system
5 | for project and component templates.
6 | """
7 |
8 | from enum import Enum, auto
9 | from typing import Any, Dict, List, Optional, Union
10 | from dataclasses import dataclass, field
11 |
class TemplateType(Enum):
    """Kinds of templates the orchestrator knows how to manage."""

    PROJECT = "project"
    COMPONENT = "component"
    DOCUMENTATION = "documentation"
    WORKFLOW = "workflow"
    CONFIGURATION = "configuration"

    def __str__(self) -> str:
        # Serialize as the plain string value so metadata round-trips
        # cleanly through JSON.
        return self.value
23 |
class TemplateCategory(Enum):
    """Organizational buckets used to group templates."""

    MICROSERVICES = "microservices"
    MONOLITH = "monolith"
    LIBRARY = "library"
    CLI = "cli"
    WEB_APP = "web_app"
    API = "api"
    DATABASE = "database"
    TESTING = "testing"
    DEPLOYMENT = "deployment"
    DOCUMENTATION = "documentation"

    def __str__(self) -> str:
        # Serialize as the plain string value so metadata round-trips
        # cleanly through JSON.
        return self.value
40 |
@dataclass
class TemplateMetadata:
    """Metadata for a template."""

    name: str                     # human-readable template name
    description: str              # short description of what the template provides
    type: TemplateType            # kind of template (project, component, ...)
    category: Optional[TemplateCategory] = None  # optional organizational category
    version: str = "1.0.0"        # template version string
    author: Optional[str] = None  # optional author attribution
    tags: List[str] = field(default_factory=list)            # free-form search tags
    dependencies: List[str] = field(default_factory=list)    # other templates this one needs
    variables: Dict[str, str] = field(default_factory=dict)  # substitution variables

    def to_dict(self) -> Dict[str, Union[str, None, List[str], Dict[str, str]]]:
        """Convert metadata to dictionary format.

        Returns:
            Dictionary representation of the metadata. Enum fields are
            serialized to their string values; unset optionals are None.
        """
        return {
            "name": self.name,
            "description": self.description,
            "type": str(self.type),
            "category": str(self.category) if self.category else None,
            "version": self.version,
            "author": self.author,
            "tags": self.tags,
            "dependencies": self.dependencies,
            "variables": self.variables,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "TemplateMetadata":
        """Create metadata from dictionary.

        Args:
            data: Dictionary containing metadata fields.

        Returns:
            TemplateMetadata instance.
        """
        # Work on a shallow copy: the previous implementation wrote the
        # converted enum values back into the caller's dict, mutating it
        # as a side effect of deserialization.
        data = dict(data)
        # Convert string values to enums
        if "type" in data:
            data["type"] = TemplateType(data["type"])
        if data.get("category"):
            data["category"] = TemplateCategory(data["category"])

        return cls(**data)
90 |
@dataclass
class TemplateFile:
    """A single file belonging to a template."""

    path: str                      # destination path of the file
    content: str                   # raw (possibly templated) file contents
    is_executable: bool = False    # whether the file should carry the exec bit
    variables: Dict[str, str] = field(default_factory=dict)  # per-file substitutions

    def to_dict(self) -> Dict[str, Union[str, bool, Dict[str, str]]]:
        """Serialize this file record to a plain dictionary.

        Returns:
            Dictionary representation of the file data.
        """
        return dict(
            path=self.path,
            content=self.content,
            is_executable=self.is_executable,
            variables=self.variables,
        )

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "TemplateFile":
        """Build a TemplateFile from a dictionary of its fields.

        Args:
            data: Dictionary containing file data fields.

        Returns:
            TemplateFile instance.
        """
        return cls(**data)
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/commands/openssl_cli.py:
--------------------------------------------------------------------------------
```python
1 | """OpenSSL project orchestration commands."""
2 | import click
3 | import os
4 | from pathlib import Path
5 | from typing import Optional
6 |
@click.command()
@click.option('--template', default='openssl-consumer',
              help='Project template (openssl-consumer, openssl-fips)')
@click.option('--project-name', prompt='Project name', help='Name of the project')
@click.option('--openssl-version', default='3.4.1', help='OpenSSL version')
@click.option('--deployment-target',
              type=click.Choice(['general', 'fips-government', 'embedded']),
              default='general', help='Deployment target')
@click.option('--enable-fips', is_flag=True, help='Enable FIPS mode')
@click.option('--author-name', default='Developer', help='Author name')
@click.option('--author-email', default='[email protected]', help='Author email')
def create_openssl_project(template, project_name, openssl_version,
                           deployment_target, enable_fips, author_name, author_email):
    """Create a new OpenSSL project from a named template.

    Renders the selected template with the collected variables into a
    new directory named after the project, then prints build next-steps.
    Exits with status 1 if template application fails.
    """

    from ..templates import TemplateManager

    click.echo(f"🔐 Creating OpenSSL project: {project_name}")

    # Auto-enable FIPS for government deployment
    if deployment_target == 'fips-government':
        enable_fips = True

    variables = {
        'project_name': project_name,
        'openssl_version': openssl_version,
        'deployment_target': deployment_target,
        'enable_fips': enable_fips,
        'author_name': author_name,
        'author_email': author_email
    }

    # Keep the try body minimal: only the operations that can fail.
    try:
        template_manager = TemplateManager("templates")
        template_manager.apply_template(f"openssl/{template}", variables, project_name)
    except Exception as e:
        click.echo(f"❌ Failed to create project: {e}")
        # Previously the command printed the error but still exited 0,
        # so scripted callers could not detect the failure.
        raise SystemExit(1)

    click.echo("✅ Project created successfully!")
    click.echo("\nNext steps:")
    click.echo(f"cd {project_name}")
    click.echo("conan remote add ${CONAN_REPOSITORY_NAME} ${CONAN_REPOSITORY_URL} --force")
    click.echo("conan install . --build=missing")
    click.echo("cmake --preset conan-default && cmake --build --preset conan-release")

    if enable_fips:
        click.echo("\n🔒 FIPS mode enabled for government deployment")
55 |
@click.command()
@click.option('--project-type', default='openssl', help='Project type')
@click.option('--platform',
              type=click.Choice(['linux', 'windows', 'macos']),
              default='linux', help='Development platform')
@click.option('--force', is_flag=True, help='Overwrite existing .cursor/')
def deploy_cursor(project_type, platform, force):
    """Deploy Cursor AI configuration into the current repository.

    Renders the package's Cursor rules/config into ./.cursor for the
    given platform and project type. Exits with status 1 on failure.
    """

    from pathlib import Path
    from ..cursor_deployer import CursorConfigDeployer

    click.echo(f"🤖 Deploying Cursor configuration for {project_type} on {platform}")

    repo_root = Path.cwd()
    # Three levels up from this module — presumably commands/ -> package
    # -> src/ is the installed package root; TODO confirm for all layouts.
    package_root = Path(__file__).parent.parent.parent

    deployer = CursorConfigDeployer(repo_root, package_root)

    try:
        deployer.deploy(force=force, platform=platform, project_type=project_type)
    except Exception as e:
        click.echo(f"❌ Failed to deploy Cursor config: {e}")
        # Previously the command printed the error but still exited 0,
        # so CI and scripts could not detect the failure.
        raise SystemExit(1)

    click.echo("✅ Cursor configuration deployed!")
    click.echo("\nNext steps:")
    click.echo("1. Open project in Cursor IDE")
    click.echo("2. Go to Settings > MCP and refresh")
    click.echo("3. Cursor will now have OpenSSL development context")
86 |
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/prompts/Advanced_Multi-Server_Integration_Template.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "name": "Advanced Multi-Server Integration Template",
3 | "description": "A comprehensive template that coordinates multiple MCP servers for complex tasks requiring diverse capabilities",
4 | "type": "prompt",
5 | "category": "other",
6 | "content": "# Advanced Multi-Server Assistant\n\nYou are an advanced AI assistant with access to multiple specialized MCP servers that significantly enhance your capabilities. Your task is to help with {{primary_task}} by coordinating these diverse tools and resources effectively.\n\n## Available MCP Servers and Capabilities\n\n### Core Resources and Data Access\n- **filesystem**: Access files and directories on the local system\n - Use for: examining code, reading configuration files, accessing project documentation\n- **github**: Interact with repositories, issues, pull requests, and code on GitHub\n - Use for: code exploration, commit history analysis, repository management\n- **postgres**: Execute SQL queries and interact with database content\n - Use for: data analysis, schema exploration, complex data retrieval\n\n### Knowledge Management\n- **prompts**: Access and apply specialized templates for different tasks\n - Use for: structured workflows, consistent outputs, domain-specific prompting\n- **memory**: Store and retrieve key information across conversation sessions\n - Use for: retaining context, tracking progress on multi-step tasks\n\n### Enhanced Reasoning\n- **sequential-thinking**: Break down complex problems into logical steps\n - Use for: multi-step reasoning, maintaining clarity in complex analyses\n- **mcp-compass**: Navigate between different capabilities with strategic direction\n - Use for: orchestrating complex workflows involving multiple servers\n\n### Specialized Capabilities\n- **puppeteer**: Automate browser interactions and web scraping\n - Use for: testing web applications, extracting data from websites\n- **elevenlabs**: Convert text to realistic speech\n - Use for: creating audio versions of content, accessibility enhancements\n- **brave-search**: Perform web searches for up-to-date information\n - Use for: research, finding relevant resources, staying current\n\n## Integration Strategy\n\nI will coordinate these capabilities 
based on your needs by:\n1. **Understanding the primary goal** of {{primary_task}}\n2. **Identifying which MCP servers** are most relevant for this task\n3. **Creating a workflow** that efficiently combines their capabilities\n4. **Executing tasks** in an optimal sequence\n5. **Synthesizing results** into a comprehensive response\n\n## Specialized Task Approach\n\nFor your specific task in {{domain_expertise}}, I'll focus on using:\n- {{primary_server_1}}\n- {{primary_server_2}}\n- {{primary_server_3}}\n\nAdditional servers may be utilized as needed based on our conversation.\n\n## Guiding Principles\n\n- I'll prioritize {{priority_principle}} in my approach\n- I'll maintain awareness of {{ethical_consideration}} throughout our interaction\n- I'll structure my responses to emphasize {{output_focus}}\n\nLet's begin by clarifying your specific needs for {{primary_task}} and how I can best leverage these MCP servers to assist you.",
7 | "variables": [
8 | "primary_task",
9 | "domain_expertise",
10 | "primary_server_1",
11 | "primary_server_2",
12 | "primary_server_3",
13 | "priority_principle",
14 | "ethical_consideration",
15 | "output_focus"
16 | ],
17 | "metadata": {
18 | "source": "/home/sparrow/projects/mcp-prompts/fixed_prompts/advanced-multi-server-template.json",
19 | "imported": true
20 | }
21 | }
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/prompts/other/Advanced_Multi-Server_Integration_Template.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "name": "Advanced Multi-Server Integration Template",
3 | "description": "A comprehensive template that coordinates multiple MCP servers for complex tasks requiring diverse capabilities",
4 | "type": "prompt",
5 | "category": "other",
6 | "content": "# Advanced Multi-Server Assistant\n\nYou are an advanced AI assistant with access to multiple specialized MCP servers that significantly enhance your capabilities. Your task is to help with {{primary_task}} by coordinating these diverse tools and resources effectively.\n\n## Available MCP Servers and Capabilities\n\n### Core Resources and Data Access\n- **filesystem**: Access files and directories on the local system\n - Use for: examining code, reading configuration files, accessing project documentation\n- **github**: Interact with repositories, issues, pull requests, and code on GitHub\n - Use for: code exploration, commit history analysis, repository management\n- **postgres**: Execute SQL queries and interact with database content\n - Use for: data analysis, schema exploration, complex data retrieval\n\n### Knowledge Management\n- **prompts**: Access and apply specialized templates for different tasks\n - Use for: structured workflows, consistent outputs, domain-specific prompting\n- **memory**: Store and retrieve key information across conversation sessions\n - Use for: retaining context, tracking progress on multi-step tasks\n\n### Enhanced Reasoning\n- **sequential-thinking**: Break down complex problems into logical steps\n - Use for: multi-step reasoning, maintaining clarity in complex analyses\n- **mcp-compass**: Navigate between different capabilities with strategic direction\n - Use for: orchestrating complex workflows involving multiple servers\n\n### Specialized Capabilities\n- **puppeteer**: Automate browser interactions and web scraping\n - Use for: testing web applications, extracting data from websites\n- **elevenlabs**: Convert text to realistic speech\n - Use for: creating audio versions of content, accessibility enhancements\n- **brave-search**: Perform web searches for up-to-date information\n - Use for: research, finding relevant resources, staying current\n\n## Integration Strategy\n\nI will coordinate these capabilities 
based on your needs by:\n1. **Understanding the primary goal** of {{primary_task}}\n2. **Identifying which MCP servers** are most relevant for this task\n3. **Creating a workflow** that efficiently combines their capabilities\n4. **Executing tasks** in an optimal sequence\n5. **Synthesizing results** into a comprehensive response\n\n## Specialized Task Approach\n\nFor your specific task in {{domain_expertise}}, I'll focus on using:\n- {{primary_server_1}}\n- {{primary_server_2}}\n- {{primary_server_3}}\n\nAdditional servers may be utilized as needed based on our conversation.\n\n## Guiding Principles\n\n- I'll prioritize {{priority_principle}} in my approach\n- I'll maintain awareness of {{ethical_consideration}} throughout our interaction\n- I'll structure my responses to emphasize {{output_focus}}\n\nLet's begin by clarifying your specific needs for {{primary_task}} and how I can best leverage these MCP servers to assist you.",
7 | "variables": [
8 | "primary_task",
9 | "domain_expertise",
10 | "primary_server_1",
11 | "primary_server_2",
12 | "primary_server_3",
13 | "priority_principle",
14 | "ethical_consideration",
15 | "output_focus"
16 | ],
17 | "metadata": {
18 | "source": "/home/sparrow/projects/mcp-prompts/fixed_prompts/advanced-multi-server-template.json",
19 | "imported": true
20 | }
21 | }
```
--------------------------------------------------------------------------------
/printcast-agent/docker-compose.yml:
--------------------------------------------------------------------------------
```yaml
1 | version: '3.8'
2 |
3 | services:
4 | # Main PrintCast MCP Server
5 | printcast:
6 | build:
7 | context: .
8 | dockerfile: Containerfile
9 | container_name: printcast-agent
10 | restart: unless-stopped
11 | ports:
12 | - "8000:8000" # MCP Server
13 | - "5038:5038" # Asterisk AMI
14 | - "5060:5060/udp" # SIP UDP
15 | - "5060:5060/tcp" # SIP TCP
16 | - "10000-10100:10000-10100/udp" # RTP ports
17 | volumes:
18 | - ./config:/app/config
19 | - ./logs:/var/log/printcast
20 | - ./data:/var/lib/printcast
21 | - /var/run/cups:/var/run/cups # CUPS socket
22 | env_file:
23 | - .env
24 | environment:
25 | - ASTERISK_ENABLED=true
26 | - PYTHONUNBUFFERED=1
27 | networks:
28 | - printcast-network
29 | depends_on:
30 | - redis
31 | - postgres
32 |
33 | # Redis for caching and task queue
34 | redis:
35 | image: redis:7-alpine
36 | container_name: printcast-redis
37 | restart: unless-stopped
38 | ports:
39 | - "6379:6379"
40 | volumes:
41 | - redis-data:/data
42 | networks:
43 | - printcast-network
44 | command: redis-server --appendonly yes
45 |
46 | # PostgreSQL for persistent data
47 | postgres:
48 | image: postgres:15-alpine
49 | container_name: printcast-db
50 | restart: unless-stopped
51 | ports:
52 | - "5432:5432"
53 | environment:
54 | POSTGRES_DB: printcast
55 | POSTGRES_USER: printcast
56 | POSTGRES_PASSWORD: ${DB_PASSWORD:-printcast123}
57 | volumes:
58 | - postgres-data:/var/lib/postgresql/data
59 | - ./scripts/init.sql:/docker-entrypoint-initdb.d/init.sql
60 | networks:
61 | - printcast-network
62 |
63 | # Asterisk PBX (optional, if not using host Asterisk)
64 | asterisk:
65 | image: andrius/asterisk:latest
66 | container_name: printcast-asterisk
67 | restart: unless-stopped
68 | ports:
69 | - "5038:5038" # AMI
70 | - "5060:5060/udp" # SIP
71 | - "5060:5060/tcp" # SIP
72 | - "10000-10100:10000-10100/udp" # RTP
73 | volumes:
74 | - ./config/asterisk:/etc/asterisk
75 | - asterisk-data:/var/lib/asterisk
76 | - asterisk-logs:/var/log/asterisk
77 | networks:
78 | - printcast-network
79 | profiles:
80 | - asterisk
81 |
82 | # CUPS print server (optional, if not using host CUPS)
83 | cups:
84 | image: ydkn/cups:latest
85 | container_name: printcast-cups
86 | restart: unless-stopped
87 | ports:
88 | - "631:631"
89 | volumes:
90 | - /var/run/dbus:/var/run/dbus
91 | - cups-data:/etc/cups
92 | environment:
93 | CUPS_USER: admin
94 | CUPS_PASSWORD: ${CUPS_PASSWORD:-admin}
95 | networks:
96 | - printcast-network
97 | profiles:
98 | - printing
99 |
100 | # Monitoring with Prometheus
101 | prometheus:
102 | image: prom/prometheus:latest
103 | container_name: printcast-prometheus
104 | restart: unless-stopped
105 | ports:
106 | - "9090:9090"
107 | volumes:
108 | - ./config/prometheus:/etc/prometheus
109 | - prometheus-data:/prometheus
110 | command:
111 | - '--config.file=/etc/prometheus/prometheus.yml'
112 | - '--storage.tsdb.path=/prometheus'
113 | networks:
114 | - printcast-network
115 | profiles:
116 | - monitoring
117 |
118 | # Grafana for visualization
119 | grafana:
120 | image: grafana/grafana:latest
121 | container_name: printcast-grafana
122 | restart: unless-stopped
123 | ports:
124 | - "3000:3000"
125 | volumes:
126 | - grafana-data:/var/lib/grafana
127 | - ./config/grafana:/etc/grafana/provisioning
128 | environment:
129 | GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD:-admin}
130 | GF_INSTALL_PLUGINS: redis-datasource
131 | networks:
132 | - printcast-network
133 | depends_on:
134 | - prometheus
135 | profiles:
136 | - monitoring
137 |
138 | networks:
139 | printcast-network:
140 | driver: bridge
141 |
142 | volumes:
143 | redis-data:
144 | postgres-data:
145 | asterisk-data:
146 | asterisk-logs:
147 | cups-data:
148 | prometheus-data:
149 | grafana-data:
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/core/base.py:
--------------------------------------------------------------------------------
```python
1 | """Base classes for MCP Project Orchestrator components.
2 |
3 | This module provides abstract base classes that define the core interfaces
4 | for templates, components, and managers in the MCP Project Orchestrator.
5 | """
6 |
7 | from abc import ABC, abstractmethod
8 | from pathlib import Path
9 | from typing import Any, Dict, List, Optional, Union
10 |
11 | from pydantic import BaseModel
12 |
13 |
class BaseComponent(ABC):
    """Abstract base class for all MCP components."""

    def __init__(self, name: str, config: Optional[Dict[str, Any]] = None):
        """Create a component.

        Args:
            name: Component name.
            config: Optional configuration mapping; an empty dict is used
                when none is supplied.
        """
        self.name = name
        self.config = config if config else {}

    @abstractmethod
    async def initialize(self) -> None:
        """Perform component start-up work."""
        ...

    @abstractmethod
    async def cleanup(self) -> None:
        """Release any resources held by the component."""
        ...
36 |
37 |
class BaseTemplate(ABC):
    """Abstract base class for all MCP templates."""

    def __init__(self, template_path: Union[str, Path]):
        """Record the template location.

        Args:
            template_path: File or directory holding the template; coerced
                to a ``Path``.
        """
        self.template_path = Path(template_path)

    @abstractmethod
    async def render(self, context: Dict[str, Any]) -> str:
        """Render the template against *context*.

        Args:
            context: Mapping of template variable names to values.

        Returns:
            str: The rendered template text.
        """
        ...

    @abstractmethod
    async def validate(self) -> bool:
        """Check the template's structure and content.

        Returns:
            bool: True when the template is valid, False otherwise.
        """
        ...
69 |
70 |
class BaseManager(ABC):
    """Abstract base class for all MCP managers."""

    def __init__(self, config_path: Optional[Union[str, Path]] = None):
        """Create a manager.

        Args:
            config_path: Optional location of a configuration file; kept as
                None when not supplied.
        """
        self.config_path = Path(config_path) if config_path else None
        # Registry of components, keyed by component name.
        self.components: Dict[str, BaseComponent] = {}

    @abstractmethod
    async def load_config(self) -> None:
        """Load the manager's configuration."""
        ...

    @abstractmethod
    async def register_component(self, component: BaseComponent) -> None:
        """Add a component to the registry.

        Args:
            component: The component instance to register.
        """
        ...

    @abstractmethod
    async def get_component(self, name: str) -> Optional[BaseComponent]:
        """Look up a registered component by name.

        Args:
            name: Name of the component.

        Returns:
            Optional[BaseComponent]: The matching component, or None when
            no component of that name is registered.
        """
        ...

    @abstractmethod
    async def list_components(self) -> List[str]:
        """Enumerate the registry.

        Returns:
            List[str]: Names of all registered components.
        """
        ...
117 |
class BaseOrchestrator(BaseComponent):
    """Base class for orchestrator components.

    Orchestrators manage resources and coordinate other parts of the
    system; unlike plain components they receive a full configuration
    object rather than a settings dictionary.
    """

    def __init__(self, config):
        """Initialize a base orchestrator.

        Args:
            config: Configuration instance. If it exposes a ``name``
                attribute, that becomes the component name; otherwise
                "orchestrator" is used.
        """
        # getattr with a default replaces the original hasattr ternary.
        super().__init__(name=getattr(config, "name", "orchestrator"), config=config)
        # Re-assign deliberately: BaseComponent substitutes {} for a falsy
        # config, but orchestrators must keep the original object as-is.
        self.config = config
133 |
```
--------------------------------------------------------------------------------
/tests/test_exceptions.py:
--------------------------------------------------------------------------------
```python
"""Tests for exception handling."""

import pytest
from mcp_project_orchestrator.core.exceptions import (
    MCPException,
    ConfigError,
    TemplateError,
    PromptError,
    MermaidError,
    ValidationError,
    ResourceError
)


def test_mcp_exception_basic():
    """A bare MCPException exposes message, code and details."""
    err = MCPException("Test error")
    assert isinstance(err, Exception)
    assert err.message == "Test error"
    assert "Test error" in str(err)
    assert hasattr(err, 'code')
    assert hasattr(err, 'details')


def test_config_error():
    """ConfigError records the offending configuration path."""
    err = ConfigError("Invalid config", "/path/to/config")
    assert isinstance(err, MCPException)
    assert err.message == "Invalid config"
    assert err.config_path == "/path/to/config"
    assert "Invalid config" in str(err)


def test_template_error():
    """TemplateError records the template path."""
    err = TemplateError("Template not found", "/path/to/template")
    assert isinstance(err, MCPException)
    assert err.message == "Template not found"
    assert err.template_path == "/path/to/template"
    assert "Template not found" in str(err)


def test_prompt_error():
    """PromptError records the prompt name."""
    err = PromptError("Prompt failed", "my-prompt")
    assert isinstance(err, MCPException)
    assert err.message == "Prompt failed"
    assert err.prompt_name == "my-prompt"
    assert "Prompt failed" in str(err)


def test_mermaid_error():
    """MermaidError records the diagram type."""
    err = MermaidError("Diagram generation failed", "flowchart")
    assert isinstance(err, MCPException)
    assert err.message == "Diagram generation failed"
    assert err.diagram_type == "flowchart"
    assert "Diagram generation failed" in str(err)


def test_validation_error():
    """ValidationError keeps the list of individual validation errors."""
    problems = ["error1", "error2"]
    err = ValidationError("Validation failed", problems)
    assert isinstance(err, MCPException)
    assert err.message == "Validation failed"
    assert err.validation_errors == problems
    assert "Validation failed" in str(err)


def test_resource_error():
    """ResourceError records the resource path."""
    err = ResourceError("Resource missing", "/path/to/resource")
    assert isinstance(err, MCPException)
    assert err.message == "Resource missing"
    assert err.resource_path == "/path/to/resource"
    assert "Resource missing" in str(err)


def test_exception_hierarchy():
    """Every custom exception derives from MCPException, itself an Exception."""
    for exc_type in (
        ConfigError,
        TemplateError,
        PromptError,
        MermaidError,
        ValidationError,
        ResourceError,
    ):
        assert issubclass(exc_type, MCPException)

    assert issubclass(MCPException, Exception)


def test_exception_catching():
    """Subclass instances are catchable through the MCPException base."""
    try:
        raise TemplateError("Test template error")
    except MCPException as err:
        assert "Test template error" in str(err)
    except Exception:
        pytest.fail("Should have caught as MCPException")


def test_exception_with_cause():
    """Explicit chaining stores the original error as both cause and __cause__."""
    try:
        try:
            raise ValueError("Original error")
        except ValueError as inner:
            raise TemplateError("Template error", cause=inner) from inner
    except TemplateError as outer:
        assert "Template error" in str(outer)
        assert isinstance(outer.__cause__, ValueError)
        assert outer.cause == outer.__cause__
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/cursor_deployer.py:
--------------------------------------------------------------------------------
```python
1 | """Deploy Cursor configuration to local repository (profile management pattern)"""
2 | from pathlib import Path
3 | from jinja2 import Template
4 | import platform
5 | import os
6 | import shutil
7 |
class CursorConfigDeployer:
    """Deploy Cursor configuration templates to a local repository.

    Renders the Jinja2 rule templates shipped with the package into the
    repository's ``.cursor/`` directory and writes a basic MCP server
    configuration (``mcp.json``).
    """

    def __init__(self, repo_root: Path, package_root: Path):
        """Initialize the deployer.

        Args:
            repo_root: Repository that receives the ``.cursor/`` directory.
            package_root: Package root containing ``cursor-templates/``.
        """
        self.repo_root = Path(repo_root)
        self.package_root = Path(package_root)
        self.cursor_dir = self.repo_root / ".cursor"
        self.templates_dir = self.package_root / "cursor-templates"

    def deploy(self, force: bool = False, platform: str = None, project_type: str = "openssl"):
        """Deploy Cursor configuration to the repository.

        Args:
            force: Overwrite an existing ``.cursor/`` directory.
            platform: Target OS name (e.g. ``"linux"``); auto-detected
                from the running system when None.
            project_type: Template family to deploy (subdirectory of
                ``cursor-templates/``).
        """
        if self.cursor_dir.exists() and not force:
            print("ℹ️  .cursor/ already exists. Use --force to overwrite.")
            return

        # Auto-detect the OS if not specified.  BUG FIX: the `platform`
        # parameter shadows the stdlib module of the same name, so the
        # original `platform.system()` called `.system()` on None and
        # raised AttributeError.  Re-import the module under an alias.
        if platform is None:
            import platform as _platform_module
            platform = _platform_module.system().lower()

        platform_info = {
            "os": platform,
            "project_type": project_type,
            "user": os.getenv("USER", "developer"),
            "home": str(Path.home()),
            "repo_root": str(self.repo_root)
        }

        # Create .cursor directory structure
        self.cursor_dir.mkdir(exist_ok=True)
        (self.cursor_dir / "rules").mkdir(exist_ok=True)

        # Deploy platform-specific rules
        self._deploy_rules(platform_info, platform, project_type)

        # Deploy MCP configuration
        self._deploy_mcp_config(platform_info, project_type)

        print(f"✅ Cursor configuration deployed to {self.cursor_dir}")
        print(f"   Platform: {platform}")
        print(f"   Project type: {project_type}")

    def _deploy_rules(self, platform_info: dict, platform: str, project_type: str):
        """Render shared and platform-specific rule files into .cursor/rules.

        Templates that do not exist for the project type are skipped
        silently, so partial template sets are supported.
        """
        # Deploy shared rules
        shared_template = self.templates_dir / project_type / "shared.mdc.jinja2"
        if shared_template.exists():
            self._render_template(
                shared_template,
                self.cursor_dir / "rules" / "shared.mdc",
                platform_info
            )

        # Deploy platform-specific rules
        platform_template = self.templates_dir / project_type / f"{platform}-dev.mdc.jinja2"
        if platform_template.exists():
            self._render_template(
                platform_template,
                self.cursor_dir / "rules" / f"{platform}-dev.mdc",
                platform_info
            )

    def _deploy_mcp_config(self, platform_info: dict, project_type: str):
        """Write a minimal MCP server configuration to .cursor/mcp.json."""
        mcp_config = {
            "mcpServers": {
                "mcp-project-orchestrator": {
                    "command": "python",
                    "args": ["-m", "mcp_project_orchestrator"],
                    "env": {
                        "PROJECT_TYPE": project_type,
                        "PLATFORM": platform_info["os"]
                    }
                }
            }
        }

        import json
        (self.cursor_dir / "mcp.json").write_text(json.dumps(mcp_config, indent=2))

    def _render_template(self, template_path: Path, output_path: Path, context: dict):
        """Render a Jinja2 template with *context* and write it to *output_path*."""
        template = Template(template_path.read_text())
        rendered = template.render(**context)
        output_path.write_text(rendered)
        print(f"   📄 {output_path.relative_to(self.cursor_dir)}")
97 |
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/prompts/analysis/Sequential_Data_Analysis_with_MCP_Integration.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "name": "Sequential Data Analysis with MCP Integration",
3 | "description": "Advanced prompt template for multi-stage data analysis that integrates filesystem, database, memory, and sequential thinking MCP servers for comprehensive data workflows.",
4 | "type": "prompt",
5 | "category": "analysis",
6 | "content": "# Sequential Data Analysis Assistant\n\nYou are a specialized AI assistant for comprehensive data analysis, with access to multiple MCP servers that enhance your capabilities. Your task is to analyze {{data_type}} data from {{data_source}} and provide insights about {{analysis_objective}}.\n\n## Available MCP Servers\n\nYou have access to the following MCP servers to assist with this analysis:\n\n- **Filesystem**: Access data files, configuration, and save analysis outputs\n- **PostgreSQL**: Query structured data from databases\n- **Memory**: Store intermediate analysis results and insights\n- **Sequential Thinking**: Break complex analysis into logical steps\n- **GitHub**: Access code repositories, documentation, and data processing scripts\n{{additional_servers}}\n\n## Data Context\n\n- **Data Type**: {{data_type}}\n- **Data Source**: {{data_source}}\n- **Analysis Objective**: {{analysis_objective}}\n- **Technical Background**: {{technical_background}}\n- **Required Output Format**: {{output_format}}\n\n## Analysis Plan\n\nYour data analysis should follow these sequential steps, utilizing appropriate MCP servers at each stage:\n\n### 1. Data Discovery and Acquisition\n- Identify all relevant data sources across available servers\n- Use Filesystem MCP to check available data files\n- Use PostgreSQL MCP to explore database schema and available tables\n- Use GitHub MCP to locate relevant data processing scripts\n- Document data types, formats, and relationships\n\n### 2. Data Preparation\n- Use Sequential Thinking MCP to plan data cleaning steps\n- Process data to handle missing values, outliers, transformations\n- Use Memory MCP to store intermediate processing results\n- Document data preparation decisions and their rationale\n\n### 3. 
Exploratory Analysis\n- Calculate descriptive statistics\n- Identify patterns, correlations, and potential insights\n- Generate appropriate visualizations (described textually)\n- Store key observations in Memory MCP for later reference\n\n### 4. Advanced Analysis\n- Apply statistical methods or machine learning techniques appropriate for {{analysis_objective}}\n- Use Sequential Thinking MCP to break down complex analysis into logical steps\n- Reference relevant GitHub repositories for specialized algorithms\n- Document methodology, assumptions, and limitations\n\n### 5. Synthesis and Reporting\n- Summarize key findings and insights\n- Relate results back to {{analysis_objective}}\n- Provide actionable recommendations\n- Use Filesystem MCP to save analysis results in {{output_format}}\n\n## Guidelines for Your Response\n\n1. Begin by outlining your understanding of the analysis objective and the data context\n2. Specify which MCP servers you'll use for each analysis stage\n3. Provide a structured analysis following the sequential steps above\n4. For complex analyses, use the Sequential Thinking MCP to break down your reasoning\n5. Store important intermediate findings in Memory MCP and reference them in your final analysis\n6. Present results in the required {{output_format}}\n7. Include recommendations for further analysis or actions\n8. Document any limitations of your analysis or areas requiring human validation\n\n{{additional_guidelines}}",
7 | "variables": [
8 | "data_type",
9 | "data_source",
10 | "analysis_objective",
11 | "technical_background",
12 | "output_format",
13 | "additional_servers",
14 | "additional_guidelines"
15 | ],
16 | "metadata": {
17 | "source": "/home/sparrow/projects/mcp-prompts/prompts/sequential-data-analysis.json",
18 | "imported": true
19 | }
20 | }
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/prompts/Sequential_Data_Analysis_with_MCP_Integration.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "name": "Sequential Data Analysis with MCP Integration",
3 | "description": "Advanced prompt template for multi-stage data analysis that integrates filesystem, database, memory, and sequential thinking MCP servers for comprehensive data workflows.",
4 | "type": "prompt",
5 | "category": "analysis",
6 | "content": "# Sequential Data Analysis Assistant\n\nYou are a specialized AI assistant for comprehensive data analysis, with access to multiple MCP servers that enhance your capabilities. Your task is to analyze {{data_type}} data from {{data_source}} and provide insights about {{analysis_objective}}.\n\n## Available MCP Servers\n\nYou have access to the following MCP servers to assist with this analysis:\n\n- **Filesystem**: Access data files, configuration, and save analysis outputs\n- **PostgreSQL**: Query structured data from databases\n- **Memory**: Store intermediate analysis results and insights\n- **Sequential Thinking**: Break complex analysis into logical steps\n- **GitHub**: Access code repositories, documentation, and data processing scripts\n{{additional_servers}}\n\n## Data Context\n\n- **Data Type**: {{data_type}}\n- **Data Source**: {{data_source}}\n- **Analysis Objective**: {{analysis_objective}}\n- **Technical Background**: {{technical_background}}\n- **Required Output Format**: {{output_format}}\n\n## Analysis Plan\n\nYour data analysis should follow these sequential steps, utilizing appropriate MCP servers at each stage:\n\n### 1. Data Discovery and Acquisition\n- Identify all relevant data sources across available servers\n- Use Filesystem MCP to check available data files\n- Use PostgreSQL MCP to explore database schema and available tables\n- Use GitHub MCP to locate relevant data processing scripts\n- Document data types, formats, and relationships\n\n### 2. Data Preparation\n- Use Sequential Thinking MCP to plan data cleaning steps\n- Process data to handle missing values, outliers, transformations\n- Use Memory MCP to store intermediate processing results\n- Document data preparation decisions and their rationale\n\n### 3. 
Exploratory Analysis\n- Calculate descriptive statistics\n- Identify patterns, correlations, and potential insights\n- Generate appropriate visualizations (described textually)\n- Store key observations in Memory MCP for later reference\n\n### 4. Advanced Analysis\n- Apply statistical methods or machine learning techniques appropriate for {{analysis_objective}}\n- Use Sequential Thinking MCP to break down complex analysis into logical steps\n- Reference relevant GitHub repositories for specialized algorithms\n- Document methodology, assumptions, and limitations\n\n### 5. Synthesis and Reporting\n- Summarize key findings and insights\n- Relate results back to {{analysis_objective}}\n- Provide actionable recommendations\n- Use Filesystem MCP to save analysis results in {{output_format}}\n\n## Guidelines for Your Response\n\n1. Begin by outlining your understanding of the analysis objective and the data context\n2. Specify which MCP servers you'll use for each analysis stage\n3. Provide a structured analysis following the sequential steps above\n4. For complex analyses, use the Sequential Thinking MCP to break down your reasoning\n5. Store important intermediate findings in Memory MCP and reference them in your final analysis\n6. Present results in the required {{output_format}}\n7. Include recommendations for further analysis or actions\n8. Document any limitations of your analysis or areas requiring human validation\n\n{{additional_guidelines}}",
7 | "variables": [
8 | "data_type",
9 | "data_source",
10 | "analysis_objective",
11 | "technical_background",
12 | "output_format",
13 | "additional_servers",
14 | "additional_guidelines"
15 | ],
16 | "metadata": {
17 | "source": "/home/sparrow/projects/mcp-prompts/prompts/sequential-data-analysis.json",
18 | "imported": true
19 | }
20 | }
```
--------------------------------------------------------------------------------
/tests/test_config.py:
--------------------------------------------------------------------------------
```python
"""Tests for configuration management."""

import pytest
from pathlib import Path
import tempfile
import json
import yaml

from mcp_project_orchestrator.core import MCPConfig, Config


def test_config_creation():
    """A fresh config exposes sensible defaults."""
    cfg = MCPConfig()
    assert cfg.settings is not None
    assert cfg.settings.workspace_dir == Path.cwd()
    assert cfg.settings.host == "localhost"
    assert cfg.settings.port == 8000


def test_config_alias():
    """Config is a straight alias of MCPConfig."""
    assert Config is MCPConfig
    assert isinstance(Config(), MCPConfig)


def test_config_path_helpers(tmp_path):
    """Path helper methods resolve against the configured directories."""
    cfg = MCPConfig()
    cfg.settings.workspace_dir = tmp_path
    cfg.settings.templates_dir = tmp_path / "templates"
    cfg.settings.prompts_dir = tmp_path / "prompts"
    cfg.settings.resources_dir = tmp_path / "resources"

    assert cfg.get_workspace_path("test", "file.txt") == tmp_path / "test" / "file.txt"
    assert cfg.get_template_path("template.json") == tmp_path / "templates" / "template.json"
    assert cfg.get_prompt_path("prompt.json") == tmp_path / "prompts" / "prompt.json"
    assert cfg.get_resource_path("resource.txt") == tmp_path / "resources" / "resource.txt"


def test_config_json_loading(tmp_path):
    """Settings are picked up from a JSON config file."""
    config_file = tmp_path / "config.json"
    config_file.write_text(json.dumps({
        "workspace_dir": str(tmp_path / "workspace"),
        "templates_dir": str(tmp_path / "templates"),
        "prompts_dir": str(tmp_path / "prompts"),
        "host": "0.0.0.0",
        "port": 9000,
        "debug": True,
    }))

    cfg = MCPConfig(config_path=config_file)
    cfg.load_config()

    assert cfg.settings.host == "0.0.0.0"
    assert cfg.settings.port == 9000
    assert cfg.settings.debug is True


def test_config_yaml_loading(tmp_path):
    """Settings are picked up from a YAML config file."""
    config_file = tmp_path / "config.yml"
    config_file.write_text(yaml.dump({
        "workspace_dir": str(tmp_path / "workspace"),
        "host": "127.0.0.1",
        "port": 8080,
    }))

    cfg = MCPConfig(config_path=config_file)
    cfg.load_config()

    assert cfg.settings.host == "127.0.0.1"
    assert cfg.settings.port == 8080


def test_config_directory_creation(tmp_path):
    """_create_directories materialises every configured directory."""
    cfg = MCPConfig()
    cfg.settings.workspace_dir = tmp_path / "workspace"
    cfg.settings.templates_dir = tmp_path / "templates"
    cfg.settings.prompts_dir = tmp_path / "prompts"
    cfg.settings.resources_dir = tmp_path / "resources"
    cfg.settings.output_dir = tmp_path / "output"

    cfg._create_directories()

    for directory in (
        cfg.settings.workspace_dir,
        cfg.settings.templates_dir,
        cfg.settings.prompts_dir,
        cfg.settings.resources_dir,
        cfg.settings.output_dir,
    ):
        assert directory.exists()


def test_config_invalid_file_format(tmp_path):
    """Unknown config file extensions raise a ValueError."""
    config_file = tmp_path / "config.txt"
    config_file.write_text("invalid config")

    cfg = MCPConfig(config_path=config_file)

    with pytest.raises(ValueError, match="Unsupported config file format"):
        cfg.load_config()


def test_config_settings_defaults():
    """Default settings include host/port and the template extension map."""
    cfg = MCPConfig()

    assert cfg.settings.host == "localhost"
    assert cfg.settings.port == 8000
    assert cfg.settings.debug is False
    assert cfg.settings.template_extensions[".py"] == "python"
    assert cfg.settings.template_extensions[".js"] == "javascript"
130 |
```
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
```python
"""Pytest configuration and fixtures."""

import os
import shutil
import tempfile
from pathlib import Path
import pytest
import json

from mcp_project_orchestrator.core import MCPConfig
from mcp_project_orchestrator.templates import TemplateManager
from mcp_project_orchestrator.prompt_manager import PromptManager
from mcp_project_orchestrator.mermaid import MermaidGenerator, MermaidRenderer

@pytest.fixture
def temp_dir():
    """Yield a fresh temporary directory, removed after the test."""
    path = Path(tempfile.mkdtemp())
    yield path
    shutil.rmtree(path)

@pytest.fixture
def test_config(temp_dir):
    """Build an MCPConfig whose directories all live under temp_dir."""
    config = MCPConfig()
    settings = config.settings
    settings.workspace_dir = temp_dir / "workspace"
    settings.templates_dir = temp_dir / "templates"
    settings.resources_dir = temp_dir / "resources"
    settings.prompts_dir = temp_dir / "prompts"
    settings.output_dir = temp_dir / "diagrams"

    # Materialise every directory the managers expect to exist.
    for directory in (
        settings.workspace_dir,
        settings.templates_dir,
        settings.resources_dir,
        settings.prompts_dir,
        settings.output_dir,
    ):
        directory.mkdir(parents=True, exist_ok=True)

    return config

@pytest.fixture
def template_manager(test_config):
    """Template manager rooted at the test templates directory."""
    return TemplateManager(test_config.settings.templates_dir)

@pytest.fixture
def prompt_manager(test_config):
    """Prompt manager bound to the test configuration."""
    return PromptManager(test_config)

@pytest.fixture
def mermaid_generator(test_config):
    """Mermaid generator bound to the test configuration."""
    return MermaidGenerator(test_config)

@pytest.fixture
def mermaid_renderer(test_config):
    """Mermaid renderer bound to the test configuration."""
    return MermaidRenderer(test_config)

@pytest.fixture
def sample_project_template(temp_dir):
    """Create a minimal project template on disk and return its directory."""
    template_dir = temp_dir / "templates" / "sample-project"
    template_dir.mkdir(parents=True)

    # Template manifest consumed by the template manager.
    manifest = {
        "name": "sample-project",
        "description": "A sample project template for testing",
        "type": "project",
        "version": "1.0.0",
        "variables": {
            "project_name": "Name of the project",
            "project_description": "Project description",
            "author_name": "Author's name",
            "author_email": "Author's email"
        }
    }
    (template_dir / "template.json").write_text(json.dumps(manifest, indent=2))

    # Files rendered into a new project.
    files_dir = template_dir / "files"
    files_dir.mkdir()

    (files_dir / "README.md").write_text("# {{ project_name }}\n\n{{ project_description }}")
    (files_dir / "pyproject.toml").write_text(
        '[project]\nname = "{{ project_name }}"\nauthor = "{{ author_name }}"'
    )

    (files_dir / "src").mkdir()
    (files_dir / "tests").mkdir()

    return template_dir

@pytest.fixture
def sample_component_template(temp_dir):
    """Create a minimal component template on disk and return its directory."""
    template_dir = temp_dir / "templates" / "sample-component"
    template_dir.mkdir(parents=True)

    # Template manifest consumed by the template manager.
    manifest = {
        "name": "sample-component",
        "description": "A sample component template for testing",
        "type": "component",
        "version": "1.0.0",
        "variables": {
            "component_name": "Name of the component",
            "component_description": "Component description"
        }
    }
    (template_dir / "template.json").write_text(json.dumps(manifest, indent=2))

    # Files rendered into a new component.
    files_dir = template_dir / "files"
    files_dir.mkdir()

    (files_dir / "{{ component_name }}.py").write_text(
        '"""{{ component_description }}"""\n\nclass {{ component_name }}:\n    pass'
    )

    return template_dir
```
--------------------------------------------------------------------------------
/mcp-project-orchestrator/openssl/pyproject.toml:
--------------------------------------------------------------------------------
```toml
# Packaging metadata for the OpenSSL-focused slice of mcp-project-orchestrator.
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "mcp-project-orchestrator-openssl"
version = "0.1.0"
description = "Cursor configuration management for OpenSSL development"
readme = "README.md"
requires-python = ">=3.8"
license = {text = "MIT"}
authors = [
    {name = "MCP Project Orchestrator Team", email = "[email protected]"},
]
maintainers = [
    {name = "MCP Project Orchestrator Team", email = "[email protected]"},
]
keywords = ["openssl", "cursor", "ide", "configuration", "management", "conan", "build", "profiles"]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
    "Topic :: Software Development :: Build Tools",
    "Topic :: Software Development :: Libraries :: Python Modules",
    "Topic :: System :: Systems Administration",
    "Topic :: Security :: Cryptography",
]
dependencies = [
    "click>=8.0.0",
    "jinja2>=3.0.0",
    # NOTE(review): dead marker — requires-python is ">=3.8", so the
    # python_version < '3.4' guard can never be true; confirm and drop.
    "pathlib2>=2.3.0; python_version < '3.4'",
]

[project.optional-dependencies]
# "dev" is a superset of "test" plus formatting/lint/typing tools.
dev = [
    "pytest>=7.0.0",
    "pytest-cov>=4.0.0",
    "pytest-xdist>=3.0.0",
    "black>=23.0.0",
    "ruff>=0.1.0",
    "mypy>=1.0.0",
    "pre-commit>=3.0.0",
]
test = [
    "pytest>=7.0.0",
    "pytest-cov>=4.0.0",
    "pytest-xdist>=3.0.0",
]

[project.urls]
Homepage = "https://github.com/sparesparrow/mcp-project-orchestrator"
Documentation = "https://github.com/sparesparrow/mcp-project-orchestrator/blob/main/docs/"
Repository = "https://github.com/sparesparrow/mcp-project-orchestrator"
"Bug Tracker" = "https://github.com/sparesparrow/mcp-project-orchestrator/issues"

# Console entry points installed with the package.
[project.scripts]
mcp-orchestrator = "mcp_orchestrator.cli:cli"
deploy-cursor = "mcp_orchestrator.deploy_cursor:deploy_cursor"

[tool.setuptools.packages.find]
where = ["."]
include = ["mcp_orchestrator*"]

# Ship the cursor-rules templates alongside the Python code.
[tool.setuptools.package-data]
mcp_orchestrator = [
    "cursor-rules/**/*",
    "cursor-rules/**/*.jinja2",
    "cursor-rules/**/*.md",
]

[tool.black]
line-length = 88
target-version = ['py38', 'py39', 'py310', 'py311', 'py312']
include = '\.pyi?$'
extend-exclude = '''
/(
  # directories
  \.eggs
  | \.git
  | \.hg
  | \.mypy_cache
  | \.tox
  | \.venv
  | build
  | dist
)/
'''

# NOTE(review): top-level select/ignore/per-file-ignores are deprecated by
# newer ruff in favour of [tool.ruff.lint]; valid for ruff>=0.1.0 as pinned.
[tool.ruff]
target-version = "py38"
line-length = 88
select = [
    "E",  # pycodestyle errors
    "W",  # pycodestyle warnings
    "F",  # pyflakes
    "I",  # isort
    "B",  # flake8-bugbear
    "C4", # flake8-comprehensions
    "UP", # pyupgrade
]
ignore = [
    "E501",  # line too long, handled by black
    "B008",  # do not perform function calls in argument defaults
    "C901",  # too complex
]

[tool.ruff.per-file-ignores]
"__init__.py" = ["F401"]

[tool.mypy]
python_version = "3.8"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true
disallow_incomplete_defs = true
check_untyped_defs = true
disallow_untyped_decorators = true
no_implicit_optional = true
warn_redundant_casts = true
warn_unused_ignores = true
warn_no_return = true
warn_unreachable = true
strict_equality = true

[tool.pytest.ini_options]
minversion = "7.0"
addopts = "-ra -q --strict-markers --strict-config"
testpaths = ["tests"]
python_files = ["test_*.py", "*_test.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
markers = [
    "slow: marks tests as slow (deselect with '-m \"not slow\"')",
    "integration: marks tests as integration tests",
    "unit: marks tests as unit tests",
]

[tool.coverage.run]
source = ["mcp_orchestrator"]
omit = [
    "*/tests/*",
    "*/test_*",
    "*/__pycache__/*",
    "*/venv/*",
    "*/env/*",
]

[tool.coverage.report]
# Standard patterns for lines that should not count against coverage.
exclude_lines = [
    "pragma: no cover",
    "def __repr__",
    "if self.debug:",
    "if settings.DEBUG",
    "raise AssertionError",
    "raise NotImplementedError",
    "if 0:",
    "if __name__ == .__main__.:",
    "class .*\\bProtocol\\):",
    "@(abc\\.)?abstractmethod",
]
```
--------------------------------------------------------------------------------
/.github/workflows/fan-out-orchestrator.yml:
--------------------------------------------------------------------------------
```yaml
name: Fan-Out Release Orchestrator

on:
  workflow_dispatch:
    inputs:
      source_repository:
        description: 'Source repository that triggered the release'
        required: true
        type: string
      source_version:
        description: 'Version of the source repository'
        required: true
        type: string
      release_type:
        description: 'Type of release (foundation, tooling, domain, orchestration)'
        required: true
        type: choice
        options:
          - foundation
          - tooling
          - domain
          - orchestration
      dependency_update:
        description: 'Whether this is a dependency update'
        required: false
        default: 'false'
        type: boolean
      triggered_by:
        description: 'What triggered this orchestration'
        required: false
        default: 'manual'
        type: string

env:
  PYTHON_VERSION: "3.11"

jobs:
  orchestrate-release:
    name: Orchestrate Cross-Repository Release
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
      actions: read

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          cache: 'pip'

      - name: Install dependencies
        run: |
          pip install -r requirements.txt
          pip install PyGithub httpx

      - name: Run fan-out orchestration
        # Pass the token and workflow inputs through the environment instead
        # of interpolating them into the inline Python source: avoids GitHub
        # Actions script injection and keeps the secret off the command line.
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          RELEASE_TYPE: ${{ inputs.release_type }}
          SOURCE_VERSION: ${{ inputs.source_version }}
        run: |
          python - <<'PYEOF'
          import asyncio
          import os

          from mcp_project_orchestrator.fan_out_orchestrator import ReleaseCoordinator

          async def orchestrate():
              # Values come from the step env, not from template interpolation.
              coordinator = ReleaseCoordinator(os.environ['GITHUB_TOKEN'])
              release_type = os.environ['RELEASE_TYPE']
              version = os.environ['SOURCE_VERSION']

              if release_type == 'foundation':
                  result = await coordinator.coordinate_foundation_release(version)
              elif release_type == 'tooling':
                  result = await coordinator.coordinate_tooling_release(version)
              elif release_type == 'domain':
                  result = await coordinator.coordinate_domain_release(version)
              else:
                  print(f'Unknown release type: {release_type}')
                  return

              print(f'Orchestration result: {result}')

          asyncio.run(orchestrate())
          PYEOF

      - name: Generate orchestration report
        if: always()
        run: |
          echo "## 🚀 Fan-Out Release Orchestration Report" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "**Orchestration Run:** $(date -u '+%Y-%m-%d %H:%M:%S UTC')" >> $GITHUB_STEP_SUMMARY
          echo "**Source Repository:** ${{ inputs.source_repository }}" >> $GITHUB_STEP_SUMMARY
          echo "**Source Version:** ${{ inputs.source_version }}" >> $GITHUB_STEP_SUMMARY
          echo "**Release Type:** ${{ inputs.release_type }}" >> $GITHUB_STEP_SUMMARY
          echo "**Triggered By:** ${{ inputs.triggered_by }}" >> $GITHUB_STEP_SUMMARY
          echo "**Dependency Update:** ${{ inputs.dependency_update }}" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Orchestration Actions" >> $GITHUB_STEP_SUMMARY
          echo "- Analyzed dependency relationships" >> $GITHUB_STEP_SUMMARY
          echo "- Triggered dependent repository workflows" >> $GITHUB_STEP_SUMMARY
          echo "- Created dependency update PRs where needed" >> $GITHUB_STEP_SUMMARY
          echo "- Monitored release status across ecosystem" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "### Next Steps" >> $GITHUB_STEP_SUMMARY
          echo "- Review triggered workflows in dependent repositories" >> $GITHUB_STEP_SUMMARY
          echo "- Monitor build and test results" >> $GITHUB_STEP_SUMMARY
          echo "- Address any dependency conflicts" >> $GITHUB_STEP_SUMMARY
          echo "- Verify end-to-end integration" >> $GITHUB_STEP_SUMMARY
--------------------------------------------------------------------------------
/scripts/setup_aws_mcp.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# Setup script for AWS MCP integration
# This script helps configure AWS credentials and test the integration

set -e

echo "======================================"
echo "AWS MCP Integration Setup"
echo "======================================"
echo ""

# Check if boto3 is importable by the same interpreter used for testing below
if ! python3 -c "import boto3" 2>/dev/null; then
    echo "❌ boto3 is not installed"
    echo "Installing boto3 and botocore..."
    # "python3 -m pip" guarantees the install targets the python3 checked above,
    # not whatever "pip" happens to be first on PATH.
    python3 -m pip install boto3 botocore
    echo "✅ boto3 installed successfully"
else
    echo "✅ boto3 is already installed"
fi

# Create .env from the template if it does not exist yet
if [ ! -f .env ]; then
    echo ""
    echo "Creating .env file from template..."
    if [ -f .env.example ]; then
        cp .env.example .env
        echo "✅ .env file created"
    else
        # Don't abort under "set -e" just because the template is missing.
        echo "⚠️  .env.example not found; skipping .env creation"
    fi
    echo ""
    echo "⚠️  Please edit .env file and add your AWS credentials"
    echo "   Required variables:"
    echo "   - AWS_REGION"
    echo "   - AWS_ACCESS_KEY_ID (optional if using IAM roles)"
    echo "   - AWS_SECRET_ACCESS_KEY (optional if using IAM roles)"
    echo ""
    echo "You can also use AWS CLI profiles by setting AWS_PROFILE"
else
    echo "✅ .env file already exists"
fi

# Check if AWS CLI is configured
echo ""
echo "Checking AWS CLI configuration..."
if command -v aws &> /dev/null; then
    echo "✅ AWS CLI is installed"

    if aws sts get-caller-identity &> /dev/null; then
        echo "✅ AWS credentials are configured"
        echo ""
        echo "Current AWS Identity:"
        aws sts get-caller-identity
    else
        echo "⚠️  AWS CLI is not configured or credentials are invalid"
        echo ""
        echo "To configure AWS CLI, run:"
        echo "  aws configure"
        echo ""
        echo "Or use environment variables in .env file"
    fi
else
    echo "⚠️  AWS CLI is not installed"
    echo "   Install it from: https://aws.amazon.com/cli/"
fi

# Test AWS MCP integration
echo ""
echo "======================================"
echo "Testing AWS MCP Integration"
echo "======================================"
echo ""

# Write the test script to a private temp file (mktemp avoids the
# predictable-/tmp-path clobber/symlink issue) and remove it on any exit.
TEST_SCRIPT="$(mktemp)"
trap 'rm -f "$TEST_SCRIPT"' EXIT

cat > "$TEST_SCRIPT" << 'EOF'
"""Test AWS MCP integration."""
import os

# python-dotenv is optional; fall back to the process environment if absent.
try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    pass

print("Loading AWS MCP integration...")
try:
    from mcp_project_orchestrator.aws_mcp import AWSConfig, AWSMCPIntegration

    # Check configuration
    config = AWSConfig()
    print(f"✅ AWS Region: {config.region}")

    if config.validate():
        print("✅ AWS configuration is valid")
    else:
        print("⚠️  AWS configuration validation failed")
        print("   Check your environment variables")

    # Initialize integration
    aws = AWSMCPIntegration(config)
    print("✅ AWS MCP integration initialized")

    # Test best practices
    print("\nTesting AWS best practices...")
    practices = aws.get_aws_best_practices('s3')
    print(f"✅ Retrieved {len(practices)} best practice categories for S3")

    # Test cost estimation
    print("\nTesting cost estimation...")
    estimate = aws.estimate_costs('s3', {'storage_gb': 100, 'requests': 10000})
    print(f"✅ Cost estimate: ${estimate['total_usd']} USD")

    print("\n" + "="*50)
    print("✅ All tests passed!")
    print("="*50)
    print("\nAWS MCP integration is ready to use.")
    print("\nAvailable MCP tools:")
    print("  - aws_list_s3_buckets")
    print("  - aws_list_ec2_instances")
    print("  - aws_list_lambda_functions")
    print("  - aws_best_practices")
    print("  - aws_estimate_costs")

except ImportError as e:
    print(f"❌ Import error: {e}")
    print("\nMake sure you have installed the package:")
    print("  pip install -e .[aws]")
except Exception as e:
    print(f"❌ Error: {e}")
    import traceback
    traceback.print_exc()
EOF

# Run the test (cleanup is handled by the EXIT trap)
python3 "$TEST_SCRIPT"

echo ""
echo "======================================"
echo "Setup Complete!"
echo "======================================"
echo ""
echo "Next steps:"
echo "1. Edit .env file with your AWS credentials (if needed)"
echo "2. Run the MCP server: python -m mcp_project_orchestrator.project_orchestration"
echo "3. Use AWS MCP tools in your AI assistant"
echo ""
echo "Documentation:"
echo "  - See docs/AWS_MCP.md for detailed usage"
echo "  - See .env.example for all configuration options"
echo ""
```
--------------------------------------------------------------------------------
/tests/test_base_classes.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for base classes."""
2 |
3 | import pytest
4 | from pathlib import Path
5 | from mcp_project_orchestrator.core.base import (
6 | BaseComponent,
7 | BaseTemplate,
8 | BaseManager,
9 | BaseOrchestrator
10 | )
11 |
12 |
class ConcreteComponent(BaseComponent):
    """Concrete implementation for testing.

    Records lifecycle invocations by setting flag attributes so tests
    can assert that ``initialize``/``cleanup`` were actually called.
    """

    async def initialize(self):
        # Flag only exists after initialize() has run.
        self.initialized = True

    async def cleanup(self):
        # Flag only exists after cleanup() has run.
        self.cleaned_up = True
21 |
22 |
class ConcreteTemplate(BaseTemplate):
    """Concrete implementation for testing.

    ``render`` echoes the context's ``name`` so tests can verify the
    context is threaded through; ``validate`` reports whether the backing
    template file exists (trivially True if no path attribute is set).
    """

    async def render(self, context):
        # Falls back to 'unknown' when the context omits 'name'.
        return f"Rendered: {context.get('name', 'unknown')}"

    async def validate(self):
        # hasattr guard: presumably BaseTemplate always sets template_path,
        # but tolerate subclasses/instances that don't — TODO confirm.
        return self.template_path.exists() if hasattr(self, 'template_path') else True
31 |
32 |
class ConcreteManager(BaseManager):
    """Concrete implementation for testing.

    Backs the abstract manager API with the inherited ``self.components``
    dict, keyed by component name.
    """

    async def load_config(self):
        # Flag checked by tests; no real config parsing is performed.
        self.config_loaded = True

    async def register_component(self, component):
        # Last registration wins for a duplicate name.
        self.components[component.name] = component

    async def get_component(self, name):
        # Returns None for an unknown name rather than raising.
        return self.components.get(name)

    async def list_components(self):
        return list(self.components.keys())
47 |
48 |
class ConcreteOrchestrator(BaseOrchestrator):
    """Concrete implementation for testing.

    Mirrors ConcreteComponent: lifecycle hooks set flag attributes that
    tests assert on.
    """

    async def initialize(self):
        self.initialized = True

    async def cleanup(self):
        self.cleaned_up = True
57 |
58 |
@pytest.mark.asyncio
async def test_base_component():
    """Test BaseComponent."""
    comp = ConcreteComponent("test-component", {"key": "value"})

    # Constructor stores name and config verbatim.
    assert comp.name == "test-component"
    assert comp.config == {"key": "value"}

    # Each lifecycle hook flips its corresponding flag.
    await comp.initialize()
    assert comp.initialized is True
    await comp.cleanup()
    assert comp.cleaned_up is True
72 |
73 |
@pytest.mark.asyncio
async def test_base_template(tmp_path):
    """Test BaseTemplate."""
    path = tmp_path / "template.txt"
    path.write_text("Test template")

    tpl = ConcreteTemplate(path)
    assert tpl.template_path == path

    # Rendering substitutes values from the supplied context.
    assert await tpl.render({"name": "Test"}) == "Rendered: Test"

    # Validation succeeds because the backing file exists on disk.
    assert await tpl.validate() is True
90 |
91 |
@pytest.mark.asyncio
async def test_base_manager(tmp_path):
    """Test BaseManager."""
    cfg_path = tmp_path / "config.json"
    mgr = ConcreteManager(cfg_path)

    # Fresh manager: path recorded, registry empty.
    assert mgr.config_path == cfg_path
    assert mgr.components == {}

    await mgr.load_config()
    assert mgr.config_loaded is True

    # Register one component, then read it back by name and via listing.
    comp = ConcreteComponent("comp1", {})
    await mgr.register_component(comp)
    assert await mgr.get_component("comp1") is comp
    assert await mgr.list_components() == ["comp1"]
116 |
117 |
@pytest.mark.asyncio
async def test_base_orchestrator():
    """Test BaseOrchestrator."""
    class MockConfig:
        # Minimal stand-in: the orchestrator only reads .name from config.
        name = "test-orchestrator"

    cfg = MockConfig()
    orch = ConcreteOrchestrator(cfg)

    # Config object kept by identity; name lifted from it.
    assert orch.config is cfg
    assert orch.name == "test-orchestrator"

    await orch.initialize()
    assert orch.initialized is True
    await orch.cleanup()
    assert orch.cleaned_up is True
135 |
136 |
def test_base_component_without_config():
    """Test BaseComponent without config."""
    comp = ConcreteComponent("test")
    # Omitted config defaults to an empty dict, not None.
    assert comp.name == "test"
    assert comp.config == {}
142 |
143 |
def test_abstract_methods():
    """Test that abstract methods must be implemented."""
    # NOTE: ABCs raise TypeError at *instantiation*, not at class
    # definition — defining the incomplete subclass inside the raises
    # block is legal; only the trailing constructor call triggers it.

    # BaseComponent requires initialize and cleanup
    with pytest.raises(TypeError):
        class IncompleteComponent(BaseComponent):
            async def initialize(self):
                pass
            # Missing cleanup
        IncompleteComponent("test")

    # BaseTemplate requires render and validate
    with pytest.raises(TypeError):
        class IncompleteTemplate(BaseTemplate):
            async def render(self, context):
                pass
            # Missing validate
        IncompleteTemplate(Path("test"))
161 |
```
--------------------------------------------------------------------------------
/mcp-project-orchestrator/openssl/conanfile.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Conan package for mcp-project-orchestrator/openssl
3 |
4 | This package provides Cursor configuration management for OpenSSL development,
5 | similar to how Conan manages build profiles.
6 | """
7 |
8 | from conan import ConanFile
9 | from conan.tools.files import copy, get
10 | from conan.tools.layout import basic_layout
11 | import os
12 |
13 |
class MCPProjectOrchestratorOpenSSLConan(ConanFile):
    """Conan recipe for the OpenSSL Cursor-configuration package.

    Pure-Python payload: packages the ``mcp_orchestrator`` sources and the
    ``cursor-rules`` templates; no compilation is performed.
    """

    name = "mcp-project-orchestrator-openssl"
    version = "0.1.0"
    description = "Cursor configuration management for OpenSSL development"
    license = "MIT"
    url = "https://github.com/sparesparrow/mcp-project-orchestrator"
    homepage = "https://github.com/sparesparrow/mcp-project-orchestrator"
    topics = ("openssl", "cursor", "ide", "configuration", "management", "conan", "build", "profiles")
    # NOTE(review): package_type "python-require" normally means consumers
    # pull this via python_requires rather than requires() — confirm usage.
    package_type = "python-require"
    settings = "os", "arch", "compiler", "build_type"
    # with_cursor: include Cursor tooling deps; cursor_opt_out: consumer
    # opt-out flag surfaced via env in package_info().
    options = {
        "with_cursor": [True, False],
        "cursor_opt_out": [True, False],
    }
    default_options = {
        "with_cursor": True,
        "cursor_opt_out": False,
    }

    def configure(self):
        """Configure the package."""
        # This is a Python package, not a C++ library: prune the binary
        # settings so only "os" from the declared settings remains.
        self.settings.rm_safe("compiler")
        self.settings.rm_safe("build_type")
        self.settings.rm_safe("arch")

    def layout(self):
        """Set up the package layout."""
        basic_layout(self)

    def requirements(self):
        """Define package requirements."""
        # NOTE(review): "python_requires/<name>/<version>" is an unusual
        # reference form (Conan references are typically "<name>/<version>");
        # confirm these actually resolve on the configured remote.
        self.requires("python_requires/click/8.0.0")
        self.requires("python_requires/jinja2/3.0.0")

    def build_requirements(self):
        """Define build requirements."""
        # NOTE(review): duplicates requirements() above, so these add nothing
        # new when with_cursor is True — confirm intent.
        if self.options.with_cursor:
            self.build_requires("python_requires/click/8.0.0")
            self.build_requires("python_requires/jinja2/3.0.0")

    def source(self):
        """Download source code."""
        # This package contains only Python code and templates
        # No external source download needed
        pass

    def build(self):
        """Build the package."""
        # This is a Python package, no compilation needed
        pass

    def package(self):
        """Package the files."""
        # Copy Python package
        copy(self, "mcp_orchestrator/*", src=self.source_folder, dst=os.path.join(self.package_folder, "mcp_orchestrator"))

        # Copy cursor-rules templates
        copy(self, "cursor-rules/**/*", src=self.source_folder, dst=os.path.join(self.package_folder, "cursor-rules"))

        # Copy configuration files
        copy(self, "pyproject.toml", src=self.source_folder, dst=self.package_folder)
        copy(self, "setup.py", src=self.source_folder, dst=self.package_folder)
        copy(self, "requirements.txt", src=self.source_folder, dst=self.package_folder)

    def package_info(self):
        """Define package information."""
        # No native artifacts: clear the C/C++ directory lists.
        self.cpp_info.bindirs = []
        self.cpp_info.libdirs = []
        self.cpp_info.includedirs = []

        # Expose the packaged Python code on consumers' PYTHONPATH.
        self.env_info.PYTHONPATH.append(os.path.join(self.package_folder, "mcp_orchestrator"))

        # Where consumers find the cursor-rules templates.
        self.env_info.CURSOR_RULES_PATH = os.path.join(self.package_folder, "cursor-rules")

        # Surface the recipe options to tooling via environment variables.
        self.env_info.MCP_ORCHESTRATOR_WITH_CURSOR = str(self.options.with_cursor)
        self.env_info.MCP_ORCHESTRATOR_CURSOR_OPT_OUT = str(self.options.cursor_opt_out)

    def deploy(self):
        """Deploy the package."""
        # Copy Python package to destination
        copy(self, "mcp_orchestrator/*", src=self.package_folder, dst=self.build_folder)

        # Copy cursor-rules templates
        copy(self, "cursor-rules/**/*", src=self.package_folder, dst=self.build_folder)

        # Copy configuration files
        copy(self, "pyproject.toml", src=self.package_folder, dst=self.build_folder)
        copy(self, "setup.py", src=self.package_folder, dst=self.build_folder)
        copy(self, "requirements.txt", src=self.package_folder, dst=self.build_folder)

    def package_id(self):
        """Customize package ID."""
        # Include options in package ID so option changes produce distinct IDs.
        self.info.options.with_cursor = self.options.with_cursor
        self.info.options.cursor_opt_out = self.options.cursor_opt_out
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/prompts/development/Monorepo_Migration_and_Code_Organization_Guide.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "name": "Monorepo Migration and Code Organization Guide",
3 | "description": "A template for guiding the migration of code into a monorepo structure with best practices for TypeScript interfaces, Docker configuration, and CI/CD workflows",
4 | "type": "prompt",
5 | "category": "development",
6 | "content": "# Monorepo Migration and Code Organization Guide for {{project_name}}\n\n## Overview\n\nThis guide outlines the process for migrating {{project_type}} codebases into a monorepo structure while adhering to best practices for code organization, interface consolidation, containerization, and CI/CD workflows.\n\n## Interface Consolidation\n\n### TypeScript Interfaces Unification\n\n1. Create a centralized interfaces directory:\n ```bash\n mkdir -p src/interfaces\n ```\n\n2. Consolidate related interfaces into a single file to reduce fragmentation:\n - Group interfaces by domain/purpose\n - Maintain consistent naming conventions\n - Document each interface with JSDoc comments\n - Export all interfaces from a single entry point `index.ts`\n\n3. Example unified interface structure:\n ```typescript\n /**\n * Core domain interfaces\n */\n export interface {{primary_interface_name}} {\n id: string;\n name: string;\n // Additional properties...\n }\n\n /**\n * Service interfaces\n */\n export interface {{service_interface_name}} {\n // Service methods...\n }\n\n /**\n * Storage adapters\n */\n export interface StorageAdapter {\n // Storage operations...\n }\n ```\n\n## Docker Configuration\n\n### Dockerfile Best Practices\n\n1. Use multi-stage builds for better efficiency:\n ```dockerfile\n # Build stage\n FROM node:{{node_version}}-alpine AS build\n WORKDIR /app\n COPY package*.json ./\n RUN npm ci\n COPY . .\n RUN npm run build\n\n # Production stage\n FROM node:{{node_version}}-alpine\n WORKDIR /app\n COPY --from=build /app/build ./build\n # Additional configuration...\n ```\n\n2. Set appropriate environment variables\n3. Use non-root users for security\n4. Implement health checks\n5. Add proper LABEL metadata\n6. Configure volumes for persistent data\n\n### Docker Compose\n\n1. 
Base configuration for core functionality:\n ```yaml\n services:\n {{service_name}}:\n build: .\n volumes:\n - ./data:/app/data\n environment:\n - NODE_ENV=production\n # Additional environment variables...\n ```\n\n2. Extended configurations for additional functionality (database, etc.)\n3. Development-specific configurations\n\n## GitHub Workflows\n\n### Essential CI/CD Workflows\n\n1. Main CI workflow for testing and linting\n2. Build and publish workflow for releases\n3. Containerized testing workflow\n\n### Workflow Structure\n\n```yaml\nname: {{workflow_name}}\n\non:\n push:\n branches: [main]\n pull_request:\n branches: [main]\n\njobs:\n test:\n runs-on: ubuntu-latest\n # Job configuration...\n\n build:\n needs: [test]\n # Build configuration...\n```\n\n## Containerized Testing\n\nImplement containerized testing to ensure consistent environments:\n\n1. Create test-specific Dockerfiles\n2. Set up Docker networks for integrated tests\n3. Use Docker Compose for multi-container testing scenarios\n4. Implement proper cleanup procedures\n\n## DevContainer Configuration\n\nProvide consistent development environments:\n\n```json\n{\n \"name\": \"{{project_name}} Dev Environment\",\n \"build\": {\n \"dockerfile\": \"../Dockerfile\",\n \"context\": \"..\"\n },\n \"customizations\": {\n \"vscode\": {\n \"extensions\": [\n \"dbaeumer.vscode-eslint\",\n \"esbenp.prettier-vscode\"\n // Additional extensions...\n ]\n }\n }\n}\n```\n\n## Implementation Strategy\n\n1. Create a feature branch for interface consolidation\n2. Migrate interfaces in stages, testing thoroughly\n3. Add Docker and CI configurations\n4. Validate with containerized tests\n5. Create comprehensive documentation\n\n## Technical Considerations\n\n{{technical_considerations}}\n",
7 | "variables": [
8 | "project_name",
9 | "project_type",
10 | "primary_interface_name",
11 | "service_interface_name",
12 | "node_version",
13 | "service_name",
14 | "workflow_name",
15 | "technical_considerations"
16 | ],
17 | "metadata": {
18 | "source": "/home/sparrow/projects/mcp-prompts/prompts/monorepo-migration-guide.json",
19 | "imported": true
20 | }
21 | }
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/prompts/Monorepo_Migration_and_Code_Organization_Guide.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "name": "Monorepo Migration and Code Organization Guide",
3 | "description": "A template for guiding the migration of code into a monorepo structure with best practices for TypeScript interfaces, Docker configuration, and CI/CD workflows",
4 | "type": "prompt",
5 | "category": "development",
6 | "content": "# Monorepo Migration and Code Organization Guide for {{project_name}}\n\n## Overview\n\nThis guide outlines the process for migrating {{project_type}} codebases into a monorepo structure while adhering to best practices for code organization, interface consolidation, containerization, and CI/CD workflows.\n\n## Interface Consolidation\n\n### TypeScript Interfaces Unification\n\n1. Create a centralized interfaces directory:\n ```bash\n mkdir -p src/interfaces\n ```\n\n2. Consolidate related interfaces into a single file to reduce fragmentation:\n - Group interfaces by domain/purpose\n - Maintain consistent naming conventions\n - Document each interface with JSDoc comments\n - Export all interfaces from a single entry point `index.ts`\n\n3. Example unified interface structure:\n ```typescript\n /**\n * Core domain interfaces\n */\n export interface {{primary_interface_name}} {\n id: string;\n name: string;\n // Additional properties...\n }\n\n /**\n * Service interfaces\n */\n export interface {{service_interface_name}} {\n // Service methods...\n }\n\n /**\n * Storage adapters\n */\n export interface StorageAdapter {\n // Storage operations...\n }\n ```\n\n## Docker Configuration\n\n### Dockerfile Best Practices\n\n1. Use multi-stage builds for better efficiency:\n ```dockerfile\n # Build stage\n FROM node:{{node_version}}-alpine AS build\n WORKDIR /app\n COPY package*.json ./\n RUN npm ci\n COPY . .\n RUN npm run build\n\n # Production stage\n FROM node:{{node_version}}-alpine\n WORKDIR /app\n COPY --from=build /app/build ./build\n # Additional configuration...\n ```\n\n2. Set appropriate environment variables\n3. Use non-root users for security\n4. Implement health checks\n5. Add proper LABEL metadata\n6. Configure volumes for persistent data\n\n### Docker Compose\n\n1. 
Base configuration for core functionality:\n ```yaml\n services:\n {{service_name}}:\n build: .\n volumes:\n - ./data:/app/data\n environment:\n - NODE_ENV=production\n # Additional environment variables...\n ```\n\n2. Extended configurations for additional functionality (database, etc.)\n3. Development-specific configurations\n\n## GitHub Workflows\n\n### Essential CI/CD Workflows\n\n1. Main CI workflow for testing and linting\n2. Build and publish workflow for releases\n3. Containerized testing workflow\n\n### Workflow Structure\n\n```yaml\nname: {{workflow_name}}\n\non:\n push:\n branches: [main]\n pull_request:\n branches: [main]\n\njobs:\n test:\n runs-on: ubuntu-latest\n # Job configuration...\n\n build:\n needs: [test]\n # Build configuration...\n```\n\n## Containerized Testing\n\nImplement containerized testing to ensure consistent environments:\n\n1. Create test-specific Dockerfiles\n2. Set up Docker networks for integrated tests\n3. Use Docker Compose for multi-container testing scenarios\n4. Implement proper cleanup procedures\n\n## DevContainer Configuration\n\nProvide consistent development environments:\n\n```json\n{\n \"name\": \"{{project_name}} Dev Environment\",\n \"build\": {\n \"dockerfile\": \"../Dockerfile\",\n \"context\": \"..\"\n },\n \"customizations\": {\n \"vscode\": {\n \"extensions\": [\n \"dbaeumer.vscode-eslint\",\n \"esbenp.prettier-vscode\"\n // Additional extensions...\n ]\n }\n }\n}\n```\n\n## Implementation Strategy\n\n1. Create a feature branch for interface consolidation\n2. Migrate interfaces in stages, testing thoroughly\n3. Add Docker and CI configurations\n4. Validate with containerized tests\n5. Create comprehensive documentation\n\n## Technical Considerations\n\n{{technical_considerations}}\n",
7 | "variables": [
8 | "project_name",
9 | "project_type",
10 | "primary_interface_name",
11 | "service_interface_name",
12 | "node_version",
13 | "service_name",
14 | "workflow_name",
15 | "technical_considerations"
16 | ],
17 | "metadata": {
18 | "source": "/home/sparrow/projects/mcp-prompts/prompts/monorepo-migration-guide.json",
19 | "imported": true
20 | }
21 | }
```
--------------------------------------------------------------------------------
/tests/test_prompts.py:
--------------------------------------------------------------------------------
```python
1 | """Tests for the prompt management system."""
2 |
3 | import pytest
4 | from pathlib import Path
5 |
6 | from mcp_project_orchestrator.prompt_manager import (
7 | PromptManager,
8 | PromptTemplate,
9 | PromptCategory,
10 | PromptMetadata,
11 | )
12 |
def test_prompt_metadata():
    """Round-trip PromptMetadata through its dictionary representation."""
    original = PromptMetadata(
        name="test-prompt",
        description="Test prompt",
        category=PromptCategory.SYSTEM,
        version="1.0.0",
        author="Test Author",
        tags=["test", "system"],
        variables={"var1": "desc1", "var2": "desc2"},
    )

    # Serialize and spot-check representative fields.
    serialized = original.to_dict()
    assert serialized["name"] == "test-prompt"
    assert serialized["category"] == "system"

    # Deserialize and verify the round trip preserves key attributes.
    restored = PromptMetadata.from_dict(serialized)
    assert restored.name == original.name
    assert restored.category == original.category
34 |
def test_prompt_template():
    """Render a template fully, and verify missing variables raise KeyError."""
    tmpl = PromptTemplate(
        metadata=PromptMetadata(
            name="test-prompt",
            description="Test prompt",
            category=PromptCategory.SYSTEM,
        ),
        content="Hello {{ name }}! Welcome to {{ project }}.",
    )

    # All variables supplied: full substitution.
    output = tmpl.render({"name": "User", "project": "MCP"})
    assert output == "Hello User! Welcome to MCP."

    # A missing variable must surface as KeyError, not render partially.
    with pytest.raises(KeyError):
        tmpl.render({"name": "User"})
56 |
def test_prompt_manager(prompt_manager, temp_dir):
    """Exercise discovery, listing, retrieval, and rendering end to end."""
    # Write a prompt definition to disk for the manager to discover.
    prompts_path = temp_dir / "prompts"
    prompts_path.mkdir(parents=True, exist_ok=True)

    definition = prompts_path / "test-prompt.json"
    definition.write_text("""
{
    "metadata": {
        "name": "test-prompt",
        "description": "Test prompt",
        "category": "system",
        "version": "1.0.0",
        "author": "Test Author",
        "tags": ["test", "system"],
        "variables": {
            "name": "User name",
            "project": "Project name"
        }
    },
    "content": "Hello {{ name }}! Welcome to {{ project }}."
}
""")

    # Discovery should pick up the file we just wrote.
    prompt_manager.discover_prompts()

    # The prompt appears in the unfiltered listing...
    assert "test-prompt" in prompt_manager.list_prompts()

    # ...and in the category-filtered listing.
    assert "test-prompt" in prompt_manager.list_prompts(PromptCategory.SYSTEM)

    # Retrieval returns a fully populated template.
    loaded = prompt_manager.get_prompt("test-prompt")
    assert loaded is not None
    assert loaded.metadata.name == "test-prompt"
    assert loaded.metadata.category == PromptCategory.SYSTEM

    # Rendering substitutes the supplied variables.
    result = prompt_manager.render_prompt("test-prompt", {
        "name": "User",
        "project": "MCP",
    })
    assert result == "Hello User! Welcome to MCP."
105 |
def test_prompt_validation(prompt_manager):
    """Validation rejects incomplete prompts and accepts complete ones."""
    # Missing version/author/tags/variables and empty content: must fail.
    incomplete = PromptTemplate(
        metadata=PromptMetadata(
            name="invalid-prompt",
            description="Invalid prompt",
            category=PromptCategory.SYSTEM,
        ),
        content="",
    )
    assert not incomplete.validate()

    # Fully populated metadata with non-empty content: must pass.
    complete = PromptTemplate(
        metadata=PromptMetadata(
            name="valid-prompt",
            description="Valid prompt",
            category=PromptCategory.SYSTEM,
            version="1.0.0",
            author="Test Author",
            tags=["test"],
            variables={"var1": "desc1"},
        ),
        content="Test {{ var1 }}",
    )
    assert complete.validate()
129 |
def test_prompt_save_load(prompt_manager, temp_dir):
    """A saved prompt can be loaded back with identical metadata and content."""
    source = PromptTemplate(
        metadata=PromptMetadata(
            name="save-test",
            description="Save test prompt",
            category=PromptCategory.SYSTEM,
            version="1.0.0",
            author="Test Author",
            tags=["test"],
            variables={"var1": "desc1"},
        ),
        content="Test {{ var1 }}",
    )

    # Persist, then read back through the manager.
    prompt_manager.save_prompt(source)
    loaded = prompt_manager.get_prompt("save-test")

    assert loaded is not None
    assert loaded.metadata.name == source.metadata.name
    assert loaded.content == source.content
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/mcp-py/mcp-server.py:
--------------------------------------------------------------------------------
```python
1 | # server.py
2 | import asyncio
3 | import json
4 | import logging
5 | import os
6 | from typing import Any, Dict, List, Optional
7 |
8 | from fastapi import FastAPI, Request, Response
9 | from fastapi.responses import JSONResponse
10 | from sse_starlette.sse import EventSourceResponse
11 | import uvicorn
12 |
13 | from mcp.server import Server, NotificationOptions
14 | from mcp.server.models import InitializationOptions
15 | from mcp.server.stdio import stdio_server
16 | from mcp.server.sse import SseServerTransport
17 | import mcp.types as types
18 |
19 | # Configure logging
20 | logging.basicConfig(level=logging.INFO)
21 | logger = logging.getLogger(__name__)
22 |
class MCPToolServer(Server):
    """MCP server that exposes tools defined declaratively in a JSON file.

    Tool definitions are loaded at construction time.  Each definition must
    provide at least ``name`` and ``input_schema``; ``description`` is optional.
    """

    def __init__(self):
        super().__init__("tool-server")
        self.tools: List[Dict[str, Any]] = []
        self.load_tools()

    def load_tools(self, path: str = "tools.json") -> None:
        """Load tool definitions from a JSON file.

        Args:
            path: Path to the JSON file holding a list of tool definitions.
                Defaults to ``tools.json`` in the working directory (the
                original hard-coded value, now a parameter).

        On any read or parse failure the tool list is reset to empty so the
        server still starts (degraded but functional).
        """
        try:
            with open(path, "r") as f:
                self.tools = json.load(f)
        except (OSError, ValueError) as e:
            # OSError: missing/unreadable file; ValueError covers
            # json.JSONDecodeError.  Narrower than the original broad
            # `except Exception`, and logged lazily per logging convention.
            logger.error("Failed to load %s: %s", path, e)
            self.tools = []

    async def handle_list_tools(self) -> List[types.Tool]:
        """Handle a tools/list request by converting definitions to Tool objects."""
        return [
            types.Tool(
                name=tool["name"],
                description=tool.get("description", ""),
                inputSchema=tool["input_schema"]
            )
            for tool in self.tools
        ]

    async def handle_call_tool(self, name: str, arguments: Optional[Dict[str, Any]] = None) -> List[types.TextContent]:
        """Handle a tools/call request.

        Args:
            name: Name of the tool to invoke.
            arguments: Optional mapping of argument names to values.

        Returns:
            A single-element list of text content echoing the call details.

        Raises:
            ValueError: If no tool with ``name`` is registered.
        """
        # Find the requested tool.
        tool = next((t for t in self.tools if t["name"] == name), None)
        if not tool:
            raise ValueError(f"Tool not found: {name}")

        # Placeholder execution: echo the call details.  Real tool dispatch
        # would be implemented here.
        result = f"Called tool {name} with arguments: {json.dumps(arguments or {})}"

        return [types.TextContent(type="text", text=result)]
61 |
class TransportManager:
    """Wires an MCPToolServer to stdio or HTTP/SSE transports via FastAPI."""

    def __init__(self, server: MCPToolServer):
        self.server = server
        self.app = FastAPI()
        self.setup_routes()

    def setup_routes(self):
        """Register the SSE, message, and tool HTTP endpoints on the app."""
        @self.app.get("/sse")
        async def sse_endpoint(request: Request):
            # Each SSE connection gets its own transport posting back to /message.
            transport = SseServerTransport("/message")
            return EventSourceResponse(self.handle_sse(transport, request))

        @self.app.post("/message")
        async def message_endpoint(request: Request):
            # Accept and acknowledge incoming messages for the SSE transport.
            message = await request.json()
            return JSONResponse({"status": "ok"})

        @self.app.post("/tools/call/{tool_name}")
        async def call_tool(tool_name: str, request: Request):
            payload = await request.json()
            outcome = await self.server.handle_call_tool(tool_name, payload)
            return JSONResponse({"result": outcome})

        @self.app.get("/tools")
        async def list_tools():
            available = await self.server.handle_list_tools()
            return JSONResponse({"tools": [item.dict() for item in available]})

    async def handle_sse(self, transport, request):
        """Bridge one SSE connection into the MCP server's run loop."""
        async with transport.connect_sse(request.scope, request.receive, request.send) as streams:
            read_stream, write_stream = streams
            await self.server.run(
                read_stream,
                write_stream,
                self.server.create_initialization_options()
            )

    async def run_stdio(self):
        """Serve the MCP protocol over stdin/stdout."""
        async with stdio_server() as (read_stream, write_stream):
            await self.server.run(
                read_stream,
                write_stream,
                self.server.create_initialization_options()
            )
110 |
async def main():
    """Start the tool server on the transport selected by MCP_TRANSPORT."""
    server = MCPToolServer()
    transport_mgr = TransportManager(server)

    # MCP_TRANSPORT selects the transport; any value other than "stdio"
    # falls through to the HTTP/SSE server.
    transport_type = os.environ.get("MCP_TRANSPORT", "stdio")

    if transport_type == "stdio":
        await transport_mgr.run_stdio()
    else:
        # Run HTTP/SSE server.  Use a distinct name for the uvicorn server
        # object: the original rebound `server`, shadowing the MCPToolServer.
        port = int(os.environ.get("MCP_PORT", 8000))
        config = uvicorn.Config(transport_mgr.app, host="0.0.0.0", port=port)
        http_server = uvicorn.Server(config)
        await http_server.serve()

if __name__ == "__main__":
    asyncio.run(main())
130 |
```
--------------------------------------------------------------------------------
/data/prompts/templates/monorepo-migration-guide.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "id": "monorepo-migration-guide",
3 | "name": "Monorepo Migration and Code Organization Guide",
4 | "description": "A template for guiding the migration of code into a monorepo structure with best practices for TypeScript interfaces, Docker configuration, and CI/CD workflows",
5 | "content": "# Monorepo Migration and Code Organization Guide for {{project_name}}\n\n## Overview\n\nThis guide outlines the process for migrating {{project_type}} codebases into a monorepo structure while adhering to best practices for code organization, interface consolidation, containerization, and CI/CD workflows.\n\n## Interface Consolidation\n\n### TypeScript Interfaces Unification\n\n1. Create a centralized interfaces directory:\n ```bash\n mkdir -p src/interfaces\n ```\n\n2. Consolidate related interfaces into a single file to reduce fragmentation:\n - Group interfaces by domain/purpose\n - Maintain consistent naming conventions\n - Document each interface with JSDoc comments\n - Export all interfaces from a single entry point `index.ts`\n\n3. Example unified interface structure:\n ```typescript\n /**\n * Core domain interfaces\n */\n export interface {{primary_interface_name}} {\n id: string;\n name: string;\n // Additional properties...\n }\n\n /**\n * Service interfaces\n */\n export interface {{service_interface_name}} {\n // Service methods...\n }\n\n /**\n * Storage adapters\n */\n export interface StorageAdapter {\n // Storage operations...\n }\n ```\n\n## Docker Configuration\n\n### Dockerfile Best Practices\n\n1. Use multi-stage builds for better efficiency:\n ```dockerfile\n # Build stage\n FROM node:{{node_version}}-alpine AS build\n WORKDIR /app\n COPY package*.json ./\n RUN npm ci\n COPY . .\n RUN npm run build\n\n # Production stage\n FROM node:{{node_version}}-alpine\n WORKDIR /app\n COPY --from=build /app/build ./build\n # Additional configuration...\n ```\n\n2. Set appropriate environment variables\n3. Use non-root users for security\n4. Implement health checks\n5. Add proper LABEL metadata\n6. Configure volumes for persistent data\n\n### Docker Compose\n\n1. 
Base configuration for core functionality:\n ```yaml\n services:\n {{service_name}}:\n build: .\n volumes:\n - ./data:/app/data\n environment:\n - NODE_ENV=production\n # Additional environment variables...\n ```\n\n2. Extended configurations for additional functionality (database, etc.)\n3. Development-specific configurations\n\n## GitHub Workflows\n\n### Essential CI/CD Workflows\n\n1. Main CI workflow for testing and linting\n2. Build and publish workflow for releases\n3. Containerized testing workflow\n\n### Workflow Structure\n\n```yaml\nname: {{workflow_name}}\n\non:\n push:\n branches: [main]\n pull_request:\n branches: [main]\n\njobs:\n test:\n runs-on: ubuntu-latest\n # Job configuration...\n\n build:\n needs: [test]\n # Build configuration...\n```\n\n## Containerized Testing\n\nImplement containerized testing to ensure consistent environments:\n\n1. Create test-specific Dockerfiles\n2. Set up Docker networks for integrated tests\n3. Use Docker Compose for multi-container testing scenarios\n4. Implement proper cleanup procedures\n\n## DevContainer Configuration\n\nProvide consistent development environments:\n\n```json\n{\n \"name\": \"{{project_name}} Dev Environment\",\n \"build\": {\n \"dockerfile\": \"../Dockerfile\",\n \"context\": \"..\"\n },\n \"customizations\": {\n \"vscode\": {\n \"extensions\": [\n \"dbaeumer.vscode-eslint\",\n \"esbenp.prettier-vscode\"\n // Additional extensions...\n ]\n }\n }\n}\n```\n\n## Implementation Strategy\n\n1. Create a feature branch for interface consolidation\n2. Migrate interfaces in stages, testing thoroughly\n3. Add Docker and CI configurations\n4. Validate with containerized tests\n5. Create comprehensive documentation\n\n## Technical Considerations\n\n{{technical_considerations}}\n",
6 | "isTemplate": true,
7 | "variables": [
8 | "project_name",
9 | "project_type",
10 | "primary_interface_name",
11 | "service_interface_name",
12 | "node_version",
13 | "service_name",
14 | "workflow_name",
15 | "technical_considerations"
16 | ],
17 | "tags": [
18 | "development",
19 | "monorepo",
20 | "typescript",
21 | "docker",
22 | "ci-cd",
23 | "migration"
24 | ],
25 | "category": "development",
26 | "createdAt": "2024-08-08T15:30:00.000Z",
27 | "updatedAt": "2024-08-08T15:30:00.000Z",
28 | "version": 1
29 | }
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/mcp-py/mcp-client.py:
--------------------------------------------------------------------------------
```python
1 | # client.py
2 | import asyncio
3 | import json
4 | from typing import Any, Dict, List, Optional
5 | import os
6 | import httpx
7 | from urllib.parse import urljoin
8 |
9 | from mcp import ClientSession, StdioServerParameters
10 | from mcp.client.stdio import stdio_client
11 | import mcp.types as types
12 |
class MCPClient:
    """A flexible MCP client that supports both stdio and HTTP/SSE transports.

    For stdio, the client spawns the server process and speaks MCP over its
    stdin/stdout.  For HTTP, it issues plain requests against the server's
    REST endpoints.
    """

    def __init__(self, transport_type: str = "stdio", server_url: Optional[str] = None):
        self.transport_type = transport_type
        self.server_url = server_url or "http://localhost:8000"
        self.session: Optional[ClientSession] = None
        self.http_client = httpx.AsyncClient()
        # Holds the async context managers (stdio streams, session) so that
        # close() can unwind them in the correct order.
        self._exit_stack = None

    async def connect_stdio(self, server_command: str, server_args: Optional[List[str]] = None):
        """Connect using stdio transport by spawning the server process.

        Args:
            server_command: Executable used to launch the server.
            server_args: Optional argument list passed to the server.
        """
        from contextlib import AsyncExitStack

        params = StdioServerParameters(
            command=server_command,
            args=server_args or [],
            env=None
        )

        # Enter the async context managers through an AsyncExitStack instead
        # of calling __aenter__ directly: the original discarded the
        # stdio_client context manager, so its __aexit__ could never run and
        # the streams leaked.
        self._exit_stack = AsyncExitStack()
        streams = await self._exit_stack.enter_async_context(stdio_client(params))
        self.session = await self._exit_stack.enter_async_context(
            ClientSession(streams[0], streams[1])
        )
        await self.session.initialize()

    async def connect_http(self):
        """Connect using HTTP transport.

        HTTP is stateless here: no persistent connection is required, so
        requests are simply made on demand.
        """
        pass

    async def list_tools(self) -> List[Dict[str, Any]]:
        """List available tools.

        Returns:
            A list of tool definition dictionaries.

        Raises:
            RuntimeError: If using stdio transport without connecting first.
        """
        if self.transport_type == "stdio":
            if not self.session:
                raise RuntimeError("Not connected")
            response = await self.session.list_tools()
            return [tool.dict() for tool in response.tools]
        else:
            # Reuse the shared HTTP client (connection pooling) instead of
            # constructing a throwaway AsyncClient per call; it is closed
            # exactly once in close().
            response = await self.http_client.get(urljoin(self.server_url, "/tools"))
            response.raise_for_status()
            return response.json()["tools"]

    async def call_tool(self, tool_name: str, arguments: Optional[Dict[str, Any]] = None) -> Any:
        """Call a specific tool.

        Args:
            tool_name: Name of the tool to invoke.
            arguments: Optional argument mapping; defaults to empty.

        Returns:
            The tool's result (shape depends on the transport).

        Raises:
            RuntimeError: If using stdio transport without connecting first.
        """
        if self.transport_type == "stdio":
            if not self.session:
                raise RuntimeError("Not connected")
            result = await self.session.call_tool(tool_name, arguments or {})
            return result
        else:
            response = await self.http_client.post(
                urljoin(self.server_url, f"/tools/call/{tool_name}"),
                json=arguments or {}
            )
            response.raise_for_status()
            return response.json()["result"]

    async def close(self):
        """Clean up the session, stdio streams, and HTTP client."""
        if self._exit_stack is not None:
            # Unwinds the session first, then the stdio streams.
            await self._exit_stack.aclose()
            self._exit_stack = None
            self.session = None
        await self.http_client.aclose()
76 |
class MCPClientCLI:
    """Interactive command-line front end for MCPClient."""

    def __init__(self):
        self.transport_type = os.environ.get("MCP_TRANSPORT", "stdio")
        self.server_url = os.environ.get("MCP_SERVER_URL", "http://localhost:8000")
        self.client = MCPClient(self.transport_type, self.server_url)

    async def run(self):
        """Prompt for commands in a loop until the user quits."""
        try:
            if self.transport_type == "stdio":
                await self.client.connect_stdio("python", ["server.py"])
            else:
                await self.client.connect_http()

            while True:
                command = input("\nEnter command (list_tools/call_tool/quit): ").strip()

                if command == "quit":
                    break

                if command == "list_tools":
                    tool_defs = await self.client.list_tools()
                    print("\nAvailable tools:")
                    for entry in tool_defs:
                        print(f"- {entry['name']}: {entry['description']}")
                elif command == "call_tool":
                    name = input("Enter tool name: ").strip()
                    raw_args = input("Enter arguments as JSON (or empty): ").strip()
                    call_args = json.loads(raw_args) if raw_args else {}

                    outcome = await self.client.call_tool(name, call_args)
                    print("\nResult:", outcome)
                else:
                    print("Unknown command")

        finally:
            # Always release the client, even if the loop raised.
            await self.client.close()
115 |
async def main():
    """Entry point: construct the CLI and hand control to its loop."""
    await MCPClientCLI().run()

if __name__ == "__main__":
    asyncio.run(main())
122 |
```
--------------------------------------------------------------------------------
/data/prompts/templates/advanced-multi-server-template.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "id": "advanced-multi-server-template",
3 | "name": "Advanced Multi-Server Integration Template",
4 | "description": "A comprehensive template that coordinates multiple MCP servers for complex tasks requiring diverse capabilities",
5 | "content": "# Advanced Multi-Server Assistant\n\nYou are an advanced AI assistant with access to multiple specialized MCP servers that significantly enhance your capabilities. Your task is to help with {{primary_task}} by coordinating these diverse tools and resources effectively.\n\n## Available MCP Servers and Capabilities\n\n### Core Resources and Data Access\n- **filesystem**: Access files and directories on the local system\n - Use for: examining code, reading configuration files, accessing project documentation\n- **github**: Interact with repositories, issues, pull requests, and code on GitHub\n - Use for: code exploration, commit history analysis, repository management\n- **postgres**: Execute SQL queries and interact with database content\n - Use for: data analysis, schema exploration, complex data retrieval\n\n### Knowledge Management\n- **prompts**: Access and apply specialized templates for different tasks\n - Use for: structured workflows, consistent outputs, domain-specific prompting\n- **memory**: Store and retrieve key information across conversation sessions\n - Use for: retaining context, tracking progress on multi-step tasks\n\n### Enhanced Reasoning\n- **sequential-thinking**: Break down complex problems into logical steps\n - Use for: multi-step reasoning, maintaining clarity in complex analyses\n- **mcp-compass**: Navigate between different capabilities with strategic direction\n - Use for: orchestrating complex workflows involving multiple servers\n\n### Specialized Capabilities\n- **puppeteer**: Automate browser interactions and web scraping\n - Use for: testing web applications, extracting data from websites\n- **elevenlabs**: Convert text to realistic speech\n - Use for: creating audio versions of content, accessibility enhancements\n- **brave-search**: Perform web searches for up-to-date information\n - Use for: research, finding relevant resources, staying current\n\n## Integration Strategy\n\nI will coordinate these capabilities 
based on your needs by:\n1. **Understanding the primary goal** of {{primary_task}}\n2. **Identifying which MCP servers** are most relevant for this task\n3. **Creating a workflow** that efficiently combines their capabilities\n4. **Executing tasks** in an optimal sequence\n5. **Synthesizing results** into a comprehensive response\n\n## Specialized Task Approach\n\nFor your specific task in {{domain_expertise}}, I'll focus on using:\n- {{primary_server_1}}\n- {{primary_server_2}}\n- {{primary_server_3}}\n\nAdditional servers may be utilized as needed based on our conversation.\n\n## Guiding Principles\n\n- I'll prioritize {{priority_principle}} in my approach\n- I'll maintain awareness of {{ethical_consideration}} throughout our interaction\n- I'll structure my responses to emphasize {{output_focus}}\n\nLet's begin by clarifying your specific needs for {{primary_task}} and how I can best leverage these MCP servers to assist you.",
6 | "variables": [
7 | "primary_task",
8 | "domain_expertise",
9 | "primary_server_1",
10 | "primary_server_2",
11 | "primary_server_3",
12 | "priority_principle",
13 | "ethical_consideration",
14 | "output_focus"
15 | ],
16 | "examples": [
17 | {
18 | "name": "Code Repository Analysis",
19 | "values": {
20 | "primary_task": "analyzing a GitHub repository structure and suggesting improvements",
21 | "domain_expertise": "software architecture",
22 | "primary_server_1": "github",
23 | "primary_server_2": "filesystem",
24 | "primary_server_3": "sequential-thinking",
25 | "priority_principle": "maintainability and clarity",
26 | "ethical_consideration": "respecting original code design intentions",
27 | "output_focus": "actionable recommendations with examples"
28 | }
29 | },
30 | {
31 | "name": "Data Analysis Project",
32 | "values": {
33 | "primary_task": "exploring a database and generating insights about customer behavior",
34 | "domain_expertise": "data analytics",
35 | "primary_server_1": "postgres",
36 | "primary_server_2": "sequential-thinking",
37 | "primary_server_3": "memory",
38 | "priority_principle": "finding meaningful patterns in complex data",
39 | "ethical_consideration": "privacy and data protection concerns",
40 | "output_focus": "visualizable insights and business recommendations"
41 | }
42 | }
43 | ],
44 | "categories": ["integration", "multi-server", "advanced", "orchestration"]
45 | }
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/prompts/version.py:
--------------------------------------------------------------------------------
```python
1 | """Prompt version implementation for MCP Project Orchestrator.
2 |
3 | This module provides the PromptVersion class that handles version information
4 | for prompts, following semantic versioning principles.
5 | """
6 |
7 | from typing import Dict, Any
8 |
9 |
class PromptVersion:
    """Semantic version (major.minor.patch) for a prompt.

    Instances are totally ordered and hashable; two versions are equal when
    all three components match.  Comparisons against non-PromptVersion
    objects return NotImplemented (so ``==`` falls back to False and
    ordering raises TypeError, instead of the AttributeError the previous
    implementation produced).
    """

    def __init__(self, major: int = 1, minor: int = 0, patch: int = 0):
        """Initialize prompt version.

        Args:
            major: Major version number
            minor: Minor version number
            patch: Patch version number
        """
        self.major = major
        self.minor = minor
        self.patch = patch

    def _key(self) -> tuple:
        """Return the comparison key (major, minor, patch).

        Centralizes the ordering logic that was previously duplicated across
        is_newer_than, __lt__, and __eq__.
        """
        return (self.major, self.minor, self.patch)

    def bump_major(self) -> None:
        """Bump major version number.

        This resets minor and patch numbers to 0.
        """
        self.major += 1
        self.minor = 0
        self.patch = 0

    def bump_minor(self) -> None:
        """Bump minor version number.

        This resets patch number to 0.
        """
        self.minor += 1
        self.patch = 0

    def bump_patch(self) -> None:
        """Bump patch version number."""
        self.patch += 1

    def is_compatible_with(self, other: "PromptVersion") -> bool:
        """Check if this version is compatible with another version.

        Compatible versions share the same major version number (the
        semantic-versioning compatibility rule).

        Args:
            other: Version to compare with

        Returns:
            bool: True if versions are compatible
        """
        return self.major == other.major

    def is_newer_than(self, other: "PromptVersion") -> bool:
        """Check if this version is strictly newer than another version.

        Args:
            other: Version to compare with

        Returns:
            bool: True if this version is newer
        """
        return self._key() > other._key()

    def to_dict(self) -> Dict[str, Any]:
        """Convert version to dictionary representation.

        Returns:
            Dict[str, Any]: Dictionary with major/minor/patch keys
        """
        return {
            "major": self.major,
            "minor": self.minor,
            "patch": self.patch,
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "PromptVersion":
        """Build a version from a dictionary produced by to_dict().

        Args:
            data: Dictionary with optional major/minor/patch keys; missing
                keys fall back to the constructor defaults.

        Returns:
            PromptVersion: The reconstructed version
        """
        return cls(
            major=data.get("major", 1),
            minor=data.get("minor", 0),
            patch=data.get("patch", 0),
        )

    def __str__(self) -> str:
        """Return 'major.minor.patch'."""
        return f"{self.major}.{self.minor}.{self.patch}"

    def __repr__(self) -> str:
        """Return a constructor-style debug representation."""
        return (
            f"PromptVersion(major={self.major}, "
            f"minor={self.minor}, "
            f"patch={self.patch})"
        )

    def __eq__(self, other: object) -> bool:
        """Component-wise equality; NotImplemented for foreign types."""
        if not isinstance(other, PromptVersion):
            return NotImplemented
        return self._key() == other._key()

    def __hash__(self) -> int:
        """Hash on the equality key.

        Defining __eq__ without __hash__ made instances unhashable, so they
        could not be used in sets or as dict keys; hashing the same tuple
        used for equality restores the eq/hash contract.
        """
        return hash(self._key())

    def __lt__(self, other: object) -> bool:
        """Strict less-than by (major, minor, patch)."""
        if not isinstance(other, PromptVersion):
            return NotImplemented
        return self._key() < other._key()

    def __le__(self, other: object) -> bool:
        """Less-than-or-equal by (major, minor, patch)."""
        if not isinstance(other, PromptVersion):
            return NotImplemented
        return self._key() <= other._key()

    def __gt__(self, other: object) -> bool:
        """Strict greater-than by (major, minor, patch)."""
        if not isinstance(other, PromptVersion):
            return NotImplemented
        return self._key() > other._key()

    def __ge__(self, other: object) -> bool:
        """Greater-than-or-equal by (major, minor, patch)."""
        if not isinstance(other, PromptVersion):
            return NotImplemented
        return self._key() >= other._key()
```
--------------------------------------------------------------------------------
/mcp-project-orchestrator/openssl/mcp_orchestrator/platform_detector.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Platform detection utilities for Cursor configuration deployment.
3 |
4 | This module detects the developer's platform and environment to select
5 | appropriate rule templates and configuration settings.
6 | """
7 |
8 | import platform
9 | import os
10 | from pathlib import Path
11 | from typing import Dict, Any
12 | from datetime import datetime
13 |
14 |
class PlatformDetector:
    """Detect developer platform and environment for Cursor configuration."""

    def __init__(self):
        # Cached result of detect_platform(); populated on first call.
        self._cache: Dict[str, Any] = {}

    def detect_platform(self) -> Dict[str, Any]:
        """
        Detect developer platform and environment.

        The snapshot is computed once and cached; subsequent calls return
        the same dictionary.

        Returns:
            Dictionary containing platform information including OS,
            version, Python version, CI status, and user details.
        """
        # Local import: the module previously relied on an `import sys` at
        # the very bottom of the file, which only worked by import-time
        # accident.  Importing here makes the class self-contained.
        import sys

        if self._cache:
            return self._cache

        system = platform.system().lower()

        # Detect CI environment via the providers' conventional variables.
        is_ci = os.getenv("CI", "false").lower() == "true"
        is_github_actions = os.getenv("GITHUB_ACTIONS", "false").lower() == "true"
        is_gitlab_ci = os.getenv("GITLAB_CI", "false").lower() == "true"
        is_jenkins = os.getenv("JENKINS_URL") is not None

        # Get user information (USERNAME is the Windows equivalent of USER).
        user = os.getenv("USER", os.getenv("USERNAME", "developer"))
        home = str(Path.home())

        # Detect shell: COMSPEC on Windows, SHELL elsewhere.
        shell = os.getenv("SHELL", "/bin/bash")
        if system == "windows":
            shell = os.getenv("COMSPEC", "cmd.exe")

        # Detect development tools (PATH lookup).
        has_git = self._has_command("git")
        has_conan = self._has_command("conan")
        has_cursor = self._has_command("cursor")

        # Detect Python environment.
        python_version = platform.python_version()
        python_implementation = platform.python_implementation()

        # Virtual-env detection covers virtualenv (real_prefix), venv
        # (base_prefix differs from prefix), and the VIRTUAL_ENV variable.
        in_venv = (
            hasattr(sys, 'real_prefix') or
            (hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix) or
            os.getenv("VIRTUAL_ENV") is not None
        )

        platform_info = {
            "os": system,
            "os_version": platform.version(),
            "os_release": platform.release(),
            "architecture": platform.machine(),
            "python_version": python_version,
            "python_implementation": python_implementation,
            "in_venv": in_venv,
            "is_ci": is_ci,
            "is_github_actions": is_github_actions,
            "is_gitlab_ci": is_gitlab_ci,
            "is_jenkins": is_jenkins,
            "user": user,
            "home": home,
            "shell": shell,
            "has_git": has_git,
            "has_conan": has_conan,
            "has_cursor": has_cursor,
            "timestamp": datetime.now().isoformat(),
        }

        self._cache = platform_info
        return platform_info

    def _has_command(self, command: str) -> bool:
        """Check if a command is available in PATH."""
        import shutil
        return shutil.which(command) is not None

    def get_rule_template_name(self) -> str:
        """
        Get the appropriate rule template name based on platform detection.

        Returns:
            Template filename (without .jinja2 extension)
        """
        platform_info = self.detect_platform()

        if platform_info["is_ci"]:
            return "ci-linux"  # Default CI template
        else:
            os_name = platform_info["os"]
            return f"{os_name}-dev"

    def get_mcp_command(self) -> str:
        """
        Get the appropriate MCP command for the platform.

        Returns:
            Command to run MCP servers (npx or npx.cmd)
        """
        platform_info = self.detect_platform()

        if platform_info["os"] == "windows":
            return "npx.cmd"
        else:
            return "npx"

    def is_development_environment(self) -> bool:
        """Check if this is a development environment (not CI)."""
        platform_info = self.detect_platform()
        return not platform_info["is_ci"]

    def get_conan_home(self) -> str:
        """Get the Conan home directory for this platform.

        CONAN_USER_HOME wins when set; otherwise the platform-appropriate
        default of <home>/.conan2 is returned.
        """
        platform_info = self.detect_platform()
        conan_home = os.getenv("CONAN_USER_HOME")

        if conan_home:
            return conan_home

        # Default Conan home
        home = platform_info["home"]
        if platform_info["os"] == "windows":
            return f"{home}\\.conan2"
        else:
            return f"{home}/.conan2"
142 |
143 |
144 | # Import sys for virtual environment detection
145 | import sys
```