This is page 16 of 24. To view the full context, open http://codebase.md/sparesparrow/mcp-project-orchestrator?lines=true&page={x}, replacing {x} with the desired page number (1–24).
# Directory Structure
```
├── .cursorrules
├── .env.example
├── .github
│ └── workflows
│ ├── build.yml
│ ├── ci-cd.yml
│ ├── ci.yml
│ ├── deploy.yml
│ ├── ecosystem-monitor.yml
│ ├── fan-out-orchestrator.yml
│ └── release.yml
├── .gitignore
├── .pre-commit-config.yaml
├── AUTOMOTIVE_CAMERA_SYSTEM_SUMMARY.md
├── automotive-camera-system
│ ├── docs
│ │ └── IMPLEMENTACE_CS.md
│ └── README.md
├── AWS_MCP_IMPLEMENTATION_SUMMARY.md
├── AWS_MCP_QUICKSTART.md
├── AWS_SIP_TRUNK_DEPLOYMENT_COMPLETE.md
├── aws-sip-trunk
│ ├── .gitignore
│ ├── config
│ │ ├── extensions.conf.j2
│ │ └── pjsip.conf.j2
│ ├── DEPLOYMENT_SUMMARY.md
│ ├── docs
│ │ ├── DEPLOYMENT.md
│ │ └── TROUBLESHOOTING.md
│ ├── PROJECT_INDEX.md
│ ├── pyproject.toml
│ ├── QUICKSTART.md
│ ├── README.md
│ ├── scripts
│ │ ├── deploy-asterisk-aws.sh
│ │ └── user-data.sh
│ ├── terraform
│ │ ├── ec2.tf
│ │ ├── main.tf
│ │ ├── monitoring.tf
│ │ ├── networking.tf
│ │ ├── outputs.tf
│ │ ├── storage.tf
│ │ ├── terraform.tfvars.example
│ │ └── variables.tf
│ ├── tests
│ │ └── test_sip_connectivity.py
│ └── VERIFICATION_CHECKLIST.md
├── CLAUDE.md
├── component_templates.json
├── conanfile.py
├── config
│ ├── default.json
│ └── project_orchestration.json
├── Containerfile
├── cursor-templates
│ └── openssl
│ ├── linux-dev.mdc.jinja2
│ └── shared.mdc.jinja2
├── data
│ └── prompts
│ └── templates
│ ├── advanced-multi-server-template.json
│ ├── analysis-assistant.json
│ ├── analyze-mermaid-diagram.json
│ ├── architecture-design-assistant.json
│ ├── code-diagram-documentation-creator.json
│ ├── code-refactoring-assistant.json
│ ├── code-review-assistant.json
│ ├── collaborative-development.json
│ ├── consolidated-interfaces-template.json
│ ├── could-you-interpret-the-assumed-applicat.json
│ ├── data-analysis-template.json
│ ├── database-query-assistant.json
│ ├── debugging-assistant.json
│ ├── development-system-prompt-zcna0.json
│ ├── development-system-prompt.json
│ ├── development-workflow.json
│ ├── docker-compose-prompt-combiner.json
│ ├── docker-containerization-guide.json
│ ├── docker-mcp-servers-orchestration.json
│ ├── foresight-assistant.json
│ ├── generate-different-types-of-questions-ab.json
│ ├── generate-mermaid-diagram.json
│ ├── image-1-describe-the-icon-in-one-sen.json
│ ├── initialize-project-setup-for-a-new-micro.json
│ ├── install-dependencies-build-run-test.json
│ ├── mcp-code-generator.json
│ ├── mcp-integration-assistant.json
│ ├── mcp-resources-explorer.json
│ ├── mcp-resources-integration.json
│ ├── mcp-server-configurator.json
│ ├── mcp-server-dev-prompt-combiner.json
│ ├── mcp-server-integration-template.json
│ ├── mcp-template-system.json
│ ├── mermaid-analysis-expert.json
│ ├── mermaid-class-diagram-generator.json
│ ├── mermaid-diagram-generator.json
│ ├── mermaid-diagram-modifier.json
│ ├── modify-mermaid-diagram.json
│ ├── monorepo-migration-guide.json
│ ├── multi-resource-context.json
│ ├── project-analysis-assistant.json
│ ├── prompt-combiner-interface.json
│ ├── prompt-templates.json
│ ├── repository-explorer.json
│ ├── research-assistant.json
│ ├── sequential-data-analysis.json
│ ├── solid-code-analysis-visualizer.json
│ ├── task-list-helper-8ithy.json
│ ├── template-based-mcp-integration.json
│ ├── templates.json
│ ├── test-prompt.json
│ └── you-are-limited-to-respond-yes-or-no-onl.json
├── docs
│ ├── AWS_MCP.md
│ ├── AWS.md
│ ├── CONAN.md
│ └── integration.md
├── elevenlabs-agents
│ ├── agent-prompts.json
│ └── README.md
├── IMPLEMENTATION_STATUS.md
├── integration_plan.md
├── LICENSE
├── MANIFEST.in
├── mcp-project-orchestrator
│ └── openssl
│ ├── .github
│ │ └── workflows
│ │ └── validate-cursor-config.yml
│ ├── conanfile.py
│ ├── CURSOR_DEPLOYMENT_POLISH.md
│ ├── cursor-rules
│ │ ├── mcp.json.jinja2
│ │ ├── prompts
│ │ │ ├── fips-compliance.md.jinja2
│ │ │ ├── openssl-coding-standards.md.jinja2
│ │ │ └── pr-review.md.jinja2
│ │ └── rules
│ │ ├── ci-linux.mdc.jinja2
│ │ ├── linux-dev.mdc.jinja2
│ │ ├── macos-dev.mdc.jinja2
│ │ ├── shared.mdc.jinja2
│ │ └── windows-dev.mdc.jinja2
│ ├── docs
│ │ └── cursor-configuration-management.md
│ ├── examples
│ │ └── example-workspace
│ │ ├── .cursor
│ │ │ ├── mcp.json
│ │ │ └── rules
│ │ │ ├── linux-dev.mdc
│ │ │ └── shared.mdc
│ │ ├── .gitignore
│ │ ├── CMakeLists.txt
│ │ ├── conanfile.py
│ │ ├── profiles
│ │ │ ├── linux-gcc-debug.profile
│ │ │ └── linux-gcc-release.profile
│ │ ├── README.md
│ │ └── src
│ │ ├── crypto_utils.cpp
│ │ ├── crypto_utils.h
│ │ └── main.cpp
│ ├── IMPLEMENTATION_SUMMARY.md
│ ├── mcp_orchestrator
│ │ ├── __init__.py
│ │ ├── cli.py
│ │ ├── conan_integration.py
│ │ ├── cursor_config.py
│ │ ├── cursor_deployer.py
│ │ ├── deploy_cursor.py
│ │ ├── env_config.py
│ │ ├── platform_detector.py
│ │ └── yaml_validator.py
│ ├── openssl-cursor-example-workspace-20251014_121133.zip
│ ├── pyproject.toml
│ ├── README.md
│ ├── requirements.txt
│ ├── scripts
│ │ └── create_example_workspace.py
│ ├── setup.py
│ ├── test_deployment.py
│ └── tests
│ ├── __init__.py
│ ├── test_cursor_deployer.py
│ └── test_template_validation.py
├── printcast-agent
│ ├── .env.example
│ ├── config
│ │ └── asterisk
│ │ └── extensions.conf
│ ├── Containerfile
│ ├── docker-compose.yml
│ ├── pyproject.toml
│ ├── README.md
│ ├── scripts
│ │ └── docker-entrypoint.sh
│ ├── src
│ │ ├── integrations
│ │ │ ├── __init__.py
│ │ │ ├── asterisk.py
│ │ │ ├── content.py
│ │ │ ├── delivery.py
│ │ │ ├── elevenlabs.py
│ │ │ └── printing.py
│ │ ├── mcp_server
│ │ │ ├── __init__.py
│ │ │ ├── main.py
│ │ │ └── server.py
│ │ └── orchestration
│ │ ├── __init__.py
│ │ └── workflow.py
│ └── tests
│ └── test_mcp_server.py
├── project_orchestration.json
├── project_templates.json
├── pyproject.toml
├── README.md
├── REFACTORING_COMPLETED.md
├── REFACTORING_RECOMMENDATIONS.md
├── requirements.txt
├── scripts
│ ├── archive
│ │ ├── init_claude_test.sh
│ │ ├── init_postgres.sh
│ │ ├── start_mcp_servers.sh
│ │ └── test_claude_desktop.sh
│ ├── consolidate_mermaid.py
│ ├── consolidate_prompts.py
│ ├── consolidate_resources.py
│ ├── consolidate_templates.py
│ ├── INSTRUCTIONS.md
│ ├── README.md
│ ├── setup_aws_mcp.sh
│ ├── setup_mcp.sh
│ ├── setup_orchestrator.sh
│ ├── setup_project.py
│ └── test_mcp.sh
├── src
│ └── mcp_project_orchestrator
│ ├── __init__.py
│ ├── __main__.py
│ ├── aws_mcp.py
│ ├── cli
│ │ └── __init__.py
│ ├── cli.py
│ ├── commands
│ │ └── openssl_cli.py
│ ├── core
│ │ ├── __init__.py
│ │ ├── base.py
│ │ ├── config.py
│ │ ├── exceptions.py
│ │ ├── fastmcp.py
│ │ ├── logging.py
│ │ └── managers.py
│ ├── cursor_deployer.py
│ ├── ecosystem_monitor.py
│ ├── fan_out_orchestrator.py
│ ├── fastmcp.py
│ ├── mcp-py
│ │ ├── AggregateVersions.py
│ │ ├── CustomBashTool.py
│ │ ├── FileAnnotator.py
│ │ ├── mcp-client.py
│ │ ├── mcp-server.py
│ │ ├── MermaidDiagramGenerator.py
│ │ ├── NamingAgent.py
│ │ └── solid-analyzer-agent.py
│ ├── mermaid
│ │ ├── __init__.py
│ │ ├── generator.py
│ │ ├── mermaid_orchestrator.py
│ │ ├── renderer.py
│ │ ├── templates
│ │ │ ├── AbstractFactory-diagram.json
│ │ │ ├── Adapter-diagram.json
│ │ │ ├── Analyze_Mermaid_Diagram.json
│ │ │ ├── Builder-diagram.json
│ │ │ ├── Chain-diagram.json
│ │ │ ├── Code_Diagram_Documentation_Creator.json
│ │ │ ├── Command-diagram.json
│ │ │ ├── Decorator-diagram.json
│ │ │ ├── Facade-diagram.json
│ │ │ ├── Factory-diagram.json
│ │ │ ├── flowchart
│ │ │ │ ├── AbstractFactory-diagram.json
│ │ │ │ ├── Adapter-diagram.json
│ │ │ │ ├── Analyze_Mermaid_Diagram.json
│ │ │ │ ├── Builder-diagram.json
│ │ │ │ ├── Chain-diagram.json
│ │ │ │ ├── Code_Diagram_Documentation_Creator.json
│ │ │ │ ├── Command-diagram.json
│ │ │ │ ├── Decorator-diagram.json
│ │ │ │ ├── Facade-diagram.json
│ │ │ │ ├── Factory-diagram.json
│ │ │ │ ├── Generate_Mermaid_Diagram.json
│ │ │ │ ├── generated_diagram.json
│ │ │ │ ├── integration.json
│ │ │ │ ├── Iterator-diagram.json
│ │ │ │ ├── Mediator-diagram.json
│ │ │ │ ├── Memento-diagram.json
│ │ │ │ ├── Mermaid_Analysis_Expert.json
│ │ │ │ ├── Mermaid_Class_Diagram_Generator.json
│ │ │ │ ├── Mermaid_Diagram_Generator.json
│ │ │ │ ├── Mermaid_Diagram_Modifier.json
│ │ │ │ ├── Modify_Mermaid_Diagram.json
│ │ │ │ ├── Observer-diagram.json
│ │ │ │ ├── Prototype-diagram.json
│ │ │ │ ├── Proxy-diagram.json
│ │ │ │ ├── README.json
│ │ │ │ ├── Singleton-diagram.json
│ │ │ │ ├── State-diagram.json
│ │ │ │ ├── Strategy-diagram.json
│ │ │ │ ├── TemplateMethod-diagram.json
│ │ │ │ ├── theme_dark.json
│ │ │ │ ├── theme_default.json
│ │ │ │ ├── theme_pastel.json
│ │ │ │ ├── theme_vibrant.json
│ │ │ │ └── Visitor-diagram.json
│ │ │ ├── Generate_Mermaid_Diagram.json
│ │ │ ├── generated_diagram.json
│ │ │ ├── index.json
│ │ │ ├── integration.json
│ │ │ ├── Iterator-diagram.json
│ │ │ ├── Mediator-diagram.json
│ │ │ ├── Memento-diagram.json
│ │ │ ├── Mermaid_Analysis_Expert.json
│ │ │ ├── Mermaid_Class_Diagram_Generator.json
│ │ │ ├── Mermaid_Diagram_Generator.json
│ │ │ ├── Mermaid_Diagram_Modifier.json
│ │ │ ├── Modify_Mermaid_Diagram.json
│ │ │ ├── Observer-diagram.json
│ │ │ ├── Prototype-diagram.json
│ │ │ ├── Proxy-diagram.json
│ │ │ ├── README.json
│ │ │ ├── Singleton-diagram.json
│ │ │ ├── State-diagram.json
│ │ │ ├── Strategy-diagram.json
│ │ │ ├── TemplateMethod-diagram.json
│ │ │ ├── theme_dark.json
│ │ │ ├── theme_default.json
│ │ │ ├── theme_pastel.json
│ │ │ ├── theme_vibrant.json
│ │ │ └── Visitor-diagram.json
│ │ └── types.py
│ ├── project_orchestration.py
│ ├── prompt_manager
│ │ ├── __init__.py
│ │ ├── loader.py
│ │ ├── manager.py
│ │ └── template.py
│ ├── prompts
│ │ ├── __dirname.json
│ │ ├── __image_1___describe_the_icon_in_one_sen___.json
│ │ ├── __init__.py
│ │ ├── __type.json
│ │ ├── _.json
│ │ ├── _DEFAULT_OPEN_DELIMITER.json
│ │ ├── _emojiRegex.json
│ │ ├── _UUID_CHARS.json
│ │ ├── a.json
│ │ ├── A.json
│ │ ├── Aa.json
│ │ ├── aAnnotationPadding.json
│ │ ├── absoluteThresholdGroup.json
│ │ ├── add.json
│ │ ├── ADDITIONAL_PROPERTY_FLAG.json
│ │ ├── Advanced_Multi-Server_Integration_Template.json
│ │ ├── allOptionsList.json
│ │ ├── analysis
│ │ │ ├── Data_Analysis_Template.json
│ │ │ ├── index.json
│ │ │ ├── Mermaid_Analysis_Expert.json
│ │ │ ├── Sequential_Data_Analysis_with_MCP_Integration.json
│ │ │ └── SOLID_Code_Analysis_Visualizer.json
│ │ ├── Analysis_Assistant.json
│ │ ├── Analyze_Mermaid_Diagram.json
│ │ ├── ANDROID_EVERGREEN_FIRST.json
│ │ ├── ANSI_ESCAPE_BELL.json
│ │ ├── architecture
│ │ │ ├── index.json
│ │ │ └── PromptCombiner_Interface.json
│ │ ├── Architecture_Design_Assistant.json
│ │ ├── argsTag.json
│ │ ├── ARROW.json
│ │ ├── assistant
│ │ │ ├── Analysis_Assistant.json
│ │ │ ├── Architecture_Design_Assistant.json
│ │ │ ├── Code_Refactoring_Assistant.json
│ │ │ ├── Code_Review_Assistant.json
│ │ │ ├── Database_Query_Assistant.json
│ │ │ ├── Debugging_Assistant.json
│ │ │ ├── Foresight_Assistant.json
│ │ │ ├── index.json
│ │ │ ├── MCP_Integration_Assistant.json
│ │ │ ├── Project_Analysis_Assistant.json
│ │ │ └── Research_Assistant.json
│ │ ├── astralRange.json
│ │ ├── at.json
│ │ ├── authorization_endpoint.json
│ │ ├── b.json
│ │ ├── BABELIGNORE_FILENAME.json
│ │ ├── BACKSLASH.json
│ │ ├── backupId.json
│ │ ├── BANG.json
│ │ ├── BASE64_MAP.json
│ │ ├── baseFlags.json
│ │ ├── Basic_Template.json
│ │ ├── bgModel.json
│ │ ├── bignum.json
│ │ ├── blockKeywordsStr.json
│ │ ├── BOMChar.json
│ │ ├── boundary.json
│ │ ├── brackets.json
│ │ ├── BROWSER_VAR.json
│ │ ├── bt.json
│ │ ├── BUILTIN.json
│ │ ├── BULLET.json
│ │ ├── c.json
│ │ ├── C.json
│ │ ├── CACHE_VERSION.json
│ │ ├── cacheControl.json
│ │ ├── cacheProp.json
│ │ ├── category.py
│ │ ├── CHANGE_EVENT.json
│ │ ├── CHAR_CODE_0.json
│ │ ├── chars.json
│ │ ├── cjsPattern.json
│ │ ├── cKeywords.json
│ │ ├── classForPercent.json
│ │ ├── classStr.json
│ │ ├── clientFirstMessageBare.json
│ │ ├── cmd.json
│ │ ├── Code_Diagram_Documentation_Creator.json
│ │ ├── Code_Refactoring_Assistant.json
│ │ ├── Code_Review_Assistant.json
│ │ ├── code.json
│ │ ├── coding
│ │ │ ├── __dirname.json
│ │ │ ├── _.json
│ │ │ ├── _DEFAULT_OPEN_DELIMITER.json
│ │ │ ├── _emojiRegex.json
│ │ │ ├── _UUID_CHARS.json
│ │ │ ├── a.json
│ │ │ ├── A.json
│ │ │ ├── aAnnotationPadding.json
│ │ │ ├── absoluteThresholdGroup.json
│ │ │ ├── add.json
│ │ │ ├── ADDITIONAL_PROPERTY_FLAG.json
│ │ │ ├── allOptionsList.json
│ │ │ ├── ANDROID_EVERGREEN_FIRST.json
│ │ │ ├── ANSI_ESCAPE_BELL.json
│ │ │ ├── argsTag.json
│ │ │ ├── ARROW.json
│ │ │ ├── astralRange.json
│ │ │ ├── at.json
│ │ │ ├── authorization_endpoint.json
│ │ │ ├── BABELIGNORE_FILENAME.json
│ │ │ ├── BACKSLASH.json
│ │ │ ├── BANG.json
│ │ │ ├── BASE64_MAP.json
│ │ │ ├── baseFlags.json
│ │ │ ├── bgModel.json
│ │ │ ├── bignum.json
│ │ │ ├── blockKeywordsStr.json
│ │ │ ├── BOMChar.json
│ │ │ ├── boundary.json
│ │ │ ├── brackets.json
│ │ │ ├── BROWSER_VAR.json
│ │ │ ├── bt.json
│ │ │ ├── BUILTIN.json
│ │ │ ├── BULLET.json
│ │ │ ├── c.json
│ │ │ ├── C.json
│ │ │ ├── CACHE_VERSION.json
│ │ │ ├── cacheControl.json
│ │ │ ├── cacheProp.json
│ │ │ ├── CHANGE_EVENT.json
│ │ │ ├── CHAR_CODE_0.json
│ │ │ ├── chars.json
│ │ │ ├── cjsPattern.json
│ │ │ ├── cKeywords.json
│ │ │ ├── classForPercent.json
│ │ │ ├── classStr.json
│ │ │ ├── clientFirstMessageBare.json
│ │ │ ├── cmd.json
│ │ │ ├── code.json
│ │ │ ├── colorCode.json
│ │ │ ├── comma.json
│ │ │ ├── command.json
│ │ │ ├── configJsContent.json
│ │ │ ├── connectionString.json
│ │ │ ├── cssClassStr.json
│ │ │ ├── currentBoundaryParse.json
│ │ │ ├── d.json
│ │ │ ├── data.json
│ │ │ ├── DATA.json
│ │ │ ├── dataWebpackPrefix.json
│ │ │ ├── debug.json
│ │ │ ├── decodeStateVectorV2.json
│ │ │ ├── DEFAULT_DELIMITER.json
│ │ │ ├── DEFAULT_DIAGRAM_DIRECTION.json
│ │ │ ├── DEFAULT_JS_PATTERN.json
│ │ │ ├── DEFAULT_LOG_TARGET.json
│ │ │ ├── defaultHelpOpt.json
│ │ │ ├── defaultHost.json
│ │ │ ├── deferY18nLookupPrefix.json
│ │ │ ├── DELIM.json
│ │ │ ├── delimiter.json
│ │ │ ├── DEPRECATION.json
│ │ │ ├── destMain.json
│ │ │ ├── DID_NOT_THROW.json
│ │ │ ├── direction.json
│ │ │ ├── displayValue.json
│ │ │ ├── DNS.json
│ │ │ ├── doc.json
│ │ │ ├── DOCUMENTATION_NOTE.json
│ │ │ ├── DOT.json
│ │ │ ├── DOTS.json
│ │ │ ├── dummyCompoundId.json
│ │ │ ├── e.json
│ │ │ ├── E.json
│ │ │ ├── earlyHintsLink.json
│ │ │ ├── elide.json
│ │ │ ├── EMPTY.json
│ │ │ ├── end.json
│ │ │ ├── endpoint.json
│ │ │ ├── environment.json
│ │ │ ├── ERR_CODE.json
│ │ │ ├── errMessage.json
│ │ │ ├── errMsg.json
│ │ │ ├── ERROR_MESSAGE.json
│ │ │ ├── error.json
│ │ │ ├── ERROR.json
│ │ │ ├── ERRORCLASS.json
│ │ │ ├── errorMessage.json
│ │ │ ├── es6Default.json
│ │ │ ├── ESC.json
│ │ │ ├── Escapable.json
│ │ │ ├── escapedChar.json
│ │ │ ├── escapeFuncStr.json
│ │ │ ├── escSlash.json
│ │ │ ├── ev.json
│ │ │ ├── event.json
│ │ │ ├── execaMessage.json
│ │ │ ├── EXPECTED_LABEL.json
│ │ │ ├── expected.json
│ │ │ ├── expectedString.json
│ │ │ ├── expression1.json
│ │ │ ├── EXTENSION.json
│ │ │ ├── f.json
│ │ │ ├── FAIL_TEXT.json
│ │ │ ├── FILE_BROWSER_FACTORY.json
│ │ │ ├── fill.json
│ │ │ ├── findPackageJson.json
│ │ │ ├── fnKey.json
│ │ │ ├── FORMAT.json
│ │ │ ├── formatted.json
│ │ │ ├── from.json
│ │ │ ├── fullpaths.json
│ │ │ ├── FUNC_ERROR_TEXT.json
│ │ │ ├── GenStateSuspendedStart.json
│ │ │ ├── GENSYNC_EXPECTED_START.json
│ │ │ ├── gutter.json
│ │ │ ├── h.json
│ │ │ ├── handlerFuncName.json
│ │ │ ├── HASH_UNDEFINED.json
│ │ │ ├── head.json
│ │ │ ├── helpMessage.json
│ │ │ ├── HINT_ARG.json
│ │ │ ├── HOOK_RETURNED_NOTHING_ERROR_MESSAGE.json
│ │ │ ├── i.json
│ │ │ ├── id.json
│ │ │ ├── identifier.json
│ │ │ ├── Identifier.json
│ │ │ ├── INDENT.json
│ │ │ ├── indentation.json
│ │ │ ├── index.json
│ │ │ ├── INDIRECTION_FRAGMENT.json
│ │ │ ├── input.json
│ │ │ ├── inputText.json
│ │ │ ├── insert.json
│ │ │ ├── insertPromptQuery.json
│ │ │ ├── INSPECT_MAX_BYTES.json
│ │ │ ├── intToCharMap.json
│ │ │ ├── IS_ITERABLE_SENTINEL.json
│ │ │ ├── IS_KEYED_SENTINEL.json
│ │ │ ├── isConfigType.json
│ │ │ ├── isoSentinel.json
│ │ │ ├── isSourceNode.json
│ │ │ ├── j.json
│ │ │ ├── JAKE_CMD.json
│ │ │ ├── JEST_GLOBAL_NAME.json
│ │ │ ├── JEST_GLOBALS_MODULE_NAME.json
│ │ │ ├── JSON_SYNTAX_CHAR.json
│ │ │ ├── json.json
│ │ │ ├── jsonType.json
│ │ │ ├── jupyter_namespaceObject.json
│ │ │ ├── JUPYTERLAB_DOCMANAGER_PLUGIN_ID.json
│ │ │ ├── k.json
│ │ │ ├── KERNEL_STATUS_ERROR_CLASS.json
│ │ │ ├── key.json
│ │ │ ├── l.json
│ │ │ ├── labelId.json
│ │ │ ├── LATEST_PROTOCOL_VERSION.json
│ │ │ ├── LETTERDASHNUMBER.json
│ │ │ ├── LF.json
│ │ │ ├── LIMIT_REPLACE_NODE.json
│ │ │ ├── logTime.json
│ │ │ ├── lstatkey.json
│ │ │ ├── lt.json
│ │ │ ├── m.json
│ │ │ ├── maliciousPayload.json
│ │ │ ├── mask.json
│ │ │ ├── match.json
│ │ │ ├── matchingDelim.json
│ │ │ ├── MAXIMUM_MESSAGE_SIZE.json
│ │ │ ├── mdcContent.json
│ │ │ ├── MERMAID_DOM_ID_PREFIX.json
│ │ │ ├── message.json
│ │ │ ├── messages.json
│ │ │ ├── meth.json
│ │ │ ├── minimatch.json
│ │ │ ├── MOCK_CONSTRUCTOR_NAME.json
│ │ │ ├── MOCKS_PATTERN.json
│ │ │ ├── moduleDirectory.json
│ │ │ ├── msg.json
│ │ │ ├── mtr.json
│ │ │ ├── multipartType.json
│ │ │ ├── n.json
│ │ │ ├── N.json
│ │ │ ├── name.json
│ │ │ ├── NATIVE_PLATFORM.json
│ │ │ ├── newUrl.json
│ │ │ ├── NM.json
│ │ │ ├── NO_ARGUMENTS.json
│ │ │ ├── NO_DIFF_MESSAGE.json
│ │ │ ├── NODE_MODULES.json
│ │ │ ├── nodeInternalPrefix.json
│ │ │ ├── nonASCIIidentifierStartChars.json
│ │ │ ├── nonKey.json
│ │ │ ├── NOT_A_DOT.json
│ │ │ ├── notCharacterOrDash.json
│ │ │ ├── notebookURL.json
│ │ │ ├── notSelector.json
│ │ │ ├── nullTag.json
│ │ │ ├── num.json
│ │ │ ├── NUMBER.json
│ │ │ ├── o.json
│ │ │ ├── O.json
│ │ │ ├── octChar.json
│ │ │ ├── octetStreamType.json
│ │ │ ├── operators.json
│ │ │ ├── out.json
│ │ │ ├── OUTSIDE_JEST_VM_PROTOCOL.json
│ │ │ ├── override.json
│ │ │ ├── p.json
│ │ │ ├── PACKAGE_FILENAME.json
│ │ │ ├── PACKAGE_JSON.json
│ │ │ ├── packageVersion.json
│ │ │ ├── paddedNumber.json
│ │ │ ├── page.json
│ │ │ ├── parseClass.json
│ │ │ ├── path.json
│ │ │ ├── pathExt.json
│ │ │ ├── pattern.json
│ │ │ ├── PatternBoolean.json
│ │ │ ├── pBuiltins.json
│ │ │ ├── pFloatForm.json
│ │ │ ├── pkg.json
│ │ │ ├── PLUGIN_ID_DOC_MANAGER.json
│ │ │ ├── plusChar.json
│ │ │ ├── PN_CHARS.json
│ │ │ ├── point.json
│ │ │ ├── prefix.json
│ │ │ ├── PRETTY_PLACEHOLDER.json
│ │ │ ├── property_prefix.json
│ │ │ ├── pubkey256.json
│ │ │ ├── Q.json
│ │ │ ├── qmark.json
│ │ │ ├── QO.json
│ │ │ ├── query.json
│ │ │ ├── querystringType.json
│ │ │ ├── queryText.json
│ │ │ ├── r.json
│ │ │ ├── R.json
│ │ │ ├── rangeStart.json
│ │ │ ├── re.json
│ │ │ ├── reI.json
│ │ │ ├── REQUIRED_FIELD_SYMBOL.json
│ │ │ ├── reserve.json
│ │ │ ├── resolvedDestination.json
│ │ │ ├── resolverDir.json
│ │ │ ├── responseType.json
│ │ │ ├── result.json
│ │ │ ├── ROOT_DESCRIBE_BLOCK_NAME.json
│ │ │ ├── ROOT_NAMESPACE_NAME.json
│ │ │ ├── ROOT_TASK_NAME.json
│ │ │ ├── route.json
│ │ │ ├── RUNNING_TEXT.json
│ │ │ ├── s.json
│ │ │ ├── SCHEMA_PATH.json
│ │ │ ├── se.json
│ │ │ ├── SEARCHABLE_CLASS.json
│ │ │ ├── secret.json
│ │ │ ├── selector.json
│ │ │ ├── SEMVER_SPEC_VERSION.json
│ │ │ ├── sensitiveHeaders.json
│ │ │ ├── sep.json
│ │ │ ├── separator.json
│ │ │ ├── SHAPE_STATE.json
│ │ │ ├── shape.json
│ │ │ ├── SHARED.json
│ │ │ ├── short.json
│ │ │ ├── side.json
│ │ │ ├── SNAPSHOT_VERSION.json
│ │ │ ├── SOURCE_MAPPING_PREFIX.json
│ │ │ ├── source.json
│ │ │ ├── sourceMapContent.json
│ │ │ ├── SPACE_SYMBOL.json
│ │ │ ├── SPACE.json
│ │ │ ├── sqlKeywords.json
│ │ │ ├── sranges.json
│ │ │ ├── st.json
│ │ │ ├── ST.json
│ │ │ ├── stack.json
│ │ │ ├── START_HIDING.json
│ │ │ ├── START_OF_LINE.json
│ │ │ ├── startNoTraversal.json
│ │ │ ├── STATES.json
│ │ │ ├── stats.json
│ │ │ ├── statSync.json
│ │ │ ├── storageStatus.json
│ │ │ ├── storageType.json
│ │ │ ├── str.json
│ │ │ ├── stringifiedObject.json
│ │ │ ├── stringPath.json
│ │ │ ├── stringResult.json
│ │ │ ├── stringTag.json
│ │ │ ├── strValue.json
│ │ │ ├── style.json
│ │ │ ├── SUB_NAME.json
│ │ │ ├── subkey.json
│ │ │ ├── SUBPROTOCOL.json
│ │ │ ├── SUITE_NAME.json
│ │ │ ├── symbolPattern.json
│ │ │ ├── symbolTag.json
│ │ │ ├── t.json
│ │ │ ├── T.json
│ │ │ ├── templateDir.json
│ │ │ ├── tempName.json
│ │ │ ├── text.json
│ │ │ ├── time.json
│ │ │ ├── titleSeparator.json
│ │ │ ├── tmpl.json
│ │ │ ├── tn.json
│ │ │ ├── toValue.json
│ │ │ ├── transform.json
│ │ │ ├── trustProxyDefaultSymbol.json
│ │ │ ├── typeArgumentsKey.json
│ │ │ ├── typeKey.json
│ │ │ ├── typeMessage.json
│ │ │ ├── typesRegistryPackageName.json
│ │ │ ├── u.json
│ │ │ ├── UNDEFINED.json
│ │ │ ├── unit.json
│ │ │ ├── UNMATCHED_SURROGATE_PAIR_REPLACE.json
│ │ │ ├── ur.json
│ │ │ ├── USAGE.json
│ │ │ ├── value.json
│ │ │ ├── Vr.json
│ │ │ ├── watchmanURL.json
│ │ │ ├── webkit.json
│ │ │ ├── xhtml.json
│ │ │ ├── XP_DEFAULT_PATHEXT.json
│ │ │ └── y.json
│ │ ├── Collaborative_Development_with_MCP_Integration.json
│ │ ├── colorCode.json
│ │ ├── comma.json
│ │ ├── command.json
│ │ ├── completionShTemplate.json
│ │ ├── configJsContent.json
│ │ ├── connectionString.json
│ │ ├── Consolidated_TypeScript_Interfaces_Template.json
│ │ ├── Could_you_interpret_the_assumed_applicat___.json
│ │ ├── cssClassStr.json
│ │ ├── currentBoundaryParse.json
│ │ ├── d.json
│ │ ├── Data_Analysis_Template.json
│ │ ├── data.json
│ │ ├── DATA.json
│ │ ├── Database_Query_Assistant.json
│ │ ├── dataWebpackPrefix.json
│ │ ├── debug.json
│ │ ├── Debugging_Assistant.json
│ │ ├── decodeStateVectorV2.json
│ │ ├── DEFAULT_DELIMITER.json
│ │ ├── DEFAULT_DIAGRAM_DIRECTION.json
│ │ ├── DEFAULT_INDENT.json
│ │ ├── DEFAULT_JS_PATTERN.json
│ │ ├── DEFAULT_LOG_TARGET.json
│ │ ├── defaultHelpOpt.json
│ │ ├── defaultHost.json
│ │ ├── deferY18nLookupPrefix.json
│ │ ├── DELIM.json
│ │ ├── delimiter.json
│ │ ├── DEPRECATION.json
│ │ ├── DESCENDING.json
│ │ ├── destMain.json
│ │ ├── development
│ │ │ ├── Collaborative_Development_with_MCP_Integration.json
│ │ │ ├── Consolidated_TypeScript_Interfaces_Template.json
│ │ │ ├── Development_Workflow.json
│ │ │ ├── index.json
│ │ │ ├── MCP_Server_Development_Prompt_Combiner.json
│ │ │ └── Monorepo_Migration_and_Code_Organization_Guide.json
│ │ ├── Development_System_Prompt.json
│ │ ├── Development_Workflow.json
│ │ ├── devops
│ │ │ ├── Docker_Compose_Prompt_Combiner.json
│ │ │ ├── Docker_Containerization_Guide.json
│ │ │ └── index.json
│ │ ├── DID_NOT_THROW.json
│ │ ├── direction.json
│ │ ├── displayValue.json
│ │ ├── DNS.json
│ │ ├── doc.json
│ │ ├── Docker_Compose_Prompt_Combiner.json
│ │ ├── Docker_Containerization_Guide.json
│ │ ├── Docker_MCP_Servers_Orchestration_Guide.json
│ │ ├── DOCUMENTATION_NOTE.json
│ │ ├── DOT.json
│ │ ├── DOTS.json
│ │ ├── dummyCompoundId.json
│ │ ├── e.json
│ │ ├── E.json
│ │ ├── earlyHintsLink.json
│ │ ├── elide.json
│ │ ├── EMPTY.json
│ │ ├── encoded.json
│ │ ├── end.json
│ │ ├── endpoint.json
│ │ ├── environment.json
│ │ ├── ERR_CODE.json
│ │ ├── errMessage.json
│ │ ├── errMsg.json
│ │ ├── ERROR_MESSAGE.json
│ │ ├── error.json
│ │ ├── ERROR.json
│ │ ├── ERRORCLASS.json
│ │ ├── errorMessage.json
│ │ ├── es6Default.json
│ │ ├── ESC.json
│ │ ├── Escapable.json
│ │ ├── escapedChar.json
│ │ ├── escapeFuncStr.json
│ │ ├── escSlash.json
│ │ ├── ev.json
│ │ ├── event.json
│ │ ├── execaMessage.json
│ │ ├── EXPECTED_LABEL.json
│ │ ├── expected.json
│ │ ├── expectedString.json
│ │ ├── expression1.json
│ │ ├── EXTENSION.json
│ │ ├── f.json
│ │ ├── FAIL_TEXT.json
│ │ ├── FILE_BROWSER_FACTORY.json
│ │ ├── fill.json
│ │ ├── findPackageJson.json
│ │ ├── fnKey.json
│ │ ├── Foresight_Assistant.json
│ │ ├── FORMAT.json
│ │ ├── formatted.json
│ │ ├── from.json
│ │ ├── fullpaths.json
│ │ ├── FUNC_ERROR_TEXT.json
│ │ ├── general
│ │ │ └── index.json
│ │ ├── Generate_different_types_of_questions_ab___.json
│ │ ├── Generate_Mermaid_Diagram.json
│ │ ├── GenStateSuspendedStart.json
│ │ ├── GENSYNC_EXPECTED_START.json
│ │ ├── GitHub_Repository_Explorer.json
│ │ ├── gutter.json
│ │ ├── h.json
│ │ ├── handlerFuncName.json
│ │ ├── HASH_UNDEFINED.json
│ │ ├── head.json
│ │ ├── helpMessage.json
│ │ ├── HINT_ARG.json
│ │ ├── HOOK_RETURNED_NOTHING_ERROR_MESSAGE.json
│ │ ├── i.json
│ │ ├── id.json
│ │ ├── identifier.json
│ │ ├── Identifier.json
│ │ ├── INDENT.json
│ │ ├── indentation.json
│ │ ├── index.json
│ │ ├── INDIRECTION_FRAGMENT.json
│ │ ├── Initialize_project_setup_for_a_new_micro___.json
│ │ ├── input.json
│ │ ├── inputText.json
│ │ ├── insert.json
│ │ ├── insertPromptQuery.json
│ │ ├── INSPECT_MAX_BYTES.json
│ │ ├── install_dependencies__build__run__test____.json
│ │ ├── intToCharMap.json
│ │ ├── IS_ITERABLE_SENTINEL.json
│ │ ├── IS_KEYED_SENTINEL.json
│ │ ├── isConfigType.json
│ │ ├── isoSentinel.json
│ │ ├── isSourceNode.json
│ │ ├── j.json
│ │ ├── J.json
│ │ ├── JAKE_CMD.json
│ │ ├── JEST_GLOBAL_NAME.json
│ │ ├── JEST_GLOBALS_MODULE_NAME.json
│ │ ├── JSON_SYNTAX_CHAR.json
│ │ ├── json.json
│ │ ├── jsonType.json
│ │ ├── jupyter_namespaceObject.json
│ │ ├── JUPYTERLAB_DOCMANAGER_PLUGIN_ID.json
│ │ ├── k.json
│ │ ├── KERNEL_STATUS_ERROR_CLASS.json
│ │ ├── key.json
│ │ ├── l.json
│ │ ├── labelId.json
│ │ ├── LATEST_PROTOCOL_VERSION.json
│ │ ├── LETTERDASHNUMBER.json
│ │ ├── LF.json
│ │ ├── LIMIT_REPLACE_NODE.json
│ │ ├── LINE_FEED.json
│ │ ├── logTime.json
│ │ ├── lstatkey.json
│ │ ├── lt.json
│ │ ├── m.json
│ │ ├── maliciousPayload.json
│ │ ├── manager.py
│ │ ├── marker.json
│ │ ├── mask.json
│ │ ├── match.json
│ │ ├── matchingDelim.json
│ │ ├── MAXIMUM_MESSAGE_SIZE.json
│ │ ├── MCP_Integration_Assistant.json
│ │ ├── MCP_Resources_Explorer.json
│ │ ├── MCP_Resources_Integration_Guide.json
│ │ ├── MCP_Server_Development_Prompt_Combiner.json
│ │ ├── MCP_Server_Integration_Guide.json
│ │ ├── mcp-code-generator.json
│ │ ├── mdcContent.json
│ │ ├── Mermaid_Analysis_Expert.json
│ │ ├── Mermaid_Class_Diagram_Generator.json
│ │ ├── Mermaid_Diagram_Generator.json
│ │ ├── Mermaid_Diagram_Modifier.json
│ │ ├── MERMAID_DOM_ID_PREFIX.json
│ │ ├── message.json
│ │ ├── messages.json
│ │ ├── meth.json
│ │ ├── minimatch.json
│ │ ├── MOBILE_QUERY.json
│ │ ├── MOCK_CONSTRUCTOR_NAME.json
│ │ ├── MOCKS_PATTERN.json
│ │ ├── Modify_Mermaid_Diagram.json
│ │ ├── moduleDirectory.json
│ │ ├── Monorepo_Migration_and_Code_Organization_Guide.json
│ │ ├── msg.json
│ │ ├── mtr.json
│ │ ├── Multi-Resource_Context_Assistant.json
│ │ ├── multipartType.json
│ │ ├── n.json
│ │ ├── N.json
│ │ ├── name.json
│ │ ├── NATIVE_PLATFORM.json
│ │ ├── newUrl.json
│ │ ├── NM.json
│ │ ├── NO_ARGUMENTS.json
│ │ ├── NO_DIFF_MESSAGE.json
│ │ ├── NODE_MODULES.json
│ │ ├── nodeInternalPrefix.json
│ │ ├── nonASCIIidentifierStartChars.json
│ │ ├── nonKey.json
│ │ ├── NOT_A_DOT.json
│ │ ├── notCharacterOrDash.json
│ │ ├── notebookURL.json
│ │ ├── notSelector.json
│ │ ├── nullTag.json
│ │ ├── num.json
│ │ ├── NUMBER.json
│ │ ├── o.json
│ │ ├── O.json
│ │ ├── octChar.json
│ │ ├── octetStreamType.json
│ │ ├── operators.json
│ │ ├── other
│ │ │ ├── __image_1___describe_the_icon_in_one_sen___.json
│ │ │ ├── __type.json
│ │ │ ├── Advanced_Multi-Server_Integration_Template.json
│ │ │ ├── Analyze_Mermaid_Diagram.json
│ │ │ ├── Basic_Template.json
│ │ │ ├── Code_Diagram_Documentation_Creator.json
│ │ │ ├── Collaborative_Development_with_MCP_Integration.json
│ │ │ ├── completionShTemplate.json
│ │ │ ├── Could_you_interpret_the_assumed_applicat___.json
│ │ │ ├── DEFAULT_INDENT.json
│ │ │ ├── Docker_MCP_Servers_Orchestration_Guide.json
│ │ │ ├── Generate_different_types_of_questions_ab___.json
│ │ │ ├── Generate_Mermaid_Diagram.json
│ │ │ ├── GitHub_Repository_Explorer.json
│ │ │ ├── index.json
│ │ │ ├── Initialize_project_setup_for_a_new_micro___.json
│ │ │ ├── install_dependencies__build__run__test____.json
│ │ │ ├── LINE_FEED.json
│ │ │ ├── MCP_Resources_Explorer.json
│ │ │ ├── MCP_Resources_Integration_Guide.json
│ │ │ ├── MCP_Server_Integration_Guide.json
│ │ │ ├── mcp-code-generator.json
│ │ │ ├── Mermaid_Class_Diagram_Generator.json
│ │ │ ├── Mermaid_Diagram_Generator.json
│ │ │ ├── Mermaid_Diagram_Modifier.json
│ │ │ ├── Modify_Mermaid_Diagram.json
│ │ │ ├── Multi-Resource_Context_Assistant.json
│ │ │ ├── output.json
│ │ │ ├── sseUrl.json
│ │ │ ├── string.json
│ │ │ ├── Task_List_Helper.json
│ │ │ ├── Template-Based_MCP_Integration.json
│ │ │ ├── Test_Prompt.json
│ │ │ ├── type.json
│ │ │ ├── VERSION.json
│ │ │ ├── WIN_SLASH.json
│ │ │ └── You_are_limited_to_respond_Yes_or_No_onl___.json
│ │ ├── out.json
│ │ ├── output.json
│ │ ├── OUTSIDE_JEST_VM_PROTOCOL.json
│ │ ├── override.json
│ │ ├── p.json
│ │ ├── PACKAGE_FILENAME.json
│ │ ├── PACKAGE_JSON.json
│ │ ├── packageVersion.json
│ │ ├── paddedNumber.json
│ │ ├── page.json
│ │ ├── parseClass.json
│ │ ├── PATH_NODE_MODULES.json
│ │ ├── path.json
│ │ ├── pathExt.json
│ │ ├── pattern.json
│ │ ├── PatternBoolean.json
│ │ ├── pBuiltins.json
│ │ ├── pFloatForm.json
│ │ ├── pkg.json
│ │ ├── PLUGIN_ID_DOC_MANAGER.json
│ │ ├── plusChar.json
│ │ ├── PN_CHARS.json
│ │ ├── point.json
│ │ ├── prefix.json
│ │ ├── PRETTY_PLACEHOLDER.json
│ │ ├── Project_Analysis_Assistant.json
│ │ ├── ProjectsUpdatedInBackgroundEvent.json
│ │ ├── PromptCombiner_Interface.json
│ │ ├── promptId.json
│ │ ├── property_prefix.json
│ │ ├── pubkey256.json
│ │ ├── Q.json
│ │ ├── qmark.json
│ │ ├── QO.json
│ │ ├── query.json
│ │ ├── querystringType.json
│ │ ├── queryText.json
│ │ ├── r.json
│ │ ├── R.json
│ │ ├── rangeStart.json
│ │ ├── re.json
│ │ ├── reI.json
│ │ ├── REQUIRED_FIELD_SYMBOL.json
│ │ ├── Research_Assistant.json
│ │ ├── reserve.json
│ │ ├── resolvedDestination.json
│ │ ├── resolverDir.json
│ │ ├── responseType.json
│ │ ├── result.json
│ │ ├── ROOT_DESCRIBE_BLOCK_NAME.json
│ │ ├── ROOT_NAMESPACE_NAME.json
│ │ ├── ROOT_TASK_NAME.json
│ │ ├── route.json
│ │ ├── RUNNING_TEXT.json
│ │ ├── RXstyle.json
│ │ ├── s.json
│ │ ├── SCHEMA_PATH.json
│ │ ├── schemaQuery.json
│ │ ├── se.json
│ │ ├── SEARCHABLE_CLASS.json
│ │ ├── secret.json
│ │ ├── selector.json
│ │ ├── SEMVER_SPEC_VERSION.json
│ │ ├── sensitiveHeaders.json
│ │ ├── sep.json
│ │ ├── separator.json
│ │ ├── Sequential_Data_Analysis_with_MCP_Integration.json
│ │ ├── SHAPE_STATE.json
│ │ ├── shape.json
│ │ ├── SHARED.json
│ │ ├── short.json
│ │ ├── side.json
│ │ ├── SNAPSHOT_VERSION.json
│ │ ├── SOLID_Code_Analysis_Visualizer.json
│ │ ├── SOURCE_MAPPING_PREFIX.json
│ │ ├── source.json
│ │ ├── sourceMapContent.json
│ │ ├── SPACE_SYMBOL.json
│ │ ├── SPACE.json
│ │ ├── sqlKeywords.json
│ │ ├── sranges.json
│ │ ├── sseUrl.json
│ │ ├── st.json
│ │ ├── ST.json
│ │ ├── stack.json
│ │ ├── START_HIDING.json
│ │ ├── START_OF_LINE.json
│ │ ├── startNoTraversal.json
│ │ ├── STATES.json
│ │ ├── stats.json
│ │ ├── statSync.json
│ │ ├── status.json
│ │ ├── storageStatus.json
│ │ ├── storageType.json
│ │ ├── str.json
│ │ ├── string.json
│ │ ├── stringifiedObject.json
│ │ ├── stringPath.json
│ │ ├── stringResult.json
│ │ ├── stringTag.json
│ │ ├── strValue.json
│ │ ├── style.json
│ │ ├── SUB_NAME.json
│ │ ├── subkey.json
│ │ ├── SUBPROTOCOL.json
│ │ ├── SUITE_NAME.json
│ │ ├── symbolPattern.json
│ │ ├── symbolTag.json
│ │ ├── system
│ │ │ ├── Aa.json
│ │ │ ├── b.json
│ │ │ ├── Development_System_Prompt.json
│ │ │ ├── index.json
│ │ │ ├── marker.json
│ │ │ ├── PATH_NODE_MODULES.json
│ │ │ ├── ProjectsUpdatedInBackgroundEvent.json
│ │ │ ├── RXstyle.json
│ │ │ ├── status.json
│ │ │ └── versionMajorMinor.json
│ │ ├── t.json
│ │ ├── T.json
│ │ ├── Task_List_Helper.json
│ │ ├── Template-Based_MCP_Integration.json
│ │ ├── template.py
│ │ ├── templateDir.json
│ │ ├── tempName.json
│ │ ├── Test_Prompt.json
│ │ ├── text.json
│ │ ├── time.json
│ │ ├── titleSeparator.json
│ │ ├── tmpl.json
│ │ ├── tn.json
│ │ ├── TOPBAR_FACTORY.json
│ │ ├── toValue.json
│ │ ├── transform.json
│ │ ├── trustProxyDefaultSymbol.json
│ │ ├── txt.json
│ │ ├── type.json
│ │ ├── typeArgumentsKey.json
│ │ ├── typeKey.json
│ │ ├── typeMessage.json
│ │ ├── typesRegistryPackageName.json
│ │ ├── u.json
│ │ ├── UNDEFINED.json
│ │ ├── unit.json
│ │ ├── UNMATCHED_SURROGATE_PAIR_REPLACE.json
│ │ ├── ur.json
│ │ ├── usage.json
│ │ ├── USAGE.json
│ │ ├── user
│ │ │ ├── backupId.json
│ │ │ ├── DESCENDING.json
│ │ │ ├── encoded.json
│ │ │ ├── index.json
│ │ │ ├── J.json
│ │ │ ├── MOBILE_QUERY.json
│ │ │ ├── promptId.json
│ │ │ ├── schemaQuery.json
│ │ │ ├── TOPBAR_FACTORY.json
│ │ │ ├── txt.json
│ │ │ └── usage.json
│ │ ├── value.json
│ │ ├── VERSION.json
│ │ ├── version.py
│ │ ├── versionMajorMinor.json
│ │ ├── Vr.json
│ │ ├── watchmanURL.json
│ │ ├── webkit.json
│ │ ├── WIN_SLASH.json
│ │ ├── xhtml.json
│ │ ├── XP_DEFAULT_PATHEXT.json
│ │ ├── y.json
│ │ └── You_are_limited_to_respond_Yes_or_No_onl___.json
│ ├── resources
│ │ ├── __init__.py
│ │ ├── code_examples
│ │ │ └── index.json
│ │ ├── config
│ │ │ └── index.json
│ │ ├── documentation
│ │ │ └── index.json
│ │ ├── images
│ │ │ └── index.json
│ │ ├── index.json
│ │ └── other
│ │ └── index.json
│ ├── server.py
│ ├── templates
│ │ ├── __init__.py
│ │ ├── AbstractFactory.json
│ │ ├── Adapter.json
│ │ ├── base.py
│ │ ├── Builder.json
│ │ ├── Chain.json
│ │ ├── Command.json
│ │ ├── component
│ │ │ ├── AbstractFactory.json
│ │ │ ├── Adapter.json
│ │ │ ├── Builder.json
│ │ │ ├── Chain.json
│ │ │ ├── Command.json
│ │ │ ├── Decorator.json
│ │ │ ├── Facade.json
│ │ │ ├── Factory.json
│ │ │ ├── Iterator.json
│ │ │ ├── Mediator.json
│ │ │ ├── Memento.json
│ │ │ ├── Observer.json
│ │ │ ├── Prototype.json
│ │ │ ├── Proxy.json
│ │ │ ├── Singleton.json
│ │ │ ├── State.json
│ │ │ ├── Strategy.json
│ │ │ ├── TemplateMethod.json
│ │ │ └── Visitor.json
│ │ ├── component.py
│ │ ├── Decorator.json
│ │ ├── Facade.json
│ │ ├── Factory.json
│ │ ├── index.json
│ │ ├── Iterator.json
│ │ ├── manager.py
│ │ ├── Mediator.json
│ │ ├── Memento.json
│ │ ├── Observer.json
│ │ ├── project.py
│ │ ├── Prototype.json
│ │ ├── Proxy.json
│ │ ├── renderer.py
│ │ ├── Singleton.json
│ │ ├── State.json
│ │ ├── Strategy.json
│ │ ├── template_manager.py
│ │ ├── TemplateMethod.json
│ │ ├── types.py
│ │ └── Visitor.json
│ └── utils
│ └── __init__.py
├── SUMMARY.md
├── TASK_COMPLETION_SUMMARY.md
├── templates
│ └── openssl
│ ├── files
│ │ ├── CMakeLists.txt.jinja2
│ │ ├── conanfile.py.jinja2
│ │ ├── main.cpp.jinja2
│ │ └── README.md.jinja2
│ ├── openssl-consumer.json
│ └── template.json
├── test_openssl_integration.sh
├── test_package
│ └── conanfile.py
└── tests
├── __init__.py
├── conftest.py
├── integration
│ ├── test_core_integration.py
│ ├── test_mermaid_integration.py
│ ├── test_prompt_manager_integration.py
│ └── test_server_integration.py
├── test_aws_mcp.py
├── test_base_classes.py
├── test_config.py
├── test_exceptions.py
├── test_mermaid.py
├── test_prompts.py
└── test_templates.py
```
# Files
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/templates/template_manager.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Template Manager module for MCP Project Orchestrator.
4 |
5 | This module manages the retrieval, selection, and application of project templates
6 | and component templates. It loads templates from JSON files, allows selection
7 | based on design patterns, applies templates by creating the project structure,
8 | and generates basic documentation.
9 | """
10 |
import os
import json
import re
from dataclasses import dataclass
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
18 |
@dataclass
class TemplateVersion:
    """Semantic-version metadata attached to a loaded template."""
    major: int
    minor: int
    patch: int
    created_at: datetime
    updated_at: datetime

    @classmethod
    def from_string(cls, version_str: str) -> 'TemplateVersion':
        """Parse a dotted version string such as '1.2.3' into a TemplateVersion.

        Raises ValueError when the string does not contain exactly three
        integer components.
        """
        parts = [int(piece) for piece in version_str.split('.')]
        major, minor, patch = parts
        timestamp = datetime.now()
        return cls(major, minor, patch, timestamp, timestamp)

    def __str__(self) -> str:
        """Render the version back in dotted 'major.minor.patch' form."""
        return "{}.{}.{}".format(self.major, self.minor, self.patch)
37 |
38 | class TemplateManager:
39 | """
40 | Manager for project and component templates.
41 |
42 | Attributes:
43 | templates_path: Optional path to the templates directory or file.
44 | project_templates: List of project templates loaded from JSON.
45 | component_templates: List of component templates loaded from JSON.
46 | template_versions: Dictionary mapping template names to their versions.
47 | template_inheritance: Dictionary tracking template inheritance relationships.
48 | """
49 |
50 | def __init__(self, templates_path: Optional[str] = None) -> None:
51 | """
52 | Initialize the TemplateManager.
53 |
54 | Args:
55 | templates_path: Optional path to templates. If not provided, defaults to
56 | reading 'project_templates.json' and 'component_templates.json'
57 | from the current working directory.
58 | """
59 | self.templates_path = templates_path
60 | self.template_versions: Dict[str, TemplateVersion] = {}
61 | self.template_inheritance: Dict[str, List[str]] = {}
62 | self.project_templates = self._load_templates("project_templates.json")
63 | self.component_templates = self._load_templates("component_templates.json")
64 |
65 | def _validate_template(self, template: Dict[str, Any]) -> Tuple[bool, str]:
66 | """
67 | Validate a template's structure and content.
68 |
69 | Args:
70 | template: The template dictionary to validate.
71 |
72 | Returns:
73 | A tuple of (is_valid, error_message).
74 | """
75 | required_fields = ["project_name", "version", "description", "components"]
76 |
77 | # Check required fields
78 | for field in required_fields:
79 | if field not in template:
80 | return False, f"Missing required field: {field}"
81 |
82 | # Validate version format
83 | version = template.get("version", "")
84 | if not re.match(r'^\d+\.\d+\.\d+$', version):
85 | return False, "Invalid version format. Expected: X.Y.Z"
86 |
87 | # Validate components structure
88 | components = template.get("components", [])
89 | if not isinstance(components, list):
90 | return False, "Components must be a list"
91 |
92 | for comp in components:
93 | if not isinstance(comp, dict) or "name" not in comp:
94 | return False, "Each component must be a dictionary with at least a 'name' field"
95 |
96 | return True, ""
97 |
98 | def _load_templates(self, filename: str) -> List[Dict[str, Any]]:
99 | """
100 | Load templates from the specified JSON file.
101 |
102 | Args:
103 | filename: The JSON file name to load templates from.
104 |
105 | Returns:
106 | A list of template dictionaries. If file not found or error occurs, returns an empty list.
107 | """
108 | paths_to_try = [
109 | self.templates_path if self.templates_path else filename,
110 | os.path.join(os.getcwd(), filename),
111 | os.path.join(os.getcwd(), "templates", filename),
112 | os.path.join(Path.home(), ".mcp", "templates", filename)
113 | ]
114 |
115 | for path in paths_to_try:
116 | if os.path.exists(path):
117 | try:
118 | with open(path, "r") as f:
119 | templates = json.load(f)
120 | if not isinstance(templates, list):
121 | continue
122 |
123 | # Validate and process each template
124 | valid_templates = []
125 | for template in templates:
126 | is_valid, error = self._validate_template(template)
127 | if is_valid:
128 | # Process version
129 | name = template["project_name"]
130 | version = template.get("version", "0.1.0")
131 | self.template_versions[name] = TemplateVersion.from_string(version)
132 |
133 | # Process inheritance
134 | if "extends" in template:
135 | parent = template["extends"]
136 | if parent not in self.template_inheritance:
137 | self.template_inheritance[parent] = []
138 | self.template_inheritance[parent].append(name)
139 |
140 | valid_templates.append(template)
141 |
142 | return valid_templates
143 | except (json.JSONDecodeError, OSError):
144 | continue
145 |
146 | return []
147 |
148 | def get_template_version(self, template_name: str) -> Optional[TemplateVersion]:
149 | """
150 | Get the version information for a template.
151 |
152 | Args:
153 | template_name: Name of the template.
154 |
155 | Returns:
156 | TemplateVersion object if found, None otherwise.
157 | """
158 | return self.template_versions.get(template_name)
159 |
160 | def get_derived_templates(self, template_name: str) -> List[str]:
161 | """
162 | Get all templates that inherit from the specified template.
163 |
164 | Args:
165 | template_name: Name of the base template.
166 |
167 | Returns:
168 | List of template names that inherit from the specified template.
169 | """
170 | return self.template_inheritance.get(template_name, [])
171 |
172 | def get_project_templates(self) -> List[Dict[str, Any]]:
173 | """
174 | Retrieve project templates.
175 |
176 | Returns:
177 | A list of project templates.
178 | """
179 | return self.project_templates
180 |
181 | def get_component_templates(self) -> List[Dict[str, Any]]:
182 | """
183 | Retrieve component templates.
184 |
185 | Returns:
186 | A list of component templates.
187 | """
188 | return self.component_templates
189 |
190 | def _merge_templates(self, child: Dict[str, Any], parent: Dict[str, Any]) -> Dict[str, Any]:
191 | """
192 | Merge a child template with its parent template.
193 |
194 | Args:
195 | child: The child template dictionary.
196 | parent: The parent template dictionary.
197 |
198 | Returns:
199 | A new dictionary containing the merged template.
200 | """
201 | merged = parent.copy()
202 |
203 | # Merge basic fields
204 | for field in ["project_name", "description", "version"]:
205 | if field in child:
206 | merged[field] = child[field]
207 |
208 | # Merge keywords
209 | merged["keywords"] = list(set(parent.get("keywords", []) + child.get("keywords", [])))
210 |
211 | # Merge components with override support
212 | parent_components = {comp["name"]: comp for comp in parent.get("components", [])}
213 | child_components = {comp["name"]: comp for comp in child.get("components", [])}
214 |
215 | # Start with parent components
216 | final_components = parent_components.copy()
217 |
218 | # Override or add child components
219 | final_components.update(child_components)
220 |
221 | merged["components"] = list(final_components.values())
222 |
223 | return merged
224 |
225 | def get_template_with_inheritance(self, template_name: str) -> Optional[Dict[str, Any]]:
226 | """
227 | Get a template with all inherited properties merged.
228 |
229 | Args:
230 | template_name: Name of the template to retrieve.
231 |
232 | Returns:
233 | The merged template dictionary if found, None otherwise.
234 | """
235 | template = next((t for t in self.project_templates if t["project_name"] == template_name), None)
236 | if not template:
237 | return None
238 |
239 | # If template extends another, merge with parent
240 | if "extends" in template:
241 | parent_name = template["extends"]
242 | parent = self.get_template_with_inheritance(parent_name) # Recursive call for nested inheritance
243 | if parent:
244 | template = self._merge_templates(template, parent)
245 |
246 | return template
247 |
248 | def reload_templates(self) -> None:
249 | """Reload all templates from disk."""
250 | self.template_versions.clear()
251 | self.template_inheritance.clear()
252 | self.project_templates = self._load_templates("project_templates.json")
253 | self.component_templates = self._load_templates("component_templates.json")
254 |
255 | def watch_templates(self, callback: Optional[callable] = None) -> None:
256 | """
257 | Start watching template files for changes.
258 |
259 | Args:
260 | callback: Optional function to call when templates are reloaded.
261 | """
262 | from watchdog.observers import Observer
263 | from watchdog.events import FileSystemEventHandler
264 |
265 | class TemplateHandler(FileSystemEventHandler):
266 | def __init__(self, manager: 'TemplateManager', callback: Optional[callable]):
267 | self.manager = manager
268 | self.callback = callback
269 |
270 | def on_modified(self, event):
271 | if event.src_path.endswith(('.json')):
272 | self.manager.reload_templates()
273 | if self.callback:
274 | self.callback()
275 |
276 | paths_to_watch = [
277 | os.getcwd(),
278 | os.path.join(os.getcwd(), "templates"),
279 | os.path.join(Path.home(), ".mcp", "templates")
280 | ]
281 |
282 | observer = Observer()
283 | handler = TemplateHandler(self, callback)
284 |
285 | for path in paths_to_watch:
286 | if os.path.exists(path):
287 | observer.schedule(handler, path, recursive=False)
288 |
289 | observer.start()
290 |
291 | def select_template(self, description: str, patterns: List[str]) -> str:
292 | """
293 | Select an appropriate template based on the project description and design patterns.
294 |
295 | Args:
296 | description: Project description.
297 | patterns: List of identified design patterns.
298 |
299 | Returns:
300 | The name of the selected template. If no template matches, returns a default template name.
301 | """
302 | # Enhanced template selection logic
303 | best_match = None
304 | max_score = -1
305 |
306 | for template in self.project_templates:
307 | score = 0
308 |
309 | # Score based on keyword matches
310 | keywords = template.get("keywords", [])
311 | for pattern in patterns:
312 | if pattern in keywords:
313 | score += 2
314 |
315 | # Score based on description similarity
316 | template_desc = template.get("description", "").lower()
317 | description = description.lower()
318 | common_words = set(template_desc.split()) & set(description.split())
319 | score += len(common_words)
320 |
321 | # Check inheritance - templates that are more specialized (inherit from others) get a bonus
322 | if "extends" in template:
323 | score += 1
324 |
325 | if score > max_score:
326 | max_score = score
327 | best_match = template
328 |
329 | if best_match:
330 | return best_match.get("project_name", "DefaultProject")
331 |
332 | # Fallback to first template if available
333 | if self.project_templates:
334 | return self.project_templates[0].get("project_name", "DefaultProject")
335 |
336 | return "DefaultProject"
337 |
338 | def apply_template(self, template_name: str, project_name: str, description: str,
339 | patterns: List[str], output_dir: str) -> Dict[str, Any]:
340 | """
341 | Apply the selected template: create the project directory structure and placeholder files.
342 |
343 | Args:
344 | template_name: Name of the template to use.
345 | project_name: Name of the new project.
346 | description: Description of the project.
347 | patterns: List of design patterns.
348 | output_dir: Directory where the project will be created.
349 |
350 | Returns:
351 | A dictionary containing the project path and a success message; otherwise, error details.
352 | """
353 | # Find the template by matching project_name
354 | template = next((t for t in self.project_templates if t.get("project_name") == template_name), None)
355 | if not template:
356 | return {"error": f"Template '{template_name}' not found."}
357 |
358 | project_path = os.path.join(output_dir, project_name)
359 | if os.path.exists(project_path):
360 | return {"error": f"Project '{project_name}' already exists."}
361 |
362 | try:
363 | # Create project structure directories
364 | os.makedirs(os.path.join(project_path, "src", "components"), exist_ok=True)
365 | os.makedirs(os.path.join(project_path, "src", "interfaces"), exist_ok=True)
366 | os.makedirs(os.path.join(project_path, "src", "services"), exist_ok=True)
367 | os.makedirs(os.path.join(project_path, "src", "utils"), exist_ok=True)
368 | os.makedirs(os.path.join(project_path, "tests"), exist_ok=True)
369 | os.makedirs(os.path.join(project_path, "docs"), exist_ok=True)
370 |
371 | # Generate placeholder files for each component defined in the template
372 | components = template.get("components", [])
373 | for comp in components:
374 | comp_name = comp.get("name", "Component")
375 | # Create interface file
376 | interface_path = os.path.join(project_path, "src", "interfaces", f"i_{comp_name.lower()}.py")
377 | with open(interface_path, "w") as f:
378 | f.write(f"# TODO: Define interface methods for {comp_name}\nclass I{comp_name}:\n pass\n")
379 | # Create implementation file
380 | impl_path = os.path.join(project_path, "src", "components", f"{comp_name.lower()}.py")
381 | with open(impl_path, "w") as f:
382 | f.write(f"# TODO: Implement {comp_name} logic\nclass {comp_name}:\n pass\n")
383 | # Create service file (optional placeholder)
384 | service_path = os.path.join(project_path, "src", "services", f"{comp_name.lower()}_service.py")
385 | with open(service_path, "w") as f:
386 | f.write(f"# TODO: Implement service logic for {comp_name}\n")
387 | # Create a basic README file
388 | readme_path = os.path.join(project_path, "README.md")
389 | with open(readme_path, "w") as f:
390 | f.write(f"# {project_name}\n\n{description}\n")
391 |
392 | return {"project_path": project_path, "message": "Project created successfully."}
393 | except Exception as e:
394 | return {"error": str(e)}
395 |
396 | def generate_documentation(self, project_path: str) -> str:
397 | """
398 | Generate documentation for the project at the given path.
399 |
400 | Args:
401 | project_path: The path to the project directory.
402 |
403 | Returns:
404 | A string containing the generated documentation in Markdown format.
405 | """
406 | # Generate a placeholder README documentation
407 | doc = f"# Project Documentation\n\nProject path: {project_path}\n\n---\n\nThis documentation is auto-generated based on the project template."
408 | return doc
409 |
```
--------------------------------------------------------------------------------
/data/prompts/templates/mcp-server-integration-template.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "id": "mcp-server-integration-template",
3 | "name": "MCP Server Integration Guide",
4 | "description": "A comprehensive template for planning, configuring, and integrating multiple MCP servers into a cohesive ecosystem",
5 | "content": "# MCP Server Integration Guide\n\nI'll help you integrate multiple MCP servers to create a powerful AI context ecosystem for {{project_name}}. By combining specialized MCP servers, you can significantly enhance AI capabilities beyond what a single model can provide.\n\n## Project Requirements Analysis\n\n### Core Use Case\n\nYour primary use case for MCP server integration is:\n- **{{primary_use_case}}**\n\n### Key Requirements\n\nBased on your use case, we'll focus on these requirements:\n1. {{requirement_1}}\n2. {{requirement_2}}\n3. {{requirement_3}}\n\n## MCP Server Selection\n\nBased on your requirements, I recommend these MCP servers:\n\n### Core Infrastructure\n- **{{primary_mcp_server}}**: {{primary_server_description}}\n- **{{secondary_mcp_server}}**: {{secondary_server_description}}\n- **{{tertiary_mcp_server}}**: {{tertiary_server_description}}\n\n### Supporting Services\n- Additional servers to consider: {{additional_servers}}\n\n## Integration Architecture\n\n```mermaid\ngraph TD\n Client[AI Client] --> |Requests| Primary[{{primary_mcp_server}}]\n Primary --> |Data Flow| Secondary[{{secondary_mcp_server}}]\n Primary --> |Data Flow| Tertiary[{{tertiary_mcp_server}}]\n \n subgraph \"Core MCP Ecosystem\"\n Primary\n Secondary\n Tertiary\n end\n```\n\n## Configuration and Setup\n\n### Installation Steps\n\n1. **{{primary_mcp_server}}**:\n ```bash\n {{primary_installation_command}}\n ```\n\n2. **{{secondary_mcp_server}}**:\n ```bash\n {{secondary_installation_command}}\n ```\n\n3. 
**{{tertiary_mcp_server}}**:\n ```bash\n {{tertiary_installation_command}}\n ```\n\n### Claude Desktop Configuration\n\n```json\n{\n \"mcpServers\": {\n \"{{primary_mcp_server_id}}\": {\n \"command\": \"{{primary_command}}\",\n \"args\": [{{primary_args}}],\n \"env\": {\n {{primary_env_vars}}\n }\n },\n \"{{secondary_mcp_server_id}}\": {\n \"command\": \"{{secondary_command}}\",\n \"args\": [{{secondary_args}}],\n \"env\": {\n {{secondary_env_vars}}\n }\n },\n \"{{tertiary_mcp_server_id}}\": {\n \"command\": \"{{tertiary_command}}\",\n \"args\": [{{tertiary_args}}],\n \"env\": {\n {{tertiary_env_vars}}\n }\n }\n }\n}\n```\n\n### Docker Compose Integration\n\n```yaml\nversion: '3'\nservices:\n {{primary_mcp_server_id}}:\n image: {{primary_image}}\n environment:\n - {{primary_environment_1}}\n - {{primary_environment_2}}\n volumes:\n - {{primary_volume_mapping}}\n ports:\n - \"{{primary_port_mapping}}\"\n \n {{secondary_mcp_server_id}}:\n image: {{secondary_image}}\n environment:\n - {{secondary_environment_1}}\n - {{secondary_environment_2}}\n volumes:\n - {{secondary_volume_mapping}}\n ports:\n - \"{{secondary_port_mapping}}\"\n \n {{tertiary_mcp_server_id}}:\n image: {{tertiary_image}}\n environment:\n - {{tertiary_environment_1}}\n - {{tertiary_environment_2}}\n volumes:\n - {{tertiary_volume_mapping}}\n ports:\n - \"{{tertiary_port_mapping}}\"\n```\n\n## Integration Patterns\n\n### Data Flow\n\nFor your use case, I recommend the following data flow pattern:\n\n```\n{{data_flow_pattern}}\n```\n\n### Communication Model\n\nThe optimal communication model for your servers is:\n**{{communication_model}}**\n\nRationale: {{communication_rationale}}\n\n## Best Practices for Your Integration\n\n1. **Performance Optimization**: {{performance_recommendation}}\n2. **Security Considerations**: {{security_recommendation}}\n3. **Error Handling**: {{error_handling_recommendation}}\n4. 
**Testing Strategy**: {{testing_recommendation}}\n\n## MCP Server Interaction Examples\n\n### Example 1: {{example_scenario_1}}\n\n```javascript\n// Client-side code example\nuse_mcp_tool({\n server_name: \"{{primary_mcp_server_id}}\",\n tool_name: \"{{example_tool_1}}\",\n arguments: {\n {{example_args_1}}\n }\n});\n```\n\n### Example 2: {{example_scenario_2}}\n\n```javascript\n// Client-side code example\nuse_mcp_tool({\n server_name: \"{{secondary_mcp_server_id}}\",\n tool_name: \"{{example_tool_2}}\",\n arguments: {\n {{example_args_2}}\n }\n});\n```\n\n## Troubleshooting Guide\n\n| Problem | Possible Cause | Solution |\n|---------|----------------|----------|\n| {{problem_1}} | {{cause_1}} | {{solution_1}} |\n| {{problem_2}} | {{cause_2}} | {{solution_2}} |\n| {{problem_3}} | {{cause_3}} | {{solution_3}} |\n\n## Next Steps\n\n1. {{next_step_1}}\n2. {{next_step_2}}\n3. {{next_step_3}}\n\nWould you like me to elaborate on any specific aspect of this MCP server integration plan?",
6 | "variables": [
7 | "project_name",
8 | "primary_use_case",
9 | "requirement_1",
10 | "requirement_2",
11 | "requirement_3",
12 | "primary_mcp_server",
13 | "primary_server_description",
14 | "secondary_mcp_server",
15 | "secondary_server_description",
16 | "tertiary_mcp_server",
17 | "tertiary_server_description",
18 | "additional_servers",
19 | "primary_installation_command",
20 | "secondary_installation_command",
21 | "tertiary_installation_command",
22 | "primary_mcp_server_id",
23 | "primary_command",
24 | "primary_args",
25 | "primary_env_vars",
26 | "secondary_mcp_server_id",
27 | "secondary_command",
28 | "secondary_args",
29 | "secondary_env_vars",
30 | "tertiary_mcp_server_id",
31 | "tertiary_command",
32 | "tertiary_args",
33 | "tertiary_env_vars",
34 | "primary_image",
35 | "primary_environment_1",
36 | "primary_environment_2",
37 | "primary_volume_mapping",
38 | "primary_port_mapping",
39 | "secondary_image",
40 | "secondary_environment_1",
41 | "secondary_environment_2",
42 | "secondary_volume_mapping",
43 | "secondary_port_mapping",
44 | "tertiary_image",
45 | "tertiary_environment_1",
46 | "tertiary_environment_2",
47 | "tertiary_volume_mapping",
48 | "tertiary_port_mapping",
49 | "data_flow_pattern",
50 | "communication_model",
51 | "communication_rationale",
52 | "performance_recommendation",
53 | "security_recommendation",
54 | "error_handling_recommendation",
55 | "testing_recommendation",
56 | "example_scenario_1",
57 | "example_tool_1",
58 | "example_args_1",
59 | "example_scenario_2",
60 | "example_tool_2",
61 | "example_args_2",
62 | "problem_1",
63 | "cause_1",
64 | "solution_1",
65 | "problem_2",
66 | "cause_2",
67 | "solution_2",
68 | "problem_3",
69 | "cause_3",
70 | "solution_3",
71 | "next_step_1",
72 | "next_step_2",
73 | "next_step_3"
74 | ],
75 | "examples": [
76 | {
77 | "name": "Development Environment Integration",
78 | "values": {
79 | "project_name": "AI-Enhanced Development Environment",
80 | "primary_use_case": "Creating an integrated development environment that enhances coding, documentation, and testing with AI assistance",
81 | "requirement_1": "Code repository analysis and exploration",
82 | "requirement_2": "Database query and schema analysis",
83 | "requirement_3": "Documentation generation and enhancement",
84 | "primary_mcp_server": "github",
85 | "primary_server_description": "Integrates with GitHub repositories to provide code context and exploration",
86 | "secondary_mcp_server": "filesystem",
87 | "secondary_server_description": "Provides access to local project files and configuration",
88 | "tertiary_mcp_server": "postgres",
89 | "tertiary_server_description": "Allows database exploration and SQL query execution",
90 | "additional_servers": "prompts, sequential-thinking, memory",
91 | "primary_installation_command": "npx -y @modelcontextprotocol/server-github",
92 | "secondary_installation_command": "npx -y @modelcontextprotocol/server-filesystem /path/to/workspace",
93 | "tertiary_installation_command": "npx -y @modelcontextprotocol/server-postgres postgresql://localhost/mydb",
94 | "primary_mcp_server_id": "github",
95 | "primary_command": "npx",
96 | "primary_args": "\"-y\", \"@modelcontextprotocol/server-github\"",
97 | "primary_env_vars": "\"GITHUB_PERSONAL_ACCESS_TOKEN\": \"your-token-here\"",
98 | "secondary_mcp_server_id": "filesystem",
99 | "secondary_command": "npx",
100 | "secondary_args": "\"-y\", \"@modelcontextprotocol/server-filesystem\", \"/path/to/workspace\"",
101 | "secondary_env_vars": "",
102 | "tertiary_mcp_server_id": "postgres",
103 | "tertiary_command": "npx",
104 | "tertiary_args": "\"-y\", \"@modelcontextprotocol/server-postgres\", \"postgresql://localhost/mydb\"",
105 | "tertiary_env_vars": "",
106 | "primary_image": "node:alpine",
107 | "primary_environment_1": "GITHUB_PERSONAL_ACCESS_TOKEN=your-token-here",
108 | "primary_environment_2": "PORT=3001",
109 | "primary_volume_mapping": "./data:/data",
110 | "primary_port_mapping": "3001:3000",
111 | "secondary_image": "node:alpine",
112 | "secondary_environment_1": "PORT=3002",
113 | "secondary_environment_2": "",
114 | "secondary_volume_mapping": "./workspace:/workspace",
115 | "secondary_port_mapping": "3002:3000",
116 | "tertiary_image": "node:alpine",
117 | "tertiary_environment_1": "PORT=3003",
118 | "tertiary_environment_2": "",
119 | "tertiary_volume_mapping": "./pgdata:/var/lib/postgresql/data",
120 | "tertiary_port_mapping": "3003:3000",
121 | "data_flow_pattern": "GitHub → Filesystem → Postgres → Client, with bidirectional flows as needed",
122 | "communication_model": "Hub and Spoke with GitHub as the central hub",
123 | "communication_rationale": "Centralizing around GitHub allows for repository-centric workflows, which matches most development scenarios",
124 | "performance_recommendation": "Use volume mounting for filesystem paths to minimize container rebuild times during development",
125 | "security_recommendation": "Utilize environment variables and Docker secrets for sensitive tokens and credentials",
126 | "error_handling_recommendation": "Implement retries with exponential backoff for GitHub API requests to handle rate limiting",
127 | "testing_recommendation": "Create a test suite with mock repositories to validate cross-server integration before production use",
128 | "example_scenario_1": "Exploring a repository",
129 | "example_tool_1": "list_repositories",
130 | "example_args_1": "owner: \"username\", limit: 5",
131 | "example_scenario_2": "Reading project files",
132 | "example_tool_2": "read_directory",
133 | "example_args_2": "path: \"/workspace/src\"",
134 | "problem_1": "GitHub API rate limiting",
135 | "cause_1": "Too many requests in a short time period",
136 | "solution_1": "Implement caching and rate limiting in the client code",
137 | "problem_2": "Permission denied for filesystem",
138 | "cause_2": "Container user doesn't have access to mounted volumes",
139 | "solution_2": "Check file permissions and user IDs in container",
140 | "problem_3": "Database connection issues",
141 | "cause_3": "Incorrect connection string or database not running",
142 | "solution_3": "Verify database is running and connection parameters are correct",
143 | "next_step_1": "Set up Docker Compose environment with the three core MCP servers",
144 | "next_step_2": "Configure Claude Desktop to use these MCP servers",
145 | "next_step_3": "Create sample prompts that utilize multiple servers for code exploration tasks"
146 | }
147 | },
148 | {
149 | "name": "Content Creation Ecosystem",
150 | "values": {
151 | "project_name": "AI-Powered Content Creation Suite",
152 | "primary_use_case": "Building a sophisticated content creation system with research, drafting, and media generation capabilities",
153 | "requirement_1": "Real-time web research and citation gathering",
154 | "requirement_2": "Automated content generation with template support",
155 | "requirement_3": "Text-to-speech conversion for audio content",
156 | "primary_mcp_server": "brave-search",
157 | "primary_server_description": "Provides up-to-date web search capabilities for research",
158 | "secondary_mcp_server": "prompts",
159 | "secondary_server_description": "Manages content templates and generation patterns",
160 | "tertiary_mcp_server": "elevenlabs",
161 | "tertiary_server_description": "Converts text to high-quality speech for podcasts or audio content",
162 | "additional_servers": "memory, filesystem",
163 | "primary_installation_command": "npx -y @modelcontextprotocol/server-brave-search",
164 | "secondary_installation_command": "npx -y @sparesparrow/mcp-prompts",
165 | "tertiary_installation_command": "uvx elevenlabs-mcp-server",
166 | "primary_mcp_server_id": "brave-search",
167 | "primary_command": "npx",
168 | "primary_args": "\"-y\", \"@modelcontextprotocol/server-brave-search\"",
169 | "primary_env_vars": "\"BRAVE_API_KEY\": \"your-brave-api-key\"",
170 | "secondary_mcp_server_id": "prompts",
171 | "secondary_command": "npx",
172 | "secondary_args": "\"-y\", \"@sparesparrow/mcp-prompts\"",
173 | "secondary_env_vars": "\"STORAGE_TYPE\": \"file\", \"PROMPTS_DIR\": \"/path/to/prompts\"",
174 | "tertiary_mcp_server_id": "elevenlabs",
175 | "tertiary_command": "uvx",
176 | "tertiary_args": "\"elevenlabs-mcp-server\"",
177 | "tertiary_env_vars": "\"ELEVENLABS_API_KEY\": \"your-elevenlabs-api-key\", \"ELEVENLABS_VOICE_ID\": \"preferred-voice-id\"",
178 | "primary_image": "node:alpine",
179 | "primary_environment_1": "BRAVE_API_KEY=your-brave-api-key",
180 | "primary_environment_2": "PORT=3001",
181 | "primary_volume_mapping": "./data:/data",
182 | "primary_port_mapping": "3001:3000",
183 | "secondary_image": "sparesparrow/mcp-prompts:latest",
184 | "secondary_environment_1": "STORAGE_TYPE=file",
185 | "secondary_environment_2": "PROMPTS_DIR=/app/data/prompts",
186 | "secondary_volume_mapping": "./prompts:/app/data/prompts",
187 | "secondary_port_mapping": "3002:3000",
188 | "tertiary_image": "node:alpine",
189 | "tertiary_environment_1": "ELEVENLABS_API_KEY=your-elevenlabs-api-key",
190 | "tertiary_environment_2": "ELEVENLABS_VOICE_ID=preferred-voice-id",
191 | "tertiary_volume_mapping": "./audio:/app/data/audio",
192 | "tertiary_port_mapping": "3003:3000",
193 | "data_flow_pattern": "Brave Search → Prompts → ElevenLabs → Client, with the option to store results in Memory or Filesystem",
194 | "communication_model": "Pipeline Processing",
195 | "communication_rationale": "Content creation naturally follows a linear workflow from research to drafting to audio production",
196 | "performance_recommendation": "Cache search results from Brave Search to minimize API usage and improve response times",
197 | "security_recommendation": "Store all API keys in environment variables and never expose them in generated content",
198 | "error_handling_recommendation": "Implement fallback voices for ElevenLabs in case the primary voice is unavailable",
199 | "testing_recommendation": "Create sample prompts that exercise the full pipeline from research to audio generation",
200 | "example_scenario_1": "Researching a topic",
201 | "example_tool_1": "search",
202 | "example_args_1": "query: \"latest developments in AI assistants 2025\"",
203 | "example_scenario_2": "Generating an article template",
204 | "example_tool_2": "apply_template",
205 | "example_args_2": "template_id: \"blog-article\", variables: {topic: \"AI advancements\", tone: \"educational\"}",
206 | "problem_1": "Brave Search API limits exceeded",
207 | "cause_1": "Too many searches in a short time period",
208 | "solution_1": "Implement rate limiting and caching for search results",
209 | "problem_2": "Missing prompts or templates",
210 | "cause_2": "Incorrect path to prompts directory",
211 | "solution_2": "Verify PROMPTS_DIR environment variable points to existing directory",
212 | "problem_3": "ElevenLabs audio generation fails",
213 | "cause_3": "Invalid API key or voice ID",
214 | "solution_3": "Check API key validity and available voices through ElevenLabs dashboard",
215 | "next_step_1": "Set up Docker Compose environment with all three MCP servers",
216 | "next_step_2": "Create a set of content templates in the prompts server",
217 | "next_step_3": "Develop a sample workflow that demonstrates research, content generation, and audio production"
218 | }
219 | }
220 | ],
221 | "categories": ["integration", "multi-server", "configuration", "advanced", "docker"]
222 | }
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/project_orchestration.py:
--------------------------------------------------------------------------------
```python
1 | import os
2 | import json
3 | from typing import List, Dict, Optional
4 | from dotenv import load_dotenv
5 |
6 | # Import our own FastMCP implementation
7 | from .fastmcp import FastMCP
8 |
9 | # Load environment variables
10 | load_dotenv()
11 |
12 | # Import AWS MCP integration
13 | try:
14 | from .aws_mcp import register_aws_mcp_tools, AWSConfig
15 | AWS_MCP_AVAILABLE = True
16 | except ImportError:
17 | AWS_MCP_AVAILABLE = False
18 | print("Warning: AWS MCP integration not available. Install boto3 to enable AWS features.")
19 |
# Load MCP configuration from JSON file
# NOTE(review): the path is relative, so it resolves against the process's
# current working directory at import time — confirm the server is always
# launched from the project root.
CONFIG_FILE = 'project_orchestration.json'
with open(CONFIG_FILE, 'r') as config_file:
    MCP_CONFIG = json.load(config_file)

# MCP configuration details (e.g., communication_protocol, mcp_compliance) are now available in MCP_CONFIG

# Directory for projects
# Created eagerly at import time so the orchestration tools can assume it exists.
PROJECTS_DIR = './projects'
os.makedirs(PROJECTS_DIR, exist_ok=True)

# Load project templates from JSON file
# Expected shape (per usage in apply_project_template/select_template): a list
# of dicts, each with "project_name", "description", and "components" keys.
with open('project_templates.json', 'r') as f:
    PROJECT_TEMPLATES = json.load(f)
34 |
# Comprehensive README template aligned with JSON requirements.
# Placeholders use {{name}} markers and are filled by plain str.replace()
# in apply_project_template (no template engine involved).
README_TEMPLATE = """
# {{project_name}}

## Overview
{{project_name}} is designed to {{primary_purpose}} using {{design}} patterns, adhering to systematic approaches for maintainability and scalability.

## Architecture
### Design Patterns
{{design_patterns}}

### Software Architecture
{{software_architecture}}

### Components and Modules
{{components_section}}

### Relationships
{{relationships}}

### Interfaces
{{interfaces_section}}

### Communication Protocols
{{communication_protocols}}

### Technologies
{{technologies}}

### Dependencies
{{dependencies}}

### Commands
- **Installation**: `{{install_command}}`
- **Build**: `{{build_command}}`
- **Run**: `{{run_command}}`
- **Test**: `{{test_command}}`

## File Structure
{{project_structure}}

## Implementation Strategy
{{implementation_strategy}}

## Mermaid Diagrams
{{mermaid_diagrams}}

## Instructions for Composer Implementor Agent
{{instructions}}
"""

# Initialize MCP server
mcp = FastMCP("ProjectOrchestrator")
mcp.config = MCP_CONFIG  # attach configuration to MCP server instance

# NOTE: the triple-quoted string below is a bare expression statement, not a
# module docstring (it is not at the top of the file); it has no runtime
# effect and serves purely as in-source documentation.
'''
MCP Project Orchestrator Server
-------------------------------
This MCP server orchestrates the creation and configuration of new software projects.
It performs the following steps:
1. Extracts key design patterns and architecture concepts from user input.
2. Selects an appropriate project template from a standardized catalogue.
3. Applies the template by creating well-structured directories and placeholder files.
4. Generates comprehensive documentation including software architecture, components, process flows, and file structures.

The server configuration is loaded from 'project_orchestration.json', which defines overall settings such as communication protocols and compliance standards.

Developers can extend or modify this orchestration process by updating the template definitions or the configuration JSON.
'''
104 |
# Tool: Analyze design patterns and architecture
@mcp.tool()
def analyze_design_patterns(idea: str) -> Dict[str, List[str]]:
    """Identify design patterns and architecture concepts hinted at in an idea.

    Performs a case-insensitive keyword scan of the user's idea and maps each
    recognized keyword to a (design pattern, architecture concept) pair.

    Args:
        idea: Free-form project description supplied by the user.

    Returns:
        Dict with keys "design_patterns" and "architectures", each a
        de-duplicated list in keyword-discovery order. Defaults to
        Modular Monolith / Monolithic Architecture when nothing matches.
    """
    # Keyword -> (design pattern, architecture concept). Dict literal order
    # determines the order in which matches are appended to the result lists.
    keyword_map = {
        "microservices": ("Microservices Architecture", "Distributed System"),
        "event": ("Event-Driven Architecture", "Asynchronous Processing"),
        "async": ("Event-Driven Architecture", "Asynchronous Processing"),
        "data": ("Repository Pattern", "Layered Architecture"),
        "repository": ("Repository Pattern", "Layered Architecture"),
        "cqrs": ("CQRS", "Event Sourcing"),
        "client": ("Client-Server", "Request-Response"),
        "server": ("Client-Server", "Request-Response"),
        "modular": ("Modular Monolith", "Monolithic Architecture"),
        "serverless": ("Serverless Architecture", "Function-as-a-Service"),
        "bridge": ("Bridge Pattern", "Abstraction Separation"),
        "composite": ("Composite Pattern", "Tree Structure"),
        "flyweight": ("Flyweight Pattern", "Memory Optimization"),
        "strategy": ("Strategy Pattern", "Behavioral Flexibility"),
        "template": ("Template Method Pattern", "Algorithm Skeleton"),
        "visitor": ("Visitor Pattern", "Operation Separation")
    }

    text = idea.lower()
    patterns: List[str] = []
    architectures: List[str] = []

    for keyword, (pattern, arch) in keyword_map.items():
        if keyword not in text:
            continue
        # De-duplicate: several keywords can map to the same pair.
        if pattern not in patterns:
            patterns.append(pattern)
        if arch not in architectures:
            architectures.append(arch)

    # Sensible default when the idea mentions no known keyword.
    if not patterns:
        patterns.append("Modular Monolith")
        architectures.append("Monolithic Architecture")

    return {"design_patterns": patterns, "architectures": architectures}
144 |
# Tool: Generate Mermaid diagrams (aligned with JSON's MermaidTool)
@mcp.tool()
def mermaid_tool(diagram_planning: str, template_name: Optional[str] = None) -> str:
    """Generate Mermaid diagrams for visualization based on planning.

    Args:
        diagram_planning: Free-text description of the desired diagram. The
            substrings "architecture", "file structure", and "process flow"
            (checked in that order) select the diagram family.
        template_name: Optional project template name used to specialize the
            diagram (names containing "Microservices" or "EventDriven" get
            dedicated architecture diagrams; a known template name yields a
            component-aware file-structure diagram).

    Returns:
        A fenced ```mermaid code block as a string; a placeholder diagram
        when no keyword in diagram_planning is recognized.
    """
    planning_lower = diagram_planning.lower()
    if "architecture" in planning_lower:
        # Template-specific architecture diagrams first; generic fallback below.
        if template_name and "Microservices" in template_name:
            return (
                "```mermaid\n"
                "graph TD\n"
                " A[API Gateway] --> B[UserService]\n"
                " A --> C[OrderService]\n"
                " B --> D[UserDB]\n"
                " C --> E[OrderDB]\n"
                " B --> F[MessageQueue]\n"
                " C --> F\n"
                "```\n"
            )
        elif template_name and "EventDriven" in template_name:
            return (
                "```mermaid\n"
                "graph TD\n"
                " A[EventProducer] --> B[EventBus]\n"
                " B --> C[EventConsumer]\n"
                " C --> D[EventStore]\n"
                "```\n"
            )
        # Generic architecture diagram for all other templates.
        return (
            "```mermaid\n"
            "graph TD\n"
            " A[CoreModule] --> B[Services]\n"
            " B --> C[Utilities]\n"
            " A --> D[Database]\n"
            "```\n"
        )
    elif "file structure" in planning_lower:
        if template_name:
            # Look the template up so component names appear in the tree.
            template = next((t for t in PROJECT_TEMPLATES if t["project_name"] == template_name), None)
            if template:
                # One F<i> node per component, all attached to src/components (node E).
                components = "\n".join([f" E --> F{i+1}[{c['name']}]" for i, c in enumerate(template["components"])])
                return (
                    "```mermaid\n"
                    "graph TD\n"
                    " A[ProjectRoot] --> B[src]\n"
                    " A --> C[tests]\n"
                    " A --> D[docs]\n"
                    " B --> E[components]\n"
                    f"{components}\n"
                    " B --> G[interfaces]\n"
                    " B --> H[services]\n"
                    " B --> I[utils]\n"
                    "```\n"
                )
        # Generic file-structure diagram when the template is unknown.
        return (
            "```mermaid\n"
            "graph TD\n"
            " A[ProjectRoot] --> B[src]\n"
            " A --> C[tests]\n"
            " A --> D[docs]\n"
            " B --> E[components]\n"
            " B --> F[interfaces]\n"
            " B --> G[services]\n"
            " B --> H[utils]\n"
            "```\n"
        )
    elif "process flow" in planning_lower:
        return (
            "```mermaid\n"
            "sequenceDiagram\n"
            " participant U as User\n"
            " participant S as System\n"
            " U->>S: Initiate Action\n"
            " S-->>U: Process Result\n"
            "```\n"
        )
    # No recognized keyword: return a valid but empty diagram block.
    return "```mermaid\n%% Placeholder diagram\n```"
221 |
# Tool: Apply project template
@mcp.tool()
def apply_project_template(template_name: str, project_name: str, user_idea: str, design_info: Dict[str, List[str]]) -> str:
    """Apply a template and create comprehensive documentation.

    Creates the project directory tree under PROJECTS_DIR, writes placeholder
    interface/component/service/test files for every template component, and
    renders a README.md from README_TEMPLATE.

    Args:
        user_idea: Original user idea. NOTE(review): currently unused in this
            function — kept for interface stability; confirm before removing.
        design_info: Output of analyze_design_patterns, with keys
            "design_patterns" and "architectures".

    Returns:
        A success message, or a string starting with "Error:" when the
        template is unknown or the project directory already exists
        (callers match on the substring "Error").
    """
    template = next((t for t in PROJECT_TEMPLATES if t["project_name"] == template_name), None)
    if not template:
        return f"Error: Template '{template_name}' not found."

    project_path = os.path.join(PROJECTS_DIR, project_name)
    if os.path.exists(project_path):
        return f"Error: Project '{project_name}' already exists."

    # Step 5: Prepare detailed file structure
    os.makedirs(os.path.join(project_path, "src", "components"), exist_ok=True)
    os.makedirs(os.path.join(project_path, "src", "interfaces"), exist_ok=True)
    os.makedirs(os.path.join(project_path, "src", "services"), exist_ok=True)
    os.makedirs(os.path.join(project_path, "src", "utils"), exist_ok=True)
    os.makedirs(os.path.join(project_path, "tests"), exist_ok=True)
    os.makedirs(os.path.join(project_path, "docs"), exist_ok=True)

    # Generate component files with consistent names and TODOs
    components_section = ""
    interfaces_section = ""
    relationships = ""
    communication_protocols = "REST API, Message Queues" if "Microservices" in template_name else "Internal Function Calls"

    for i, component in enumerate(template["components"]):
        name = component["name"]
        # Interface
        interface_file = f"i_{name.lower()}.py"
        with open(os.path.join(project_path, "src", "interfaces", interface_file), "w") as f:
            f.write(f"# TODO: Define interface methods for {name}\nclass I{name}:\n pass\n")
        # Implementation
        impl_file = f"{name.lower()}.py"
        with open(os.path.join(project_path, "src", "components", impl_file), "w") as f:
            f.write(f"# TODO: Implement {name} logic\nclass {name}:\n pass\n")
        # Service (if applicable)
        service_file = f"{name.lower()}_service.py"
        with open(os.path.join(project_path, "src", "services", service_file), "w") as f:
            f.write(f"# TODO: Implement service logic for {name}\n")
        # Test
        test_file = f"test_{name.lower()}.py"
        with open(os.path.join(project_path, "tests", test_file), "w") as f:
            f.write(f"# TODO: Write unit tests for {name}\n")

        # Markdown bullet linking every generated file for this component.
        components_section += (
            f"- **{name}**: {component.get('description', 'TBD')}\n"
            f" - Interface: [{interface_file}](./src/interfaces/{interface_file})\n"
            f" - Implementation: [{impl_file}](./src/components/{impl_file})\n"
            f" - Service: [{service_file}](./src/services/{service_file})\n"
            f" - Tests: [{test_file}](./tests/{test_file})\n"
        )
        interfaces_section += f"class I{name}:\n # TODO: Define {name} methods\n pass\n\n"
        # Chain each component to its predecessor to sketch the interactions.
        if i > 0:
            relationships += f"- {template['components'][i-1]['name']} interacts with {name} via {communication_protocols}\n"

    # Step 4: Comprehensive documentation
    design_patterns = "- " + "\n- ".join(design_info["design_patterns"])
    software_architecture = "- " + "\n- ".join(design_info["architectures"])
    technologies = "Python, Flask, Docker, Kafka" if "Microservices" in template_name else "Python, Django"
    dependencies = "requests, pytest, docker, confluent-kafka" if "Microservices" in template_name else "django, pytest"
    install_command = "pip install -r requirements.txt"
    build_command = "docker build ." if "Microservices" in template_name else "python manage.py migrate"
    run_command = "docker-compose up" if "Microservices" in template_name else "python manage.py runserver"
    test_command = "pytest"

    # File structure visualization
    # NOTE(review): mermaid_tool is called directly here although it is
    # decorated with @mcp.tool(); this assumes the decorator returns the
    # original callable — confirm against the FastMCP implementation in use.
    project_structure = mermaid_tool("file structure", template_name)

    # Step 6: Implementation strategy
    impl_order = "\n".join([f"{i+1}. src/components/{c['name'].lower()}.py" for i, c in enumerate(template["components"])])
    implementation_strategy = (
        f"### File Implementation Order\n{impl_order}\n"
        "### Testing Strategies\n- Unit Tests: Use pytest for component-level testing.\n- Integration Tests: Verify inter-component interactions.\n"
        f"### Build and Deployment\n- Build: `{build_command}`\n- Deploy: Use Docker containers or a cloud platform like AWS.\n"
    )

    # Mermaid diagrams
    mermaid_diagrams = (
        f"### Architecture Diagram\n{mermaid_tool('architecture', template_name)}\n"
        f"### File Structure\n{project_structure}\n"
        f"### Process Flow\n{mermaid_tool('process flow', template_name)}"
    )

    # Instructions for the composer implementor agent
    instructions = (
        "1. Refine the generated documentation in README.md.\n"
        "2. Implement components starting with core logic in src/components/.\n"
        "3. Use mermaid_tool for additional visualizations (e.g., `mermaid_tool 'detailed process flow'`).\n"
        "4. Follow the implementation strategy and test using provided commands."
    )

    # Substitutions for README: each key corresponds to a {{key}} placeholder
    # in README_TEMPLATE.
    substitutions = {
        "project_name": project_name,
        "design": ", ".join(design_info["design_patterns"]),
        "primary_purpose": template["description"].split(".")[0],
        "design_patterns": design_patterns,
        "software_architecture": software_architecture,
        "components_section": components_section,
        "relationships": relationships if relationships else "TBD - Define inter-component relationships",
        "interfaces_section": interfaces_section,
        "communication_protocols": communication_protocols,
        "technologies": technologies,
        "dependencies": dependencies,
        "install_command": install_command,
        "build_command": build_command,
        "run_command": run_command,
        "test_command": test_command,
        "project_structure": project_structure,
        "implementation_strategy": implementation_strategy,
        "mermaid_diagrams": mermaid_diagrams,
        "instructions": instructions
    }

    # Generate README by literal placeholder replacement (not a template engine).
    readme_content = README_TEMPLATE
    for key, value in substitutions.items():
        readme_content = readme_content.replace("{{" + key + "}}", value)
    with open(os.path.join(project_path, "README.md"), "w") as f:
        f.write(readme_content)

    return f"Project '{project_name}' created successfully at '{project_path}'."
345 |
# Helper: Select template
def select_template(idea: str, design_info: Dict[str, List[str]]) -> str:
    """Select a project template name based on identified design patterns.

    Args:
        idea: Original user idea. Currently unused in the selection logic;
            kept so the signature stays stable for existing callers.
        design_info: Output of analyze_design_patterns; only the
            "design_patterns" list is consulted.

    Returns:
        The template name mapped from the FIRST pattern in
        design_info["design_patterns"] that has a mapping, so the order of
        that list determines priority. Falls back to
        "ModularMonolithProject" when no pattern is recognized.
    """
    template_map = {
        "Microservices Architecture": "MicroservicesArchitectureProject",
        "Event-Driven Architecture": "EventDrivenArchitectureProject",
        "Repository Pattern": "RepositoryPatternProject",
        "CQRS": "CQRSProject",
        "Client-Server": "ClientServerProject",
        "Modular Monolith": "ModularMonolithProject",
        "Serverless Architecture": "ServerlessFunctionProject",
        "Bridge Pattern": "BridgeProject",
        "Composite Pattern": "CompositeProject",
        "Flyweight Pattern": "FlyweightProject",
        "Strategy Pattern": "StrategyProject",
        "Template Method Pattern": "TemplateMethodProject",
        "Visitor Pattern": "VisitorProject"
    }
    # Removed: idea.lower() was computed here but never used.
    for pattern in design_info["design_patterns"]:
        if pattern in template_map:
            return template_map[pattern]
    return "ModularMonolithProject"  # Default
370 |
# Tool: Orchestrate project setup
@mcp.tool()
def orchestrate_new_project(user_idea: str) -> str:
    """Orchestrate the setup of a new software project from the user's idea.

    Args:
        user_idea: Free-form description of the project to create.

    Returns:
        A human-readable status message: either the "Error: ..." string
        propagated from apply_project_template, or a summary of the created
        project with next steps.
    """
    # Step 1: Information Extraction
    design_info = analyze_design_patterns(user_idea)

    # Step 2: Design Patterns & Architecture Identification (handled by analyze_design_patterns)

    # Step 3: Project Template Application. The project name is a
    # filesystem-safe slug: lowercased, spaces -> underscores, max 20 chars.
    template_name = select_template(user_idea, design_info)
    project_name = user_idea.lower().replace(" ", "_")[:20]

    # Steps 4-6: Apply template, generate documentation, prepare file structure, and define strategy
    result = apply_project_template(template_name, project_name, user_idea, design_info)
    if "Error" in result:
        return result

    # BUGFIX: the final line was previously a plain string, so the literal
    # text "{project_path}" (an undefined name here) was emitted verbatim.
    # Mirror the path construction used by apply_project_template instead.
    project_path = os.path.join(PROJECTS_DIR, project_name)
    return (
        f"Project '{project_name}' has been initialized with template '{template_name}'.\n"
        f"Design Patterns Identified: {', '.join(design_info['design_patterns'])}\n"
        f"Architecture Concepts: {', '.join(design_info['architectures'])}\n"
        f"Next Steps: Review the generated README.md at '{project_path}/README.md' for detailed documentation and instructions."
    )
395 |
# Register AWS MCP tools if available.
# Gated on both a successful boto3-backed import (AWS_MCP_AVAILABLE) and an
# explicitly configured AWS_REGION, so local runs without AWS stay quiet.
if AWS_MCP_AVAILABLE and os.getenv("AWS_REGION"):
    try:
        register_aws_mcp_tools(mcp)
        print("AWS MCP tools registered successfully")
    except Exception as e:
        # Best-effort: AWS tooling is optional, so registration failures are
        # reported but do not stop the orchestrator from serving.
        print(f"Warning: Failed to register AWS MCP tools: {e}")

# Run the server
if __name__ == "__main__":
    mcp.run()
```
--------------------------------------------------------------------------------
/printcast-agent/src/mcp_server/server.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Main MCP Server implementation for PrintCast Agent.
3 |
4 | This server orchestrates voice-to-print workflows, integrating multiple services:
5 | - Asterisk SIP for telephony
6 | - ElevenLabs for conversational AI
7 | - GitHub/RSS for content sourcing
8 | - CUPS for printing
9 | - Delivery services for shipping
10 | """
11 |
12 | import asyncio
13 | import json
14 | import logging
15 | from typing import Any, Dict, List, Optional, Sequence
16 | from datetime import datetime
17 |
18 | from fastmcp import FastMCP
19 | from pydantic import BaseModel, Field
20 | import structlog
21 |
22 | from ..integrations.asterisk import AsteriskManager
23 | from ..integrations.elevenlabs import ElevenLabsAgent
24 | from ..integrations.content import ContentFetcher
25 | from ..integrations.printing import PrintManager
26 | from ..integrations.delivery import DeliveryService
27 | from ..orchestration.workflow import WorkflowOrchestrator
28 | from ..utils.monitoring import MetricsCollector
29 |
# Configure structured logging.
# Each log entry is rendered as a single JSON object carrying the logger
# name, level, ISO-8601 timestamp, positional-argument formatting, and any
# stack/exception info.
structlog.configure(
    processors=[
        structlog.stdlib.filter_by_level,
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.TimeStamper(fmt="iso"),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        structlog.processors.UnicodeDecoder(),
        structlog.processors.JSONRenderer()
    ],
    context_class=dict,
    logger_factory=structlog.stdlib.LoggerFactory(),
    cache_logger_on_first_use=True,
)

# Module-level structured logger used throughout this file.
logger = structlog.get_logger(__name__)
49 |
50 |
class CallSession(BaseModel):
    """Represents an active call session.

    Created by the handle_incoming_call tool and kept in
    PrintCastMCPServer.sessions until cleaned up after the call ends.
    """

    # Unique id, formatted "call_<timestamp>_<caller_id>" by handle_incoming_call.
    session_id: str
    # Caller's phone number as reported by telephony.
    caller_id: str
    # Call start; used to compute duration when the session ends.
    start_time: datetime
    # Content item IDs the caller selected during the conversation.
    selected_items: List[str] = Field(default_factory=list)
    # Set once the caller provides a delivery address.
    delivery_address: Optional[str] = None
    # Lifecycle marker: "active" until ended (e.g. completed/cancelled/error).
    status: str = "active"
    # Free-form extras, e.g. {"language": ..., "dtmf_code": ...}.
    metadata: Dict[str, Any] = Field(default_factory=dict)
61 |
62 |
63 | class PrintCastMCPServer:
64 | """
65 | Main MCP server for PrintCast Agent system.
66 |
67 | Provides tools and resources for:
68 | - Handling incoming calls through Asterisk
69 | - Managing AI voice conversations
70 | - Fetching and presenting content
71 | - Processing print jobs
72 | - Arranging delivery
73 | """
74 |
75 | def __init__(self, config: Optional[Dict[str, Any]] = None):
76 | """
77 | Initialize the PrintCast MCP Server.
78 |
79 | Args:
80 | config: Configuration dictionary for all services
81 | """
82 | self.config = config or {}
83 | self.app = FastMCP("PrintCast Agent")
84 | self.sessions: Dict[str, CallSession] = {}
85 |
86 | # Initialize service managers
87 | self.asterisk = AsteriskManager(self.config.get("asterisk", {}))
88 | self.elevenlabs = ElevenLabsAgent(self.config.get("elevenlabs", {}))
89 | self.content = ContentFetcher(self.config.get("content", {}))
90 | self.printer = PrintManager(self.config.get("printing", {}))
91 | self.delivery = DeliveryService(self.config.get("delivery", {}))
92 | self.orchestrator = WorkflowOrchestrator(
93 | asterisk=self.asterisk,
94 | elevenlabs=self.elevenlabs,
95 | content=self.content,
96 | printer=self.printer,
97 | delivery=self.delivery
98 | )
99 | self.metrics = MetricsCollector()
100 |
101 | # Register MCP tools
102 | self._register_tools()
103 |
104 | # Register MCP resources
105 | self._register_resources()
106 |
107 | logger.info("PrintCast MCP Server initialized", config=self.config)
108 |
109 | def _register_tools(self):
110 | """Register all MCP tools."""
111 |
112 | @self.app.tool()
113 | async def handle_incoming_call(
114 | caller_id: str,
115 | dtmf_code: Optional[str] = None,
116 | language: str = "cs"
117 | ) -> Dict[str, Any]:
118 | """
119 | Handle an incoming call and initiate the voice workflow.
120 |
121 | Args:
122 | caller_id: The phone number of the caller
123 | dtmf_code: Optional DTMF code entered by caller
124 | language: Language preference (cs=Czech, en=English)
125 |
126 | Returns:
127 | Session information and next steps
128 | """
129 | try:
130 | # Create new session
131 | session = CallSession(
132 | session_id=f"call_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{caller_id}",
133 | caller_id=caller_id,
134 | start_time=datetime.now(),
135 | metadata={"language": language, "dtmf_code": dtmf_code}
136 | )
137 | self.sessions[session.session_id] = session
138 |
139 | # Start voice agent
140 | agent_response = await self.elevenlabs.start_conversation(
141 | session_id=session.session_id,
142 | language=language
143 | )
144 |
145 | # Get initial content options
146 | content_options = await self.content.get_available_content()
147 |
148 | logger.info(
149 | "Call session started",
150 | session_id=session.session_id,
151 | caller_id=caller_id
152 | )
153 |
154 | return {
155 | "session_id": session.session_id,
156 | "status": "connected",
157 | "agent_ready": agent_response.get("ready", False),
158 | "content_options": content_options,
159 | "message": f"Welcome! Session {session.session_id} started."
160 | }
161 |
162 | except Exception as e:
163 | logger.error("Failed to handle incoming call", error=str(e))
164 | raise
165 |
166 | @self.app.tool()
167 | async def fetch_trending_content(
168 | content_type: str = "github",
169 | limit: int = 5,
170 | language: Optional[str] = None
171 | ) -> List[Dict[str, Any]]:
172 | """
173 | Fetch trending content from various sources.
174 |
175 | Args:
176 | content_type: Type of content (github, rss, news)
177 | limit: Maximum number of items to fetch
178 | language: Optional language filter
179 |
180 | Returns:
181 | List of trending content items
182 | """
183 | try:
184 | if content_type == "github":
185 | items = await self.content.fetch_github_trending(
186 | limit=limit,
187 | language=language
188 | )
189 | elif content_type == "rss":
190 | items = await self.content.fetch_rss_feeds(limit=limit)
191 | elif content_type == "news":
192 | items = await self.content.fetch_news(limit=limit)
193 | else:
194 | raise ValueError(f"Unknown content type: {content_type}")
195 |
196 | logger.info(
197 | "Fetched trending content",
198 | type=content_type,
199 | count=len(items)
200 | )
201 |
202 | return items
203 |
204 | except Exception as e:
205 | logger.error("Failed to fetch content", error=str(e))
206 | raise
207 |
208 | @self.app.tool()
209 | async def process_user_selection(
210 | session_id: str,
211 | selected_items: List[str],
212 | delivery_address: str,
213 | delivery_method: str = "post"
214 | ) -> Dict[str, Any]:
215 | """
216 | Process user's content selection and initiate print/delivery.
217 |
218 | Args:
219 | session_id: Active session ID
220 | selected_items: List of selected item IDs
221 | delivery_address: Delivery address
222 | delivery_method: Delivery method (post, courier)
223 |
224 | Returns:
225 | Order confirmation and tracking information
226 | """
227 | try:
228 | session = self.sessions.get(session_id)
229 | if not session:
230 | raise ValueError(f"Session {session_id} not found")
231 |
232 | # Update session
233 | session.selected_items = selected_items
234 | session.delivery_address = delivery_address
235 |
236 | # Orchestrate the workflow
237 | result = await self.orchestrator.process_order(
238 | session_id=session_id,
239 | selected_items=selected_items,
240 | delivery_address=delivery_address,
241 | delivery_method=delivery_method
242 | )
243 |
244 | # Update metrics
245 | await self.metrics.record_order(session_id, len(selected_items))
246 |
247 | logger.info(
248 | "Order processed",
249 | session_id=session_id,
250 | items_count=len(selected_items),
251 | tracking_id=result.get("tracking_id")
252 | )
253 |
254 | return result
255 |
256 | except Exception as e:
257 | logger.error("Failed to process selection", error=str(e))
258 | raise
259 |
260 | @self.app.tool()
261 | async def generate_print_preview(
262 | session_id: str,
263 | selected_items: List[str],
264 | format: str = "pdf"
265 | ) -> Dict[str, Any]:
266 | """
267 | Generate a print preview for selected items.
268 |
269 | Args:
270 | session_id: Active session ID
271 | selected_items: List of selected item IDs
272 | format: Output format (pdf, html)
273 |
274 | Returns:
275 | Preview file path and metadata
276 | """
277 | try:
278 | # Generate preview document
279 | preview = await self.printer.generate_preview(
280 | items=selected_items,
281 | format=format
282 | )
283 |
284 | logger.info(
285 | "Print preview generated",
286 | session_id=session_id,
287 | format=format
288 | )
289 |
290 | return {
291 | "preview_url": preview["url"],
292 | "page_count": preview["pages"],
293 | "file_size": preview["size"],
294 | "format": format
295 | }
296 |
297 | except Exception as e:
298 | logger.error("Failed to generate preview", error=str(e))
299 | raise
300 |
301 | @self.app.tool()
302 | async def get_delivery_quote(
303 | delivery_address: str,
304 | delivery_method: str = "post",
305 | weight_grams: int = 100
306 | ) -> Dict[str, Any]:
307 | """
308 | Get delivery cost estimate.
309 |
310 | Args:
311 | delivery_address: Delivery address
312 | delivery_method: Delivery method
313 | weight_grams: Estimated weight in grams
314 |
315 | Returns:
316 | Delivery quote with pricing and timing
317 | """
318 | try:
319 | quote = await self.delivery.get_quote(
320 | address=delivery_address,
321 | method=delivery_method,
322 | weight=weight_grams
323 | )
324 |
325 | return {
326 | "price": quote["price"],
327 | "currency": quote["currency"],
328 | "estimated_delivery": quote["estimated_delivery"],
329 | "carrier": quote["carrier"]
330 | }
331 |
332 | except Exception as e:
333 | logger.error("Failed to get delivery quote", error=str(e))
334 | raise
335 |
336 | @self.app.tool()
337 | async def end_call_session(
338 | session_id: str,
339 | reason: str = "completed"
340 | ) -> Dict[str, Any]:
341 | """
342 | End an active call session.
343 |
344 | Args:
345 | session_id: Session to end
346 | reason: Reason for ending (completed, cancelled, error)
347 |
348 | Returns:
349 | Session summary
350 | """
351 | try:
352 | session = self.sessions.get(session_id)
353 | if not session:
354 | raise ValueError(f"Session {session_id} not found")
355 |
356 | # Update session status
357 | session.status = reason
358 |
359 | # Stop voice agent
360 | await self.elevenlabs.end_conversation(session_id)
361 |
362 | # Generate session summary
363 | duration = (datetime.now() - session.start_time).total_seconds()
364 |
365 | summary = {
366 | "session_id": session_id,
367 | "duration_seconds": duration,
368 | "items_selected": len(session.selected_items),
369 | "status": reason,
370 | "caller_id": session.caller_id
371 | }
372 |
373 | # Clean up session after delay
374 | asyncio.create_task(self._cleanup_session(session_id))
375 |
376 | logger.info("Call session ended", **summary)
377 |
378 | return summary
379 |
380 | except Exception as e:
381 | logger.error("Failed to end session", error=str(e))
382 | raise
383 |
384 | def _register_resources(self):
385 | """Register MCP resources for monitoring and configuration."""
386 |
387 | @self.app.resource("resource://sessions/active")
388 | async def get_active_sessions() -> str:
389 | """Get list of active call sessions."""
390 | active = [
391 | {
392 | "session_id": s.session_id,
393 | "caller_id": s.caller_id,
394 | "start_time": s.start_time.isoformat(),
395 | "status": s.status,
396 | "items_selected": len(s.selected_items)
397 | }
398 | for s in self.sessions.values()
399 | if s.status == "active"
400 | ]
401 | return json.dumps(active, indent=2)
402 |
403 | @self.app.resource("resource://config/services")
404 | async def get_service_config() -> str:
405 | """Get current service configuration."""
406 | config = {
407 | "asterisk": {
408 | "enabled": self.asterisk.is_connected(),
409 | "host": self.config.get("asterisk", {}).get("host", "localhost")
410 | },
411 | "elevenlabs": {
412 | "enabled": self.elevenlabs.is_configured(),
413 | "model": self.config.get("elevenlabs", {}).get("model", "eleven_multilingual_v2")
414 | },
415 | "printing": {
416 | "enabled": self.printer.is_available(),
417 | "printer": self.config.get("printing", {}).get("default_printer", "default")
418 | },
419 | "delivery": {
420 | "enabled": self.delivery.is_configured(),
421 | "carriers": self.config.get("delivery", {}).get("carriers", [])
422 | }
423 | }
424 | return json.dumps(config, indent=2)
425 |
426 | @self.app.resource("resource://metrics/daily")
427 | async def get_daily_metrics() -> str:
428 | """Get daily usage metrics."""
429 | metrics = await self.metrics.get_daily_stats()
430 | return json.dumps(metrics, indent=2)
431 |
432 | async def _cleanup_session(self, session_id: str, delay: int = 300):
433 | """
434 | Clean up session data after delay.
435 |
436 | Args:
437 | session_id: Session to clean up
438 | delay: Delay in seconds before cleanup
439 | """
440 | await asyncio.sleep(delay)
441 | if session_id in self.sessions:
442 | del self.sessions[session_id]
443 | logger.info("Session cleaned up", session_id=session_id)
444 |
445 | async def start(self):
446 | """Start the MCP server and all services."""
447 | try:
448 | # Initialize all services
449 | await self.asterisk.connect()
450 | await self.elevenlabs.initialize()
451 | await self.printer.initialize()
452 | await self.delivery.initialize()
453 |
454 | # Start metrics collection
455 | asyncio.create_task(self.metrics.start_collection())
456 |
457 | # Start the MCP server
458 | logger.info("Starting PrintCast MCP Server")
459 | await self.app.run()
460 |
461 | except Exception as e:
462 | logger.error("Failed to start server", error=str(e))
463 | raise
464 |
465 |     async def stop(self):
466 |         """Stop the MCP server and cleanup."""
467 |         try:
468 |             # End all active sessions; iterate a snapshot of the keys because
469 |             # end_call_session presumably removes entries as it goes (hence list())
470 |             for session_id in list(self.sessions.keys()):
471 |                 await self.end_call_session(session_id, reason="shutdown")
472 | 
473 |             # Disconnect services.  NOTE(review): if any of these raises, the
474 |             # remaining shutdowns are skipped — confirm whether best-effort
475 |             # cleanup of the later services is wanted here.
476 |             await self.asterisk.disconnect()
477 |             await self.elevenlabs.shutdown()
478 |             await self.printer.shutdown()
479 |             await self.delivery.shutdown()
480 | 
481 |             logger.info("PrintCast MCP Server stopped")
482 | 
483 |         except Exception as e:
484 |             logger.error("Error during shutdown", error=str(e))
485 |             raise
```
--------------------------------------------------------------------------------
/mcp-project-orchestrator/openssl/tests/test_template_validation.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Tests for template rendering and JSON schema validation.
3 |
4 | This module contains tests for Jinja2 template rendering and
5 | JSON configuration schema validation.
6 | """
7 |
8 | import pytest
9 | import json
10 | import tempfile
11 | import shutil
12 | from pathlib import Path
13 | from unittest.mock import Mock, patch
14 |
15 | from mcp_orchestrator.cursor_deployer import CursorConfigDeployer
16 | from mcp_orchestrator.yaml_validator import YAMLFrontmatterValidator
17 | from mcp_orchestrator.env_config import EnvironmentConfig
18 |
19 |
20 | class TestTemplateRendering:
21 |     """Test cases for Jinja2 template rendering."""
22 | 
23 |     def setup_method(self):
24 |         """Set up test fixtures."""
25 |         self.temp_dir = tempfile.mkdtemp()
26 |         self.repo_root = Path(self.temp_dir) / "test_repo"
27 |         self.package_root = Path(self.temp_dir) / "test_package"
28 | 
29 |         # Create test repository
30 |         self.repo_root.mkdir(parents=True)
31 | 
32 |         # Create test package structure
33 |         self.package_root.mkdir(parents=True)
34 |         (self.package_root / "cursor-rules" / "rules").mkdir(parents=True)
35 |         (self.package_root / "cursor-rules" / "prompts").mkdir(parents=True)
36 | 
37 |         # Create test templates
38 |         self._create_test_templates()
39 | 
40 |         # Create deployer
41 |         self.deployer = CursorConfigDeployer(self.repo_root, self.package_root)  # object under test
42 | 
43 |     def teardown_method(self):
44 |         """Clean up test fixtures."""
45 |         shutil.rmtree(self.temp_dir)
46 | 
47 |     def _create_test_templates(self):
48 |         """Create test template files."""
49 |         # Create shared rule template (exercises plain variable substitution)
50 |         shared_template = self.package_root / "cursor-rules" / "rules" / "shared.mdc.jinja2"
51 |         shared_template.write_text("""---
52 | title: {{ title }}
53 | description: {{ description }}
54 | created: {{ timestamp }}
55 | platform: {{ platform }}
56 | user: {{ user }}
57 | ---
58 | 
59 | # {{ title }}
60 | 
61 | This is a test shared rule template.
62 | Platform: {{ os }}
63 | User: {{ user }}
64 | Python: {{ python_version }}
65 | """)
66 | 
67 |         # Create MCP config template (exercises conditionals and the |lower filter)
68 |         mcp_template = self.package_root / "cursor-rules" / "mcp.json.jinja2"
69 |         mcp_template.write_text("""{
70 |   "mcpServers": {
71 |     "test-server": {
72 |       "command": "{% if os == 'windows' %}npx.cmd{% else %}npx{% endif %}",
73 |       "args": ["-y", "@test/server"],
74 |       "env": {
75 |         "PLATFORM": "{{ os }}",
76 |         "USER": "{{ user }}",
77 |         "HOME": "{{ home }}",
78 |         "CI": {{ is_ci | lower }}
79 |       }
80 |     }
81 |   },
82 |   "platform": {
83 |     "os": "{{ os }}",
84 |     "architecture": "{{ architecture }}",
85 |     "pythonVersion": "{{ python_version }}"
86 |   }
87 | }
88 | """)
89 | 
90 |     def test_template_rendering_basic(self):
91 |         """Test basic template rendering."""
92 |         platform_info = self.deployer.detect_platform()
93 | 
94 |         # Test shared rule template
95 |         content = self.deployer._render_template_content(
96 |             "rules/shared.mdc.jinja2",
97 |             platform_info
98 |         )
99 | 
100 |         assert "title: Shared Rules" in content  # assumes detect_platform() supplies title="Shared Rules" — TODO confirm
101 |         assert f"Platform: {platform_info['os']}" in content
102 |         assert f"User: {platform_info['user']}" in content
103 |         assert f"Python: {platform_info['python_version']}" in content
104 | 
105 |     def test_template_rendering_with_custom_variables(self):
106 |         """Test template rendering with custom variables."""
107 |         platform_info = self.deployer.detect_platform()
108 |         platform_info.update({
109 |             "title": "Custom Test Rules",
110 |             "description": "Custom test description",
111 |             "platform": "test"
112 |         })
113 | 
114 |         content = self.deployer._render_template_content(
115 |             "rules/shared.mdc.jinja2",
116 |             platform_info
117 |         )
118 | 
119 |         assert "title: Custom Test Rules" in content
120 |         assert "description: Custom test description" in content
121 |         assert "platform: test" in content
122 | 
123 |     def test_mcp_config_rendering(self):
124 |         """Test MCP configuration template rendering."""
125 |         platform_info = self.deployer.detect_platform()
126 | 
127 |         content = self.deployer._render_template_content(
128 |             "mcp.json.jinja2",
129 |             platform_info
130 |         )
131 | 
132 |         # Parse as JSON to validate
133 |         config = json.loads(content)
134 | 
135 |         assert "mcpServers" in config
136 |         assert "test-server" in config["mcpServers"]
137 |         assert "platform" in config
138 | 
139 |         # Check platform-specific command
140 |         expected_command = "npx.cmd" if platform_info["os"] == "windows" else "npx"
141 |         assert config["mcpServers"]["test-server"]["command"] == expected_command
142 | 
143 |         # Check environment variables
144 |         env = config["mcpServers"]["test-server"]["env"]
145 |         assert env["PLATFORM"] == platform_info["os"]
146 |         assert env["USER"] == platform_info["user"]
147 |         assert env["HOME"] == platform_info["home"]
148 |         assert env["CI"] == platform_info["is_ci"]
149 | 
150 |     def test_template_rendering_error_handling(self):
151 |         """Test template rendering error handling."""
152 |         # Create invalid template
153 |         invalid_template = self.package_root / "cursor-rules" / "invalid.jinja2"
154 |         invalid_template.write_text("{{ invalid_variable_that_does_not_exist }}")
155 | 
156 |         with pytest.raises(Exception):  # assumes the renderer uses StrictUndefined; default Undefined renders "" — TODO confirm
157 |             self.deployer._render_template_content(
158 |                 "invalid.jinja2",
159 |                 {"os": "linux"}
160 |             )
161 | 
162 |     def test_template_rendering_with_filters(self):
163 |         """Test template rendering with Jinja2 filters."""
164 |         platform_info = self.deployer.detect_platform()
165 | 
166 |         # Test boolean filter
167 |         content = self.deployer._render_template_content(
168 |             "mcp.json.jinja2",
169 |             platform_info
170 |         )
171 | 
172 |         config = json.loads(content)
173 |         env = config["mcpServers"]["test-server"]["env"]
174 |         assert isinstance(env["CI"], bool)  # the |lower filter makes the JSON contain true/false
175 |         assert env["CI"] == platform_info["is_ci"]
176 |
177 |
class TestJSONSchemaValidation:
    """Test cases for JSON configuration schema validation.

    Renders the MCP config template for several platform contexts and
    validates the parsed JSON against the expected structure.
    """

    def setup_method(self):
        """Set up test fixtures."""
        self.temp_dir = tempfile.mkdtemp()
        self.repo_root = Path(self.temp_dir) / "test_repo"
        self.package_root = Path(self.temp_dir) / "test_package"

        # Create test repository
        self.repo_root.mkdir(parents=True)

        # Create test package structure
        self.package_root.mkdir(parents=True)
        (self.package_root / "cursor-rules").mkdir(parents=True)

        # Create test MCP template
        self._create_mcp_template()

        # Create deployer
        self.deployer = CursorConfigDeployer(self.repo_root, self.package_root)

    def teardown_method(self):
        """Clean up test fixtures."""
        shutil.rmtree(self.temp_dir)

    def _create_mcp_template(self):
        """Create MCP configuration template.

        ``is_ci`` must be rendered through Jinja2's ``lower`` filter: a bare
        ``{{ is_ci }}`` emits Python's ``True``/``False``, which is not valid
        JSON and makes ``json.loads`` fail in the tests below.  (The template
        in TestTemplateRendering already uses ``| lower`` for the same reason.)
        """
        mcp_template = self.package_root / "cursor-rules" / "mcp.json.jinja2"
        mcp_template.write_text("""{
  "mcpServers": {
    "openssl-context": {
      "command": "{% if os == 'windows' %}npx.cmd{% else %}npx{% endif %}",
      "args": ["-y", "@sparesparrow/mcp-openssl-context"],
      "env": {
        "OPENSSL_PROJECT_ROOT": "{{ repo_root }}",
        "CONAN_USER_HOME": "{{ home }}/.conan2",
        "PLATFORM": "{{ os }}",
        "ARCHITECTURE": "{{ architecture }}",
        "PYTHON_VERSION": "{{ python_version }}",
        "USER": "{{ user }}"
      }
    },
    "build-intelligence": {
      "command": "{% if os == 'windows' %}npx.cmd{% else %}npx{% endif %}",
      "args": ["-y", "@sparesparrow/mcp-build-intelligence"],
      "env": {
        "OPENSSL_PROJECT_ROOT": "{{ repo_root }}",
        "PLATFORM": "{{ os }}",
        "ARCHITECTURE": "{{ architecture }}",
        "BUILD_TYPE": "{% if is_ci %}release{% else %}debug{% endif %}",
        "CONAN_USER_HOME": "{{ home }}/.conan2"
      }
    }
  },
  "globalShortcut": "Ctrl+Shift+.",
  "logging": {
    "level": "{% if is_ci %}error{% else %}info{% endif %}",
    "file": "{{ repo_root }}/.cursor/cursor.log",
    "maxSize": "10MB",
    "maxFiles": 5
  },
  "features": {
    "autoComplete": true,
    "syntaxHighlighting": true,
    "errorChecking": true,
    "codeFormatting": true,
    "intelligentSuggestions": true
  },
  "platform": {
    "os": "{{ os }}",
    "architecture": "{{ architecture }}",
    "pythonVersion": "{{ python_version }}",
    "user": "{{ user }}",
    "home": "{{ home }}",
    "ciEnvironment": {{ is_ci | lower }},
    "timestamp": "{{ timestamp }}"
  }
}
""")

    def test_mcp_config_schema_validation(self):
        """Test MCP configuration JSON schema validation."""
        platform_info = self.deployer.detect_platform()
        platform_info["repo_root"] = str(self.repo_root)

        # Render template
        content = self.deployer._render_template_content(
            "mcp.json.jinja2",
            platform_info
        )

        # Parse as JSON
        config = json.loads(content)

        # Validate schema
        self._validate_mcp_config_schema(config)

    def _validate_mcp_config_schema(self, config: dict):
        """Validate MCP configuration schema."""
        # Check required top-level fields
        required_fields = ["mcpServers", "globalShortcut", "logging", "features", "platform"]
        for field in required_fields:
            assert field in config, f"Missing required field: {field}"

        # Validate mcpServers
        mcp_servers = config["mcpServers"]
        assert isinstance(mcp_servers, dict), "mcpServers must be a dictionary"

        for server_name, server_config in mcp_servers.items():
            assert isinstance(server_config, dict), f"Server {server_name} config must be a dictionary"

            # Check required server fields
            required_server_fields = ["command", "args", "env"]
            for field in required_server_fields:
                assert field in server_config, f"Server {server_name} missing required field: {field}"

            # Validate command
            assert isinstance(server_config["command"], str), f"Server {server_name} command must be a string"
            assert server_config["command"] in ["npx", "npx.cmd"], f"Server {server_name} has invalid command"

            # Validate args
            assert isinstance(server_config["args"], list), f"Server {server_name} args must be a list"

            # Validate env
            assert isinstance(server_config["env"], dict), f"Server {server_name} env must be a dictionary"

        # Validate logging (local renamed so it doesn't shadow the stdlib module name)
        logging_cfg = config["logging"]
        assert "level" in logging_cfg, "Logging missing level field"
        assert logging_cfg["level"] in ["error", "info", "debug", "warn"], "Invalid logging level"

        # Validate features
        features = config["features"]
        boolean_features = ["autoComplete", "syntaxHighlighting", "errorChecking", "codeFormatting", "intelligentSuggestions"]
        for feature in boolean_features:
            assert feature in features, f"Missing feature: {feature}"
            assert isinstance(features[feature], bool), f"Feature {feature} must be boolean"

        # Validate platform (local renamed so it doesn't shadow the stdlib module name)
        platform_cfg = config["platform"]
        required_platform_fields = ["os", "architecture", "pythonVersion", "user", "home", "ciEnvironment", "timestamp"]
        for field in required_platform_fields:
            assert field in platform_cfg, f"Platform missing required field: {field}"

    def test_mcp_config_rendering_consistency(self):
        """Test that MCP configuration rendering is consistent across platforms."""
        platforms = [
            {"os": "linux", "is_ci": False},
            {"os": "macos", "is_ci": False},
            {"os": "windows", "is_ci": False},
            {"os": "linux", "is_ci": True},
        ]

        for platform_info in platforms:
            platform_info.update({
                "architecture": "x86_64",
                "python_version": "3.9.0",
                "user": "testuser",
                "home": "/home/testuser",
                "timestamp": "2024-01-01T00:00:00",
                "repo_root": str(self.repo_root)
            })

            # Render template
            content = self.deployer._render_template_content(
                "mcp.json.jinja2",
                platform_info
            )

            # Parse as JSON
            config = json.loads(content)

            # Validate schema
            self._validate_mcp_config_schema(config)

            # Check platform-specific values; the |lower filter emits JSON
            # true/false, which parse back into Python booleans.
            assert config["platform"]["os"] == platform_info["os"]
            assert config["platform"]["ciEnvironment"] == platform_info["is_ci"]

            # Check command based on OS
            expected_command = "npx.cmd" if platform_info["os"] == "windows" else "npx"
            for server_config in config["mcpServers"].values():
                assert server_config["command"] == expected_command
362 |
363 |
364 | class TestYAMLFrontmatterValidation:
365 |     """Test cases for YAML frontmatter validation."""
366 | 
367 |     def setup_method(self):
368 |         """Set up test fixtures."""
369 |         self.temp_dir = tempfile.mkdtemp()
370 |         self.validator = YAMLFrontmatterValidator()  # object under test
371 | 
372 |     def teardown_method(self):
373 |         """Clean up test fixtures."""
374 |         shutil.rmtree(self.temp_dir)
375 | 
376 |     def test_valid_frontmatter(self):
377 |         """Test validation of valid frontmatter."""
378 |         # Create valid .mdc file (all required fields present)
379 |         mdc_file = Path(self.temp_dir) / "valid.mdc"
380 |         mdc_file.write_text("""---
381 | title: Test Rule
382 | description: A test rule for validation
383 | created: 2024-01-01T00:00:00
384 | platform: linux
385 | user: testuser
386 | ---
387 | 
388 | # Test Rule
389 | 
390 | This is a test rule.
391 | """)
392 | 
393 |         result = self.validator.validate_file(mdc_file)
394 | 
395 |         assert result.is_valid
396 |         assert len(result.errors) == 0
397 |         assert result.frontmatter is not None
398 |         assert result.frontmatter["title"] == "Test Rule"
399 | 
400 |     def test_missing_required_fields(self):
401 |         """Test validation with missing required fields."""
402 |         # Create .mdc file with missing required fields (only title present)
403 |         mdc_file = Path(self.temp_dir) / "invalid.mdc"
404 |         mdc_file.write_text("""---
405 | title: Test Rule
406 | ---
407 | 
408 | # Test Rule
409 | 
410 | This is a test rule.
411 | """)
412 | 
413 |         result = self.validator.validate_file(mdc_file)
414 | 
415 |         assert not result.is_valid
416 |         assert len(result.errors) > 0
417 |         assert any("Missing required field" in error for error in result.errors)
418 | 
419 |     def test_invalid_yaml_syntax(self):
420 |         """Test validation with invalid YAML syntax."""
421 |         # Create .mdc file with invalid YAML (unterminated flow sequence)
422 |         mdc_file = Path(self.temp_dir) / "invalid_yaml.mdc"
423 |         mdc_file.write_text("""---
424 | title: Test Rule
425 | description: A test rule
426 | created: 2024-01-01T00:00:00
427 | platform: linux
428 | user: testuser
429 | invalid_yaml: [unclosed list
430 | ---
431 | 
432 | # Test Rule
433 | 
434 | This is a test rule.
435 | """)
436 | 
437 |         result = self.validator.validate_file(mdc_file)
438 | 
439 |         assert not result.is_valid
440 |         assert any("Invalid YAML syntax" in error for error in result.errors)
441 | 
442 |     def test_invalid_platform(self):
443 |         """Test validation with invalid platform."""
444 |         # Create .mdc file with a platform value outside the validator's allowed set
445 |         mdc_file = Path(self.temp_dir) / "invalid_platform.mdc"
446 |         mdc_file.write_text("""---
447 | title: Test Rule
448 | description: A test rule
449 | created: 2024-01-01T00:00:00
450 | platform: invalid_platform
451 | user: testuser
452 | ---
453 | 
454 | # Test Rule
455 | 
456 | This is a test rule.
457 | """)
458 | 
459 |         result = self.validator.validate_file(mdc_file)
460 | 
461 |         assert not result.is_valid
462 |         assert any("Invalid platform" in error for error in result.errors)
463 | 
464 |     def test_no_frontmatter(self):
465 |         """Test validation with no frontmatter."""
466 |         # Create .mdc file without frontmatter (no leading --- block)
467 |         mdc_file = Path(self.temp_dir) / "no_frontmatter.mdc"
468 |         mdc_file.write_text("""# Test Rule
469 | 
470 | This is a test rule without frontmatter.
471 | """)
472 | 
473 |         result = self.validator.validate_file(mdc_file)
474 | 
475 |         assert not result.is_valid
476 |         assert any("No YAML frontmatter found" in error for error in result.errors)
477 |
478 |
class TestEnvironmentConfiguration:
    """Test cases for environment configuration.

    ``patch.dict`` is given the string target ``"os.environ"`` so that mock
    resolves the dictionary itself; this removes the fragile dependency on
    the ``import os`` statement that sits at the bottom of this module.
    """

    def setup_method(self):
        """Set up test fixtures."""
        self.env_config = EnvironmentConfig()

    def test_get_conan_home_fallback(self):
        """Test Conan home directory fallback."""
        # Clear cache so earlier lookups don't mask the environment changes
        self.env_config._cache.clear()

        # Test with environment variable set
        with patch.dict("os.environ", {"CONAN_USER_HOME": "/custom/conan/home"}):
            conan_home = self.env_config.get_conan_home()
            assert conan_home == "/custom/conan/home"

        # Test fallback (no variable set -> default under the home directory)
        with patch.dict("os.environ", {}, clear=True):
            conan_home = self.env_config.get_conan_home()
            assert conan_home.endswith(".conan2")

    def test_validate_required_openssl(self):
        """Test validation of required variables for OpenSSL project."""
        # Test with all required variables
        with patch.dict("os.environ", {
            "CONAN_USER_HOME": "/test/conan",
            "OPENSSL_ROOT_DIR": "/test/openssl"
        }):
            is_valid, missing = self.env_config.validate_required("openssl")
            assert is_valid
            assert len(missing) == 0

        # Test with missing variables
        with patch.dict("os.environ", {}, clear=True):
            is_valid, missing = self.env_config.validate_required("openssl")
            assert not is_valid
            assert "CONAN_USER_HOME" in missing
            assert "OPENSSL_ROOT_DIR" in missing

    def test_get_validation_errors(self):
        """Test getting validation error messages."""
        with patch.dict("os.environ", {}, clear=True):
            errors = self.env_config.get_validation_errors("openssl")
            assert len(errors) > 0
            assert any("Missing required environment variables" in error for error in errors)
            assert any("CONAN_USER_HOME" in error for error in errors)
            assert any("OPENSSL_ROOT_DIR" in error for error in errors)
527 |
528 |
529 | # NOTE: 'os' is imported at the bottom of the file; it still works because the import runs at module load time, before any test executes, but it belongs in the top-level import block.
530 | import os
```
--------------------------------------------------------------------------------
/src/mcp_project_orchestrator/core/fastmcp.py:
--------------------------------------------------------------------------------
```python
1 | #!/usr/bin/env python3
2 | """
3 | Enhanced FastMCP server implementation for the MCP Project Orchestrator.
4 |
5 | This module provides a comprehensive MCP server that handles communication
6 | with MCP clients like Claude Desktop, exposing project orchestration,
7 | prompt management, and diagram generation capabilities through the Model
8 | Context Protocol.
9 | """
10 | import os
11 | import sys
12 | import signal
13 | import logging
14 | import json
15 | import asyncio
16 | from typing import Dict, Any, Optional, Callable, List
17 |
18 | # Set up logging: root logger at INFO with a timestamped format
19 | logging.basicConfig(
20 |     level=logging.INFO,
21 |     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
22 |     handlers=[logging.StreamHandler()]  # StreamHandler() with no args writes to stderr
23 | )
24 | logger = logging.getLogger("mcp-project-orchestrator")  # shared module-wide logger
25 |
class MCP_Error(Exception):
    """Base exception class for MCP server errors."""
29 |
30 | class FastMCPServer:
31 | """
32 | Enhanced FastMCP server implementation for project orchestration.
33 |
34 | This class provides a comprehensive MCP server that handles communication
35 | with MCP clients, exposing orchestration capabilities through
36 | registered tools and resources with robust error handling.
37 | """
38 |
39 | def __init__(self, config):
40 | """
41 | Initialize the MCP server with the given configuration.
42 |
43 | Args:
44 | config: The server configuration object
45 | """
46 | self.name = config.name if hasattr(config, 'name') else "MCP Project Orchestrator"
47 | self.config = config
48 | self.tools: Dict[str, Dict[str, Any]] = {}
49 | self.resources: Dict[str, Any] = {}
50 |
51 | # Set up signal handlers
52 | signal.signal(signal.SIGINT, self._handle_signal)
53 | signal.signal(signal.SIGTERM, self._handle_signal)
54 |
55 | logger.info(f"Initialized FastMCP server '{self.name}'")
56 |
57 |     async def initialize(self) -> None:
58 |         """
59 |         Initialize the server asynchronously.
60 | 
61 |         This method should be called after the server is created to set up
62 |         all required components before starting the server.
63 |         """
64 |         logger.info("Initializing FastMCPServer")  # currently a logging-only placeholder
65 | 
66 |         # Additional initialization logic can be added here
67 | 
68 |         logger.info("FastMCPServer initialization complete")
69 |
70 | async def start(self, host: Optional[str] = None, port: Optional[int] = None) -> None:
71 | """
72 | Start the server asynchronously.
73 |
74 | Args:
75 | host: Optional host to bind to (overrides config)
76 | port: Optional port to bind to (overrides config)
77 | """
78 | # Use provided values or fall back to config
79 | self.host = host or self.config.host
80 | self.port = port or self.config.port
81 |
82 | logger.info(f"Starting FastMCP server on {self.host}:{self.port}")
83 |
84 | # Server startup logic would go here
85 |
86 | logger.info(f"FastMCP server started successfully on {self.host}:{self.port}")
87 |
88 |     async def stop(self) -> None:
89 |         """
90 |         Stop the server gracefully.
91 |         """
92 |         logger.info("Stopping FastMCP server")  # placeholder: no sockets or tasks are torn down yet
93 | 
94 |         # Server shutdown logic would go here
95 | 
96 |         logger.info("FastMCP server stopped")
97 |
98 | def tool(self, func: Optional[Callable] = None,
99 | name: Optional[str] = None,
100 | description: Optional[str] = None,
101 | parameters: Optional[Dict[str, Any]] = None):
102 | """
103 | Decorator to register a function as an MCP tool.
104 |
105 | Args:
106 | func: The function to register
107 | name: Optional name for the tool (defaults to function name)
108 | description: Optional description of the tool
109 | parameters: Optional parameters schema for the tool
110 |
111 | Returns:
112 | The decorated function
113 | """
114 | def decorator(fn):
115 | tool_name = name or fn.__name__
116 | tool_desc = description or fn.__doc__ or f"Tool {tool_name}"
117 |
118 | # Extract parameters from function signature if not provided
119 | tool_params = parameters or {}
120 | if not tool_params:
121 | import inspect
122 | sig = inspect.signature(fn)
123 | tool_params = {
124 | "type": "object",
125 | "properties": {},
126 | "required": []
127 | }
128 |
129 | for param_name, param in sig.parameters.items():
130 | if param_name == "self":
131 | continue
132 |
133 | param_type = "string" # Default type
134 | if param.annotation is not inspect.Parameter.empty:
135 | if param.annotation == str:
136 | param_type = "string"
137 | elif param.annotation == int:
138 | param_type = "integer"
139 | elif param.annotation == float:
140 | param_type = "number"
141 | elif param.annotation == bool:
142 | param_type = "boolean"
143 | elif param.annotation == dict or param.annotation == Dict:
144 | param_type = "object"
145 | elif param.annotation == list or param.annotation == List:
146 | param_type = "array"
147 |
148 | tool_params["properties"][param_name] = {
149 | "type": param_type,
150 | "description": f"Parameter {param_name}"
151 | }
152 |
153 | # Add to required params if no default value
154 | if param.default is inspect.Parameter.empty:
155 | tool_params["required"].append(param_name)
156 |
157 | self.tools[tool_name] = {
158 | "function": fn,
159 | "description": tool_desc,
160 | "parameters": tool_params
161 | }
162 |
163 | logger.info(f"Registered tool '{tool_name}'")
164 | return fn
165 |
166 | if func is None:
167 | return decorator
168 | return decorator(func)
169 |
170 | def resource(self, name: str, content: Any) -> None:
171 | """
172 | Register a resource with the MCP server.
173 |
174 | Args:
175 | name: Name of the resource
176 | content: Content of the resource
177 | """
178 | self.resources[name] = content
179 | logger.info(f"Registered resource '{name}'")
180 |
181 | def register_tool(self, name: str, description: str, parameters: Dict[str, Any], handler: Callable):
182 | """
183 | Register a tool with the MCP server.
184 |
185 | Args:
186 | name: Name of the tool
187 | description: Description of the tool
188 | parameters: Parameters schema for the tool
189 | handler: Handler function for the tool
190 | """
191 | logger.info(f"Registering tool: {name}")
192 |
193 | self.tools[name] = {
194 | "function": handler,
195 | "description": description,
196 | "parameters": parameters
197 | }
198 |
199 | logger.debug(f"Tool registered: {name} - {description}")
200 |
201 |     def _handle_signal(self, signum: int, frame: Any) -> None:
202 |         """
203 |         Handle termination signals gracefully.
204 | 
205 |         Args:
206 |             signum: Signal number
207 |             frame: Current stack frame
208 |         """
209 |         logger.info(f"Received signal {signum}, shutting down...")
210 | 
211 |         # Create and run an asyncio task to stop the server
212 |         loop = asyncio.get_event_loop()  # NOTE(review): deprecated outside a running loop since 3.10 and may raise — confirm target Python version
213 |         loop.create_task(self.stop())
214 | 
215 |         # Allow some time for cleanup, then stop the loop after a 2-second grace period
216 |         loop.call_later(2, loop.stop)
217 |
218 | def _handle_client_message(self, message: Dict[str, Any]) -> Dict[str, Any]:
219 | """
220 | Handle an MCP protocol message from a client.
221 |
222 | Args:
223 | message: The message from the client
224 |
225 | Returns:
226 | The response to send back to the client
227 | """
228 | try:
229 | if "jsonrpc" not in message or message["jsonrpc"] != "2.0":
230 | return self._error_response(message.get("id"), -32600, "Invalid request")
231 |
232 | if "method" not in message:
233 | return self._error_response(message.get("id"), -32600, "Method not specified")
234 |
235 | method = message["method"]
236 | params = message.get("params", {})
237 |
238 | if method == "mcp/initialize":
239 | return self._handle_initialize(message["id"], params)
240 | elif method == "mcp/listTools":
241 | return self._handle_list_tools(message["id"])
242 | elif method == "mcp/callTool":
243 | return self._handle_call_tool(message["id"], params)
244 | elif method == "mcp/listResources":
245 | return self._handle_list_resources(message["id"])
246 | elif method == "mcp/readResource":
247 | return self._handle_read_resource(message["id"], params)
248 | else:
249 | return self._error_response(message["id"], -32601, f"Method '{method}' not supported")
250 |
251 | except Exception as e:
252 | logger.error(f"Error handling message: {str(e)}")
253 | return self._error_response(message.get("id"), -32603, f"Internal error: {str(e)}")
254 |
255 | def _error_response(self, id: Any, code: int, message: str) -> Dict[str, Any]:
256 | """
257 | Create an error response according to the JSON-RPC 2.0 spec.
258 |
259 | Args:
260 | id: The request ID
261 | code: The error code
262 | message: The error message
263 |
264 | Returns:
265 | The error response
266 | """
267 | return {
268 | "jsonrpc": "2.0",
269 | "id": id,
270 | "error": {
271 | "code": code,
272 | "message": message
273 | }
274 | }
275 |
276 | def _handle_initialize(self, id: Any, params: Dict[str, Any]) -> Dict[str, Any]:
277 | """
278 | Handle the mcp/initialize method.
279 |
280 | Args:
281 | id: The request ID
282 | params: The method parameters
283 |
284 | Returns:
285 | The response
286 | """
287 | # Return server capabilities
288 | return {
289 | "jsonrpc": "2.0",
290 | "id": id,
291 | "result": {
292 | "name": self.name,
293 | "version": "0.1.0",
294 | "capabilities": {
295 | "listTools": True,
296 | "callTool": True,
297 | "listResources": True,
298 | "readResource": True
299 | }
300 | }
301 | }
302 |
303 | def _handle_list_tools(self, id: Any) -> Dict[str, Any]:
304 | """
305 | Handle the mcp/listTools method.
306 |
307 | Args:
308 | id: The request ID
309 |
310 | Returns:
311 | The response
312 | """
313 | tools = []
314 | for name, tool in self.tools.items():
315 | tools.append({
316 | "name": name,
317 | "description": tool["description"],
318 | "parameters": tool["parameters"]
319 | })
320 |
321 | return {
322 | "jsonrpc": "2.0",
323 | "id": id,
324 | "result": {
325 | "tools": tools
326 | }
327 | }
328 |
329 | def _handle_call_tool(self, id: Any, params: Dict[str, Any]) -> Dict[str, Any]:
330 | """
331 | Handle the mcp/callTool method.
332 |
333 | Args:
334 | id: The request ID
335 | params: The method parameters
336 |
337 | Returns:
338 | The response
339 | """
340 | tool_name = params.get("name")
341 | tool_params = params.get("arguments", {})
342 |
343 | if not tool_name:
344 | return self._error_response(id, -32602, "Tool name not specified")
345 |
346 | if tool_name not in self.tools:
347 | return self._error_response(id, -32602, f"Tool '{tool_name}' not found")
348 |
349 | try:
350 | tool = self.tools[tool_name]["function"]
351 | result = tool(**tool_params)
352 |
353 | return {
354 | "jsonrpc": "2.0",
355 | "id": id,
356 | "result": {
357 | "result": result
358 | }
359 | }
360 | except Exception as e:
361 | logger.error(f"Error calling tool '{tool_name}': {str(e)}")
362 | return self._error_response(id, -32603, f"Error calling tool '{tool_name}': {str(e)}")
363 |
364 | def _handle_list_resources(self, id: Any) -> Dict[str, Any]:
365 | """
366 | Handle the mcp/listResources method.
367 |
368 | Args:
369 | id: The request ID
370 |
371 | Returns:
372 | The response
373 | """
374 | resources = []
375 | for name in self.resources:
376 | resources.append({
377 | "uri": f"mcp://{self.name.lower()}/resources/{name}",
378 | "name": name
379 | })
380 |
381 | return {
382 | "jsonrpc": "2.0",
383 | "id": id,
384 | "result": {
385 | "resources": resources
386 | }
387 | }
388 |
389 | def _handle_read_resource(self, id: Any, params: Dict[str, Any]) -> Dict[str, Any]:
390 | """
391 | Handle the mcp/readResource method.
392 |
393 | Args:
394 | id: The request ID
395 | params: The method parameters
396 |
397 | Returns:
398 | The response
399 | """
400 | uri = params.get("uri")
401 | if not uri:
402 | return self._error_response(id, -32602, "Resource URI not specified")
403 |
404 | # Parse the URI to get the resource name
405 | resource_name = uri.split("/")[-1]
406 |
407 | if resource_name not in self.resources:
408 | return self._error_response(id, -32602, f"Resource '{resource_name}' not found")
409 |
410 | return {
411 | "jsonrpc": "2.0",
412 | "id": id,
413 | "result": {
414 | "contents": self.resources[resource_name]
415 | }
416 | }
417 |
418 |     def run(self, host: str = "127.0.0.1", port: int = 8080) -> None:
419 |         """
420 |         Run the MCP server and handle client connections.
421 | 
422 |         Serves JSON-RPC over websockets when the `websockets` package is
423 |         importable; otherwise falls back to stdio (Claude Desktop style).
424 | 
425 |         Args:
426 |             host: The host to bind to
427 |             port: The port to listen on
428 |         """
429 |         logger.info(f"FastMCP server '{self.name}' running with configuration: {self.config}")
430 | 
431 |         try:
432 |             import asyncio
433 |             import websockets  # third-party; ImportError triggers the stdio fallback below
434 | 
435 |             async def handle_websocket(websocket: Any, path: str) -> None:  # NOTE(review): websockets>=11 calls handlers without the 'path' argument — confirm pinned version
436 |                 """Handle a websocket connection."""
437 |                 async for message in websocket:
438 |                     try:
439 |                         request = json.loads(message)
440 |                         logger.debug(f"Received message: {request}")
441 | 
442 |                         response = self._handle_client_message(request)
443 |                         logger.debug(f"Sending response: {response}")
444 | 
445 |                         await websocket.send(json.dumps(response))
446 |                     except json.JSONDecodeError as e:
447 |                         logger.error(f"Error decoding message: {str(e)}")
448 |                         await websocket.send(json.dumps(self._error_response(None, -32700, "Parse error")))
449 |                     except Exception as e:
450 |                         logger.error(f"Error handling message: {str(e)}")
451 |                         await websocket.send(json.dumps(self._error_response(None, -32603, f"Internal error: {str(e)}")))
452 | 
453 |             # Start the server
454 |             start_server = websockets.serve(handle_websocket, host, port)
455 |             asyncio.get_event_loop().run_until_complete(start_server)  # NOTE(review): get_event_loop() is deprecated for this use since Python 3.10
456 | 
457 |             logger.info(f"Server running on {host}:{port}")
458 |             logger.info("Press Ctrl+C to stop")
459 | 
460 |             # Keep the event loop running
461 |             asyncio.get_event_loop().run_forever()
462 | 
463 |         except ImportError:
464 |             # Fallback to stdio for compatibility with Claude Desktop
465 |             logger.info("Websockets not available, falling back to stdio")
466 |             self._run_stdio()
467 |         except KeyboardInterrupt:
468 |             logger.info("Keyboard interrupt received, shutting down...")
469 |         except Exception as e:
470 |             logger.error(f"Error running server: {str(e)}")
471 |         finally:
472 |             logger.info("Server shutting down")
470 |
471 | def _run_stdio(self) -> None:
472 | """Run the MCP server using standard input and output streams."""
473 | logger.info("Running in stdio mode")
474 |
475 | # Handle UTF-8 encoding on Windows
476 | if sys.platform == "win32":
477 | import msvcrt
478 | msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
479 | msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
480 | sys.stdout = open(sys.stdout.fileno(), mode='w', encoding='utf-8', buffering=1)
481 | sys.stdin = open(sys.stdin.fileno(), mode='r', encoding='utf-8', buffering=1)
482 |
483 | while True:
484 | try:
485 | # Read the content length header
486 | header = sys.stdin.readline().strip()
487 | if not header:
488 | continue
489 |
490 | content_length = int(header.split(":")[1].strip())
491 |
492 | # Skip the empty line
493 | sys.stdin.readline()
494 |
495 | # Read the message content
496 | content = sys.stdin.read(content_length)
497 |
498 | # Parse and handle the message
499 | message = json.loads(content)
500 | response = self._handle_client_message(message)
501 |
502 | # Send the response
503 | response_json = json.dumps(response)
504 | response_bytes = response_json.encode('utf-8')
505 | sys.stdout.write(f"Content-Length: {len(response_bytes)}\r\n\r\n")
506 | sys.stdout.write(response_json)
507 | sys.stdout.flush()
508 |
509 | except Exception as e:
510 | logger.error(f"Error in stdio loop: {str(e)}")
511 | # Try to recover and continue
512 |
if __name__ == "__main__":
    # CLI entry point: parse arguments, build the config, start the server.
    import argparse
    from .config import MCPConfig

    arg_parser = argparse.ArgumentParser(description="FastMCP Server")
    arg_parser.add_argument("--config", help="Path to configuration file")
    arg_parser.add_argument("--host", default="127.0.0.1", help="Host to bind to")
    arg_parser.add_argument("--port", type=int, default=8080, help="Port to bind to")

    cli_args = arg_parser.parse_args()

    # Load configuration from the given file, or fall back to defaults.
    if cli_args.config:
        config = MCPConfig(cli_args.config)
    else:
        config = MCPConfig()

    server = FastMCPServer(config)
    server.run(host=cli_args.host, port=cli_args.port)
```
--------------------------------------------------------------------------------
/printcast-agent/src/integrations/printing.py:
--------------------------------------------------------------------------------
```python
1 | """
2 | Print server integration for PrintCast Agent.
3 |
4 | Handles printing operations using CUPS and PDF generation.
5 | """
6 |
7 | import asyncio
8 | import os
9 | import tempfile
10 | from typing import Any, Dict, List, Optional
11 | from datetime import datetime
12 | from pathlib import Path
13 | import subprocess
14 | import base64
15 |
16 | import structlog
17 | from pydantic import BaseModel, Field
18 | from reportlab.lib.pagesizes import A4
19 | from reportlab.lib.styles import getSampleStyleSheet
20 | from reportlab.lib.units import mm
21 | from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, PageBreak, Table, TableStyle
22 | from reportlab.lib import colors
23 | from jinja2 import Template
24 |
25 | logger = structlog.get_logger(__name__)
26 |
27 |
class PrintJob(BaseModel):
    """Represents a single print job and its lifecycle state."""

    # Unique identifier assigned by PrintManager (e.g. "job_1_20240101_120000").
    job_id: str
    # Owning session id; PrintManager currently passes "" when none applies.
    session_id: str
    # Filesystem path of the document being printed.
    document_path: str
    # CUPS printer queue name, or "simulated" when CUPS is unavailable.
    printer_name: str
    # Lifecycle: "pending" -> "printing" -> "completed" / "failed" /
    # "cancelled"; PrintManager also uses "simulated" and "error".
    status: str = "pending"
    # Page count; left at 0 by the current code paths.
    pages: int = 0
    copies: int = 1
    created_at: datetime = Field(default_factory=datetime.now)
    # Set when the job reaches a completed state; None while in flight.
    completed_at: Optional[datetime] = None
    metadata: Dict[str, Any] = Field(default_factory=dict)
42 |
class PrintManager:
    """
    Manages printing operations.

    Features:
    - CUPS integration for local/network printers (via lpstat/lpr/lprm)
    - PDF generation from content
    - Print job queue management
    - Print preview generation

    When CUPS is unavailable, printing degrades to a simulated mode so the
    rest of the pipeline can still be exercised.
    """

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize print manager.

        Args:
            config: Configuration including:
                - default_printer: Default printer name
                - cups_server: CUPS server address
                - temp_dir: Temporary directory for print files
                - pdf_settings: PDF generation settings
        """
        self.config = config
        self.default_printer = config.get("default_printer", "default")
        # NOTE(review): cups_server is read from config but never used by
        # the lpr/lpstat calls below - confirm whether it should be passed
        # via -h or the CUPS_SERVER environment variable.
        self.cups_server = config.get("cups_server", "localhost:631")
        self.temp_dir = Path(config.get("temp_dir", "/tmp/printcast"))
        self.pdf_settings = config.get("pdf_settings", {})

        # Create temp directory
        self.temp_dir.mkdir(parents=True, exist_ok=True)

        # Print job tracking: job_id -> PrintJob, plus a monotonically
        # increasing counter used to build unique job ids.
        self.jobs: Dict[str, PrintJob] = {}
        self.job_counter = 0

        # Check if CUPS is available (actually probed in initialize()).
        self.cups_available = False

        logger.info(
            "Print manager initialized",
            default_printer=self.default_printer,
            temp_dir=str(self.temp_dir)
        )

    async def initialize(self):
        """Initialize print manager and check CUPS availability.

        Probes CUPS by running ``lpstat -p``; a zero exit code marks
        printing as available. Discovered printers are only logged here,
        not stored (see get_printer_list for on-demand discovery).
        """
        try:
            # Check if CUPS is available
            result = await asyncio.create_subprocess_exec(
                "lpstat", "-p",
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )
            stdout, stderr = await result.communicate()

            if result.returncode == 0:
                self.cups_available = True
                logger.info("CUPS is available")

                # Parse available printers from lines like
                # "printer <name> is idle. ..."
                printers = []
                for line in stdout.decode().split("\n"):
                    if line.startswith("printer"):
                        parts = line.split()
                        if len(parts) >= 2:
                            printers.append(parts[1])

                logger.info(
                    "Available printers",
                    count=len(printers),
                    printers=printers
                )
            else:
                logger.warning("CUPS not available", stderr=stderr.decode())

        except Exception as e:
            # e.g. FileNotFoundError when lpstat is not installed.
            logger.warning("Could not check CUPS availability", error=str(e))
            self.cups_available = False

    async def shutdown(self):
        """Cleanup resources.

        Marks still-pending jobs as cancelled; jobs already submitted to
        CUPS are not recalled here.
        """
        # Cancel pending jobs
        for job in self.jobs.values():
            if job.status == "pending":
                job.status = "cancelled"

        logger.info("Print manager shutdown")

    def is_available(self) -> bool:
        """Check if printing is available (CUPS probe succeeded)."""
        return self.cups_available

    async def generate_pdf(
        self,
        content: str,
        title: str = "PrintCast Document",
        format: str = "A4",
        output_path: Optional[str] = None
    ) -> str:
        """
        Generate PDF from content.

        Args:
            content: Content to print (text, HTML, or markdown); the type
                is sniffed from the first characters ("<html>" or "#")
            title: Document title
            format: Page format
            output_path: Optional output path

        Returns:
            Path to generated PDF

        Raises:
            Exception: re-raised from reportlab/parsing failures
        """
        # NOTE(review): the `format` parameter is accepted but the page
        # size below is hard-coded to A4 - confirm intended behavior.
        try:
            # Generate output path if not provided
            if not output_path:
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                output_path = self.temp_dir / f"document_{timestamp}.pdf"
            else:
                output_path = Path(output_path)

            # Create PDF document
            doc = SimpleDocTemplate(
                str(output_path),
                pagesize=A4,
                rightMargin=20*mm,
                leftMargin=20*mm,
                topMargin=20*mm,
                bottomMargin=20*mm
            )

            # Container for the 'Flowable' objects
            elements = []

            # Define styles
            styles = getSampleStyleSheet()
            title_style = styles['Title']
            heading_style = styles['Heading1']
            normal_style = styles['Normal']

            # Add title
            elements.append(Paragraph(title, title_style))
            elements.append(Spacer(1, 12))

            # Add timestamp
            timestamp_text = f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
            elements.append(Paragraph(timestamp_text, normal_style))
            elements.append(Spacer(1, 20))

            # Parse and add content
            if content.startswith("<html>"):
                # HTML content - parse and convert.  bs4 is a third-party
                # dependency imported lazily so plain-text printing does
                # not require it.
                from bs4 import BeautifulSoup
                soup = BeautifulSoup(content, "html.parser")

                for elem in soup.find_all(["h1", "h2", "h3", "p", "ul", "ol"]):
                    if elem.name.startswith("h"):
                        elements.append(Paragraph(elem.text, heading_style))
                    else:
                        elements.append(Paragraph(elem.text, normal_style))
                    elements.append(Spacer(1, 6))

            elif content.startswith("#"):
                # Markdown content - minimal line-based conversion: only
                # "#"/"##" headings and plain paragraphs are recognized.
                lines = content.split("\n")
                for line in lines:
                    if line.startswith("##"):
                        elements.append(Paragraph(line[2:].strip(), heading_style))
                    elif line.startswith("#"):
                        elements.append(Paragraph(line[1:].strip(), title_style))
                    elif line.strip():
                        elements.append(Paragraph(line, normal_style))
                        elements.append(Spacer(1, 6))

            else:
                # Plain text - split by paragraphs
                paragraphs = content.split("\n\n")
                for para in paragraphs:
                    if para.strip():
                        elements.append(Paragraph(para, normal_style))
                        elements.append(Spacer(1, 12))

            # Build PDF
            doc.build(elements)

            logger.info(
                "PDF generated",
                path=str(output_path),
                size=output_path.stat().st_size
            )

            return str(output_path)

        except Exception as e:
            logger.error("Failed to generate PDF", error=str(e))
            raise

    async def generate_preview(
        self,
        items: List[str],
        format: str = "pdf"
    ) -> Dict[str, Any]:
        """
        Generate print preview.

        Args:
            items: Content items to preview
            format: Preview format ("pdf" or anything else for HTML)

        Returns:
            Preview information: url, path, pages, size, format
        """
        try:
            # Generate preview document
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            preview_path = self.temp_dir / f"preview_{timestamp}.{format}"

            # Create preview content as markdown, one section per item.
            content = "# Print Preview\n\n"
            for i, item in enumerate(items, 1):
                content += f"## Item {i}\n{item}\n\n"

            # Generate document
            if format == "pdf":
                doc_path = await self.generate_pdf(
                    content,
                    title="Print Preview",
                    output_path=str(preview_path)
                )
            else:
                # HTML preview
                html_content = f"""
<!DOCTYPE html>
<html>
<head><title>Print Preview</title></head>
<body>
<h1>Print Preview</h1>
{''.join(f'<div>{item}</div>' for item in items)}
</body>
</html>
"""
                preview_path.write_text(html_content)
                doc_path = str(preview_path)

            # Get file info
            file_stat = preview_path.stat()

            # Estimate page count (rough heuristic: ~3 items per page).
            page_count = max(1, len(items) // 3)

            return {
                "url": f"file://{doc_path}",
                "path": doc_path,
                "pages": page_count,
                "size": file_stat.st_size,
                "format": format
            }

        except Exception as e:
            logger.error("Failed to generate preview", error=str(e))
            raise

    async def print_document(
        self,
        document_path: str,
        printer_name: Optional[str] = None,
        copies: int = 1,
        options: Optional[Dict[str, str]] = None
    ) -> str:
        """
        Print a document.

        Falls back to a simulated job when CUPS is unavailable.

        Args:
            document_path: Path to document
            printer_name: Printer to use (defaults to default_printer)
            copies: Number of copies
            options: Print options, passed to lpr as -o key=value

        Returns:
            Print job ID

        Raises:
            RuntimeError: when lpr exits non-zero
        """
        if not self.cups_available:
            logger.warning("CUPS not available, simulating print")
            return await self._simulate_print(document_path)

        try:
            printer = printer_name or self.default_printer

            # Create print job with a unique, timestamped id.
            self.job_counter += 1
            job_id = f"job_{self.job_counter}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"

            job = PrintJob(
                job_id=job_id,
                session_id="",
                document_path=document_path,
                printer_name=printer,
                copies=copies,
                status="pending"
            )

            self.jobs[job_id] = job

            # Build lpr command; argument-list form avoids shell quoting
            # issues with printer names and paths.
            cmd = ["lpr", "-P", printer]

            if copies > 1:
                cmd.extend(["-#", str(copies)])

            if options:
                for key, value in options.items():
                    cmd.extend(["-o", f"{key}={value}"])

            cmd.append(document_path)

            # Execute print command
            result = await asyncio.create_subprocess_exec(
                *cmd,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )

            stdout, stderr = await result.communicate()

            if result.returncode == 0:
                job.status = "printing"
                logger.info(
                    "Document sent to printer",
                    job_id=job_id,
                    printer=printer,
                    document=document_path
                )

                # Start monitoring job (fire-and-forget background task).
                asyncio.create_task(self._monitor_print_job(job_id))

            else:
                job.status = "failed"
                logger.error(
                    "Failed to print document",
                    job_id=job_id,
                    error=stderr.decode()
                )
                raise RuntimeError(f"Print failed: {stderr.decode()}")

            return job_id

        except Exception as e:
            logger.error("Failed to print document", error=str(e))
            raise

    async def _simulate_print(self, document_path: str) -> str:
        """Simulate printing when CUPS is not available.

        Creates a job record, waits 2 seconds to mimic processing, then
        marks the job completed.
        """
        self.job_counter += 1
        job_id = f"sim_job_{self.job_counter}"

        job = PrintJob(
            job_id=job_id,
            session_id="",
            document_path=document_path,
            printer_name="simulated",
            status="simulated"
        )

        self.jobs[job_id] = job

        logger.info(
            "Print simulated",
            job_id=job_id,
            document=document_path
        )

        # Simulate processing delay
        await asyncio.sleep(2)
        job.status = "completed"
        job.completed_at = datetime.now()

        return job_id

    async def _monitor_print_job(self, job_id: str):
        """Monitor print job status by polling ``lpstat -W completed``.

        Polls once per second for up to 60 seconds; marks the job
        completed when its id appears in the lpstat output.
        """
        job = self.jobs.get(job_id)
        if not job:
            return

        try:
            # Poll job status
            for _ in range(60):  # Monitor for up to 60 seconds
                await asyncio.sleep(1)

                # Check job status using lpstat
                result = await asyncio.create_subprocess_exec(
                    "lpstat", "-W", "completed",
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.PIPE
                )

                stdout, _ = await result.communicate()

                # Simple check - in production would parse lpstat output properly.
                # NOTE(review): job_id is our internal id, not the CUPS job
                # id lpr assigned, so this substring match likely never
                # fires and the job stays "printing" - confirm and map ids.
                if job_id in stdout.decode():
                    job.status = "completed"
                    job.completed_at = datetime.now()
                    logger.info("Print job completed", job_id=job_id)
                    break

        except Exception as e:
            logger.error(
                "Error monitoring print job",
                job_id=job_id,
                error=str(e)
            )
            job.status = "error"

    async def cancel_print_job(self, job_id: str) -> bool:
        """
        Cancel a print job.

        Args:
            job_id: Job ID to cancel

        Returns:
            True if cancelled successfully, False for unknown jobs or
            jobs already in a terminal state
        """
        job = self.jobs.get(job_id)
        if not job:
            return False

        if job.status in ["completed", "cancelled", "failed"]:
            return False

        try:
            if self.cups_available:
                # Cancel using lprm.
                # NOTE(review): passes our internal job_id to lprm, which
                # expects the CUPS job number - confirm id mapping.
                result = await asyncio.create_subprocess_exec(
                    "lprm", "-P", job.printer_name, job_id,
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.PIPE
                )

                await result.communicate()

            # Marked cancelled even without CUPS (simulated jobs).
            job.status = "cancelled"
            logger.info("Print job cancelled", job_id=job_id)
            return True

        except Exception as e:
            logger.error(
                "Failed to cancel print job",
                job_id=job_id,
                error=str(e)
            )
            return False

    def get_job_status(self, job_id: str) -> Optional[Dict[str, Any]]:
        """Get print job status as a plain dict, or None if unknown."""
        job = self.jobs.get(job_id)
        if not job:
            return None

        return {
            "job_id": job.job_id,
            "status": job.status,
            "printer": job.printer_name,
            "document": job.document_path,
            "created": job.created_at.isoformat(),
            "completed": job.completed_at.isoformat() if job.completed_at else None
        }

    async def get_printer_list(self) -> List[Dict[str, Any]]:
        """Get list of available printers.

        Returns a single "simulated" entry when CUPS is unavailable;
        otherwise parses ``lpstat -p -d`` output. Parse errors yield an
        empty list (logged, not raised).
        """
        printers = []

        if not self.cups_available:
            return [{
                "name": "simulated",
                "status": "ready",
                "default": True
            }]

        try:
            # Get printer list using lpstat
            result = await asyncio.create_subprocess_exec(
                "lpstat", "-p", "-d",
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE
            )

            stdout, _ = await result.communicate()
            output = stdout.decode()

            # Parse printer list: "printer <name> ..." lines plus a
            # "system default destination: <name>" line.
            default_printer = None
            for line in output.split("\n"):
                if line.startswith("printer"):
                    parts = line.split()
                    if len(parts) >= 2:
                        printer_name = parts[1]
                        status = "ready" if "enabled" in line else "offline"

                        printers.append({
                            "name": printer_name,
                            "status": status,
                            "default": False
                        })
                elif line.startswith("system default"):
                    parts = line.split(":")
                    if len(parts) >= 2:
                        default_printer = parts[1].strip()

            # Mark default printer
            for printer in printers:
                if printer["name"] == default_printer:
                    printer["default"] = True

        except Exception as e:
            logger.error("Failed to get printer list", error=str(e))

        return printers

    async def create_print_batch(
        self,
        documents: List[Dict[str, Any]],
        printer_name: Optional[str] = None
    ) -> List[str]:
        """
        Create a batch print job.

        Documents that fail are logged and skipped; the batch continues.

        Args:
            documents: List of documents to print; each dict has either
                "content" (rendered to PDF) or "path", plus optional
                "title" and "copies"
            printer_name: Printer to use

        Returns:
            List of job IDs for the documents that were printed
        """
        job_ids = []

        for doc in documents:
            try:
                # Generate PDF if needed
                if "content" in doc:
                    doc_path = await self.generate_pdf(
                        doc["content"],
                        title=doc.get("title", "Document")
                    )
                else:
                    doc_path = doc["path"]

                # Print document
                job_id = await self.print_document(
                    doc_path,
                    printer_name=printer_name,
                    copies=doc.get("copies", 1)
                )

                job_ids.append(job_id)

            except Exception as e:
                logger.error(
                    "Failed to print document in batch",
                    document=doc.get("title", "Unknown"),
                    error=str(e)
                )

        return job_ids
```