# darpai/darp_engine

# Directory Structure

```
├── .gitignore
├── .gitlab-ci.yml
├── .pre-commit-config.yaml
├── alembic
│   ├── env.py
│   ├── README
│   ├── script.py.mako
│   └── versions
│       ├── 2025_03_24_0945-ffe7a88d5b15_initial_migration.py
│       ├── 2025_04_09_1117-e25328dbfe80_set_logo_to_none.py
│       ├── 2025_04_22_0854-1c6edbdb4642_add_status_to_servers.py
│       ├── 2025_04_25_1158-49e1ab420276_host_type.py
│       ├── 2025_04_30_1049-387bed3ce6ae_server_title.py
│       ├── 2025_05_12_1404-735603f5e4d5_creator_id.py
│       ├── 2025_05_13_0634-3b8b8a5b99ad_deployment_fields.py
│       ├── 2025_05_13_1326-4b751cb703e2_timestamps.py
│       ├── 2025_05_15_1414-7a3913a3bb4c_logs.py
│       ├── 2025_05_27_0757-7e8c31ddb2db_json_logs.py
│       ├── 2025_05_28_0632-f2298e6acd39_base_image.py
│       ├── 2025_05_30_1231-4b5c0d56e1ca_build_instructions.py
│       ├── 2025_05_31_1156-c98f44cea12f_add_transport_protocol_column_to_servers.py
│       ├── 2025_06_07_1322-aa9a14a698c5_jsonb_logs.py
│       └── 2025_06_17_1353-667874dd7dee_added_alias.py
├── alembic.ini
├── common
│   ├── __init__.py
│   ├── llm
│   │   ├── __init__.py
│   │   └── client.py
│   └── notifications
│       ├── __init__.py
│       ├── client.py
│       ├── enums.py
│       └── exceptions.py
├── deploy
│   ├── docker-version
│   ├── push-and-run
│   ├── ssh-agent
│   └── ssh-passphrase
├── docker-compose-debug.yaml
├── docker-compose.yaml
├── healthchecker
│   ├── __init__.py
│   ├── Dockerfile
│   ├── scripts
│   │   ├── worker-start-debug.sh
│   │   └── worker-start.sh
│   └── src
│       ├── __init__.py
│       ├── __main__.py
│       └── checker.py
├── LICENSE
├── mcp_server
│   ├── __init__.py
│   ├── Dockerfile
│   ├── requirements.txt
│   ├── scripts
│   │   ├── __init__.py
│   │   ├── mcp_health_check.py
│   │   ├── start-debug.sh
│   │   └── start.sh
│   ├── server.json
│   └── src
│       ├── __init__.py
│       ├── decorators.py
│       ├── llm
│       │   ├── __init__.py
│       │   ├── prompts.py
│       │   └── tool_manager.py
│       ├── logger.py
│       ├── main.py
│       ├── registry_client.py
│       ├── schemas.py
│       ├── settings.py
│       └── tools.py
├── pyproject.toml
├── README.md
├── registry
│   ├── __init__.py
│   ├── Dockerfile
│   ├── requirements.txt
│   ├── scripts
│   │   ├── requirements
│   │   ├── start-debug.sh
│   │   └── start.sh
│   └── src
│       ├── __init__.py
│       ├── base_schema.py
│       ├── database
│       │   ├── __init__.py
│       │   ├── models
│       │   │   ├── __init__.py
│       │   │   ├── base.py
│       │   │   ├── log.py
│       │   │   ├── mixins
│       │   │   │   ├── __init__.py
│       │   │   │   ├── has_created_at.py
│       │   │   │   ├── has_server_id.py
│       │   │   │   └── has_updated_at.py
│       │   │   ├── server.py
│       │   │   └── tool.py
│       │   └── session.py
│       ├── deployments
│       │   ├── __init__.py
│       │   ├── logs_repository.py
│       │   ├── router.py
│       │   ├── schemas.py
│       │   └── service.py
│       ├── errors.py
│       ├── logger.py
│       ├── main.py
│       ├── producer.py
│       ├── search
│       │   ├── __init__.py
│       │   ├── prompts.py
│       │   ├── schemas.py
│       │   └── service.py
│       ├── servers
│       │   ├── __init__.py
│       │   ├── repository.py
│       │   ├── router.py
│       │   ├── schemas.py
│       │   └── service.py
│       ├── settings.py
│       ├── types.py
│       └── validator
│           ├── __init__.py
│           ├── constants.py
│           ├── prompts.py
│           ├── schemas.py
│           ├── service.py
│           └── ssl.py
├── scripts
│   ├── darp-add.py
│   ├── darp-router.py
│   └── darp-search.py
└── worker
    ├── __init__.py
    ├── Dockerfile
    ├── requirements.txt
    ├── scripts
    │   ├── worker-start-debug.sh
    │   └── worker-start.sh
    └── src
        ├── __init__.py
        ├── constants.py
        ├── consumer.py
        ├── deployment_service.py
        ├── docker_service.py
        ├── dockerfile_generators
        │   ├── __init__.py
        │   ├── base.py
        │   ├── factory.py
        │   ├── python
        │   │   ├── __init__.py
        │   │   └── generic.py
        │   ├── typescript
        │   │   ├── __init__.py
        │   │   └── generic.py
        │   └── user_provided.py
        ├── errors.py
        ├── main.py
        ├── schemas.py
        └── user_logger.py
```

# Files

--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------

```yaml
exclude: 'alembic/versions'
repos:
-   repo: https://github.com/psf/black
    rev: 24.4.2
    hooks:
    -   id: black

-   repo: https://github.com/asottile/reorder-python-imports
    rev: v3.13.0
    hooks:
    -   id: reorder-python-imports
        args: [--py312-plus]

-   repo: https://github.com/pre-commit/mirrors-mypy
    rev: v1.10.1
    hooks:
    -   id: mypy
        exclude: alembic
        additional_dependencies: [types-requests]

-   repo: https://github.com/PyCQA/flake8
    rev: 7.1.0
    hooks:
    -   id: flake8
        additional_dependencies: [flake8-pyproject]
```

--------------------------------------------------------------------------------
/.gitlab-ci.yml:
--------------------------------------------------------------------------------

```yaml
stages:
  - test
  - build
  - deploy

test-pre-commit:
  stage: test
  tags:
    - darp-group-shell-runner
  script:
    - source ~/miniconda3/bin/activate && pre-commit run --all-files

test-migrations:
  stage: test
  tags:
    - darp-group-shell-runner
  script:
    - duplicates=`grep -h down_revision alembic/versions/*.py | sort | uniq -d`
    - echo "$duplicates"
    - test -z "$duplicates"

clean-pre-commit:
  stage: test
  tags:
    - darp-group-shell-runner
  script:
    - source ~/miniconda3/bin/activate && pre-commit clean
  when: manual

build:
  stage: build
  when: manual
  tags:
    - darp-group-shell-runner
  script:
    - source deploy/docker-version
    - docker compose --profile main build

deploy-test:
  stage: deploy
  needs: ["build"]
  when: manual
  tags:
    - darp-group-shell-runner
  script:
    - DOCKER_NETWORK=highkey_network deploy/push-and-run "$TEST_DOCKER_HOST"

deploy-memelabs:
  stage: deploy
  needs: ["build"]
  when: manual
  tags:
    - darp-group-shell-runner
  script:
    - DOCKER_NETWORK=highkey_network deploy/push-and-run "$MEMELABS_DOCKER_HOST"
  rules:
    - if: $CI_COMMIT_BRANCH == "main"

deploy-prod:
  stage: deploy
  when: manual
  needs: ["build"]
  tags:
    - darp-group-shell-runner
  script:
    - DOCKER_NETWORK=highkey_network deploy/push-and-run "$PRODUCTION_DOCKER_HOST"
  rules:
    - if: $CI_COMMIT_BRANCH == "main"

deploy-toci-test:
  stage: deploy
  when: manual
  needs: ["build"]
  tags:
    - darp-group-shell-runner
  script:
    - deploy/push-and-run "$TOCI_TEST_DOCKER_HOST"

deploy-toci-prod:
  stage: deploy
  when: manual
  needs: ["build"]
  tags:
    - darp-group-shell-runner
  script:
    - deploy/push-and-run "$TOCI_PRODUCTION_DOCKER_HOST"
  rules:
    - if: $CI_COMMIT_BRANCH == "main"
```

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
# VSCode
.vscode/

# JetBrains
.idea

# WandB
wandb/

# Old
old/
temp/
profiler/

# Logs
logs/

# eval
eval_results/
evalplus_codegen/

# All datasets
dataset/
dataset_processed/
dataset_processed_*/
PDF/
*.xlsx
*.csv

# All evaluation results
eval_baselines/
eval_results/
eval_results_temp/

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
*.ipynb
*.pid

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pdm
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.env.*
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# MacOs
.DS_Store

# Local history
.history

# PyCharm
.idea/
```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

````markdown
# DARPEngine
The MCP search engine for DARP.

[![X][x-image]][x-url]
[![Code style: black][black-image]][black-url]
[![Imports: reorder-python-imports][imports-image]][imports-url]
[![Pydantic v2][pydantic-image]][pydantic-url]
[![pre-commit][pre-commit-image]][pre-commit-url]
[![License MIT][license-image]][license-url]

DARPEngine stores metadata for MCP servers hosted online and provides smart search capabilities.

## Features

* Simple CLI
* API access to search
* MCP tool to retrieve search results for connecting manually
* Server-based routing MCP tool: answers any question using the tools found for the user's request

### Coming soon

* Support for `.well-known/mcp.json`
* Crawler
* Nice frontend
* Hosted version
* Validate different levels of SSL certificates and integrate this info smartly to make sensitive MCP servers difficult to spoof

## Installation

```
export OPENAI_API_KEY=sk-...
docker network create highkey_network
docker compose build
docker compose -f docker-compose.yaml -f docker-compose-debug.yaml up --build --wait
```

## Getting started

You can connect DARPEngine to an MCP client (e.g. Claude Desktop or Cursor) using the MCP tools it provides. Just select SSE mode and specify `http://localhost:4689/sse` as the endpoint.

### Direct CLI use

Another way is to use the CLI. Most of the scripts work with just the standard Python library, but the routing tool requires the `mcp` package. You can install the script requirements like this:

```
conda create -n darp 'python>=3.10'
conda activate darp
pip install -r mcp_server/requirements.txt
```

Once installation is complete, we can use the scripts.

To begin, we need to add some MCP servers to the engine, e.g.:

```
$ python scripts/darp-add.py --url http://memelabs.ai:3006/sse --name code_analysis --description "Analyze gitlab repo for quality, topics, packages use"
```

Then we can make requests:

```
$ python scripts/darp-search.py "Analyze https://github.com/BenderV/autochat"
Found 1 servers:
code_analysis
```

You can get more useful results with the routing tool:

```
$ python scripts/darp-router.py "Analyze https://github.com/BenderV/autochat"
assistant: [tool_calls]
[tool] ...
[tool] ...
[tool] ...
assistant:
  ### Code Quality
  The code in the AutoChat repository demonstrates a good level of quality concerning **readability**, **maintainability**, and adherence to best practices:

  - **Readability**: Consistent naming conventions are evident, with adherence to Python's PEP 8 styling guidelines. Descriptive function and variable names enhance the understanding of the code's purpose.

  - **Maintainability**: The code is structured to allow easy updates and modifications, with a clear separation of concerns observed through the use of classes and methods handling distinct functionalities.

  - **Best Practices**: Extensive use of exception handling, type annotations, and docstrings reflect best practices in Python development. Some TODO comments suggest areas for improvement, indicating that further attention is needed.

  ### Code Structure
  The code is organized into multiple files and modules, each serving a distinct purpose:

  - **Modular Design**: Various classes (e.g., `Autochat`, `Image`, `Message`) indicate a well-structured object-oriented design that promotes separation of concerns, making the code easier to navigate.

  - **Logical Organization**: Files are logically separated based on functionality. For example, `chat.py` focuses on chat-related logic, while `model.py` handles message and image processing. The utility functions in `utils.py` enhance reusability.

  - **Testing**: The presence of a test file (`tests/test_utils.py`) shows commitment to testing, crucial for code reliability. The use of `unittest` indicates a structured approach to testing individual components.

  ### Main Functionality
  The code appears to be part of an **AutoChat package**, providing a framework for building conversational agents. Key functionalities include:

  - **Chat Management**: The `Autochat` class acts as the main interface for managing conversations, handling message history, context, and interaction limits.

  - **Message Handling**: Classes like `Message` and `MessagePart` enable structured message creation and processing, accommodating different message types, including text and images.

  - **Functionality Extensions**: Methods like `add_tool` and `add_function` allow dynamic addition of tools and functions, facilitating customization of the chat experience.

  - **Provider Integration**: Different API provider integrations (e.g., OpenAI, Anthropic) are encapsulated within respective classes, allowing flexibility in backend communication.

  - **Utilities**: Utility functions offer additional capabilities such as CSV formatting and function parsing that support main chat operations.

  Overall, the codebase is well-organized and showcases a thoughtful approach to developing a conversational AI framework. There is room for further refinement and enhancement, particularly in documentation and clarity of variable names.

  ### Library Usage
  The project makes use of **AI libraries**, indicated by its functionality related to conversational agents and integration with AI service providers. This supports its ability to manage interactions with AI models efficiently.

  ### Summary
  The AutoChat project is a chat system designed for communication with various AI models, primarily through the `Autochat` class, which manages conversations and supports complex message types, including text and images. The code is moderately complex due to its integration with external APIs and its ability to handle diverse interactions through extensible methods like `add_tool` and `add_function`. The quality of code is commendable, featuring a well-structured modular design that promotes readability and maintainability, although some areas require further documentation and refinement, such as clarifying variable names and enhancing comments. The organization into separate files for models, utilities, and tests aids development, but the utility functions could benefit from better categorization for improved clarity.
```

Of course, the usefulness of the result depends on the MCP servers you connect to the engine.

## Get help and support

Please feel free to connect with us using the [discussion section](https://github.com/hipasus/darp_engine/discussions).

## Contributing

Follow us on X: https://x.com/DARP_AI

## License

The DARPEngine codebase is under MIT license.

<br>

[x-image]: https://img.shields.io/twitter/follow/DARP_AI?style=social
[x-url]: https://x.com/DARP_AI
[black-image]: https://img.shields.io/badge/code%20style-black-000000.svg
[black-url]: https://github.com/psf/black
[imports-image]: https://img.shields.io/badge/%20imports-reorder_python_imports-%231674b1?style=flat&labelColor=ef8336
[imports-url]: https://github.com/asottile/reorder-python-imports/
[pydantic-image]: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/pydantic/pydantic/main/docs/badge/v2.json
[pydantic-url]: https://pydantic.dev
[pre-commit-image]: https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white
[pre-commit-url]: https://github.com/pre-commit/pre-commit
[license-image]: https://img.shields.io/github/license/DARPAI/darp_engine
[license-url]: https://opensource.org/licenses/MIT
````
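
The SSE endpoint mentioned in the README can also be exercised straight from Python. Below is a minimal sketch (not part of the repository), assuming the compose stack is up on `localhost:4689` and the `mcp` package from `mcp_server/requirements.txt` is installed; it mirrors the pattern in `mcp_server/scripts/mcp_health_check.py` below.

```python
import asyncio

from mcp import ClientSession
from mcp.client.sse import sse_client


async def main() -> None:
    # Open an SSE connection to the engine and list the tools it exposes.
    async with sse_client("http://localhost:4689/sse") as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            tools = await session.list_tools()
            print([tool.name for tool in tools.tools])  # e.g. ['search_urls', 'routing']


if __name__ == "__main__":
    asyncio.run(main())
```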

--------------------------------------------------------------------------------
/common/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/common/llm/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/common/notifications/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/healthchecker/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/healthchecker/src/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/mcp_server/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/mcp_server/scripts/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/mcp_server/src/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/mcp_server/src/llm/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/registry/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/registry/src/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/registry/src/database/models/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/registry/src/deployments/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/registry/src/search/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/registry/src/servers/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/registry/src/validator/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/worker/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/worker/src/__init__.py:
--------------------------------------------------------------------------------

```python
```

--------------------------------------------------------------------------------
/worker/scripts/worker-start.sh:
--------------------------------------------------------------------------------

```bash
#!/usr/bin/env sh

set -e

python -m worker.src.main
```

--------------------------------------------------------------------------------
/healthchecker/scripts/worker-start.sh:
--------------------------------------------------------------------------------

```bash
#!/usr/bin/env sh

set -e

python -m healthchecker.src
```

--------------------------------------------------------------------------------
/mcp_server/scripts/start.sh:
--------------------------------------------------------------------------------

```bash
#!/usr/bin/env bash

set -e

python3 -m mcp_server.src.main
```

--------------------------------------------------------------------------------
/worker/src/dockerfile_generators/python/__init__.py:
--------------------------------------------------------------------------------

```python
from .generic import PythonGenerator


__all__ = ["PythonGenerator"]
```

--------------------------------------------------------------------------------
/worker/src/dockerfile_generators/typescript/__init__.py:
--------------------------------------------------------------------------------

```python
from .generic import TypeScriptGenerator


__all__ = ["TypeScriptGenerator"]
```

--------------------------------------------------------------------------------
/mcp_server/requirements.txt:
--------------------------------------------------------------------------------

```
mcp==1.6.0
httpx==0.28.1
pydantic==2.11.1
pydantic-settings==2.8.1
openai==1.65.4
```

--------------------------------------------------------------------------------
/registry/src/database/models/base.py:
--------------------------------------------------------------------------------

```python
from sqlalchemy.orm import DeclarativeBase


class Base(DeclarativeBase):
    pass
```

--------------------------------------------------------------------------------
/common/notifications/exceptions.py:
--------------------------------------------------------------------------------

```python
class NotificationsServiceError(Exception):
    """Base notifications-service exception"""
```

--------------------------------------------------------------------------------
/worker/scripts/worker-start-debug.sh:
--------------------------------------------------------------------------------

```bash
#!/usr/bin/env sh

set -e

pip install --upgrade -r requirements.txt
python -m worker.src.main
```

--------------------------------------------------------------------------------
/healthchecker/scripts/worker-start-debug.sh:
--------------------------------------------------------------------------------

```bash
#!/usr/bin/env sh

set -e

pip install --upgrade -r requirements.txt
python -m healthchecker.src
```

--------------------------------------------------------------------------------
/mcp_server/scripts/start-debug.sh:
--------------------------------------------------------------------------------

```bash
#!/usr/bin/env bash

set -e

pip install --upgrade -r ./requirements.txt
python3 -m mcp_server.src.main
```

--------------------------------------------------------------------------------
/registry/scripts/start.sh:
--------------------------------------------------------------------------------

```bash
#!/usr/bin/env bash

set -e

alembic upgrade head
uvicorn --proxy-headers --host 0.0.0.0 --port $UVICORN_PORT registry.src.main:app
```

--------------------------------------------------------------------------------
/registry/src/base_schema.py:
--------------------------------------------------------------------------------

```python
from pydantic import BaseModel
from pydantic import ConfigDict


class BaseSchema(BaseModel):
    model_config = ConfigDict(from_attributes=True)
```
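
All registry schemas inherit from this class, so `from_attributes=True` lets them be validated directly from ORM objects. An illustrative sketch (the `ServerOut` schema and `Row` stand-in are hypothetical, not repo code):

```python
from registry.src.base_schema import BaseSchema


class ServerOut(BaseSchema):
    name: str
    url: str


class Row:  # stands in for a SQLAlchemy model instance
    name = "code_analysis"
    url = "http://memelabs.ai:3006/sse"


print(ServerOut.model_validate(Row()))  # name='code_analysis' url='http://...'
```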

--------------------------------------------------------------------------------
/mcp_server/server.json:
--------------------------------------------------------------------------------

```json
{
  "name": "darp_search_engine",
  "description": "Search Engine for MCP servers",
  "endpoint": "http://registry_mcp_server/sse",
  "logo": null
}
```

--------------------------------------------------------------------------------
/registry/src/database/models/mixins/__init__.py:
--------------------------------------------------------------------------------

```python
from .has_created_at import HasCreatedAt
from .has_server_id import HasServerId
from .has_updated_at import HasUpdatedAt


__all__ = ["HasCreatedAt", "HasUpdatedAt", "HasServerId"]
```

--------------------------------------------------------------------------------
/registry/scripts/start-debug.sh:
--------------------------------------------------------------------------------

```bash
#!/usr/bin/env bash

set -e

pip install --upgrade -r requirements.txt
alembic upgrade head
uvicorn --reload --proxy-headers --host 0.0.0.0 --port $UVICORN_PORT registry.src.main:app
```

--------------------------------------------------------------------------------
/mcp_server/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
FROM python:3.11
WORKDIR /workspace
COPY mcp_server/requirements.txt .
RUN pip install -r requirements.txt

COPY common ./common
COPY mcp_server ./mcp_server
ENV PATH "$PATH:/workspace/mcp_server/scripts"
```

--------------------------------------------------------------------------------
/worker/src/constants.py:
--------------------------------------------------------------------------------

```python
from enum import StrEnum


class BaseImage(StrEnum):
    python_3_10 = "python:3.10.17-slim"
    python_3_11 = "python:3.11.12-slim"
    python_3_12 = "python:3.12.10-slim"

    node_lts = "node:lts-alpine"
```

--------------------------------------------------------------------------------
/worker/src/errors.py:
--------------------------------------------------------------------------------

```python
class ProcessingError(Exception):
    def __init__(self, message: str | None = None):
        self.message = message


class InvalidData(ProcessingError):
    pass


class DependenciesError(ProcessingError):
    pass
```

--------------------------------------------------------------------------------
/common/notifications/enums.py:
--------------------------------------------------------------------------------

```python
from enum import StrEnum


class NotificationEvent(StrEnum):
    email_confirmation = "email-confirmation"
    deploy_failed = "server-deploy-failed"
    deploy_successful = "server-success-deploy"
    server_healthcheck_failed = "server-healthcheck-failed"
```

--------------------------------------------------------------------------------
/registry/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
FROM python:3.11
WORKDIR /workspace
COPY registry/requirements.txt .
RUN pip install -r requirements.txt

COPY alembic ./alembic
COPY alembic.ini ./alembic.ini
COPY common ./common
COPY registry ./registry
COPY worker ./worker
ENV PATH "$PATH:/workspace/registry/scripts"
```

--------------------------------------------------------------------------------
/registry/src/producer.py:
--------------------------------------------------------------------------------

```python
from aiokafka import AIOKafkaProducer

from registry.src.settings import settings


async def get_producer(url=settings.broker_url) -> AIOKafkaProducer:
    return AIOKafkaProducer(
        bootstrap_servers=url,
        value_serializer=lambda m: m.model_dump_json().encode("utf-8"),
    )
```
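
Because the serializer calls `model_dump_json()`, anything sent through this producer must be a Pydantic model. A usage sketch (the `DeployTask` model and topic name here are hypothetical stand-ins; the real schemas live in `worker/src/schemas.py`):

```python
import asyncio

from pydantic import BaseModel

from registry.src.producer import get_producer


class DeployTask(BaseModel):
    server_id: int
    task_type: str


async def main() -> None:
    producer = await get_producer()
    await producer.start()  # aiokafka producers must be started before sending
    try:
        await producer.send_and_wait("worker", DeployTask(server_id=1, task_type="deploy"))
    finally:
        await producer.stop()


asyncio.run(main())
```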

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------

```toml
# pyproject.toml
[tool.pytest.ini_options]
minversion = "6.0"
addopts = "--doctest-modules"

[tool.flake8]
ignore = ['E203', 'D203', 'E501', 'W503']
exclude = [
        '.git',
        '__pycache__',
        'docs/source/conf.py',
        'old',
        'build',
        'dist',
        '.venv',
]
max-complexity = 25

--------------------------------------------------------------------------------
/registry/src/main.py:
--------------------------------------------------------------------------------

```python
from fastapi import FastAPI

from registry.src.deployments.router import router as deploy_router
from registry.src.servers.router import router as servers_router

app = FastAPI()

app.include_router(servers_router)
app.include_router(deploy_router)


@app.get("/healthcheck")
async def healthcheck():
    return "Alive"
```
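
A quick liveness probe for this app; a sketch that assumes the API is published on port 8000 (the real port comes from `$UVICORN_PORT` in the start scripts):

```python
import httpx

# The /healthcheck route above returns the JSON string "Alive".
print(httpx.get("http://localhost:8000/healthcheck").json())
```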

--------------------------------------------------------------------------------
/worker/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
FROM docker:latest

RUN apk add --no-cache python3 py3-pip gcc python3-dev libc-dev zlib-dev
WORKDIR /workspace
COPY worker/requirements.txt .
RUN pip config set global.break-system-packages true
RUN pip install -r requirements.txt

COPY registry ./registry
COPY worker ./worker

ENV PATH "$PATH:/workspace/worker/scripts"
```

--------------------------------------------------------------------------------
/registry/src/database/__init__.py:
--------------------------------------------------------------------------------

```python
from .models.base import Base
from .models.log import ServerLogs
from .models.server import Server
from .models.tool import Tool
from .session import get_session
from .session import get_unmanaged_session

__all__ = [
    "Base",
    "get_session",
    "get_unmanaged_session",
    "Server",
    "Tool",
    "ServerLogs",
]
```

--------------------------------------------------------------------------------
/mcp_server/src/llm/prompts.py:
--------------------------------------------------------------------------------

```python
default_prompt: str = (
    """
You are an expert in the topic of the user's request.

Your assistant has prepared for you the tools that might be helpful to answer the request.

Your goal is to provide detailed, accurate information and analysis in response to user queries and, if necessary, to perform some actions using the tools.
""".strip()
)
```

--------------------------------------------------------------------------------
/registry/src/database/models/mixins/has_created_at.py:
--------------------------------------------------------------------------------

```python
from datetime import datetime

from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy.orm import declarative_mixin
from sqlalchemy.orm import declared_attr


@declarative_mixin
class HasCreatedAt:
    @declared_attr
    @classmethod
    def created_at(cls):
        return Column(DateTime, default=datetime.now, nullable=False)
```

--------------------------------------------------------------------------------
/worker/src/dockerfile_generators/__init__.py:
--------------------------------------------------------------------------------

```python
from .base import DockerfileGenerator
from .factory import get_dockerfile_generator
from .python import PythonGenerator
from .typescript import TypeScriptGenerator
from .user_provided import UserDockerfileGenerator


__all__ = [
    "DockerfileGenerator",
    "PythonGenerator",
    "TypeScriptGenerator",
    "UserDockerfileGenerator",
    "get_dockerfile_generator",
]
```

--------------------------------------------------------------------------------
/healthchecker/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
FROM docker:latest

RUN apk add --no-cache python3 py3-pip gcc python3-dev libc-dev zlib-dev
WORKDIR /workspace
COPY registry/requirements.txt .

RUN pip config set global.break-system-packages true
RUN pip install -r requirements.txt

COPY common ./common
COPY registry ./registry
COPY worker ./worker
COPY healthchecker ./healthchecker

ENV PATH "$PATH:/workspace/healthchecker/scripts"
```

--------------------------------------------------------------------------------
/registry/src/database/models/mixins/has_updated_at.py:
--------------------------------------------------------------------------------

```python
from datetime import datetime

from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy.orm import declarative_mixin
from sqlalchemy.orm import declared_attr


@declarative_mixin
class HasUpdatedAt:
    @declared_attr
    @classmethod
    def updated_at(cls):
        return Column(
            DateTime, onupdate=datetime.now, default=datetime.now, nullable=False
        )
```

--------------------------------------------------------------------------------
/mcp_server/scripts/mcp_health_check.py:
--------------------------------------------------------------------------------

```python
import asyncio

from mcp import ClientSession
from mcp.client.sse import sse_client

from mcp_server.src.settings import settings


async def healthcheck():
    async with sse_client(f"http://localhost:{settings.mcp_port}/sse") as (
        read,
        write,
    ):
        async with ClientSession(read, write) as session:
            await session.initialize()


if __name__ == "__main__":
    asyncio.run(healthcheck())
```

--------------------------------------------------------------------------------
/registry/src/database/models/mixins/has_server_id.py:
--------------------------------------------------------------------------------

```python
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy.orm import declarative_mixin
from sqlalchemy.orm import declared_attr


@declarative_mixin
class HasServerId:
    @declared_attr
    def server_id(cls):
        return Column(
            Integer,
            ForeignKey("server.id", ondelete="CASCADE"),
            nullable=False,
            primary_key=True,
        )
```

--------------------------------------------------------------------------------
/registry/src/database/models/log.py:
--------------------------------------------------------------------------------

```python
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import Mapped
from sqlalchemy.orm import mapped_column

from .base import Base
from .mixins import HasCreatedAt
from .mixins import HasServerId
from .mixins import HasUpdatedAt


class ServerLogs(HasCreatedAt, HasUpdatedAt, HasServerId, Base):
    __tablename__ = "server_logs"

    deployment_logs: Mapped[list[dict]] = mapped_column(
        JSONB(), nullable=False, server_default="'[]'::jsonb"
    )
```

--------------------------------------------------------------------------------
/registry/src/logger.py:
--------------------------------------------------------------------------------

```python
import logging

from .settings import settings

logger = logging.getLogger("registry")
logger.setLevel(logging.INFO)

formatter = logging.Formatter("[%(asctime)s][%(name)s][%(levelname)s]: %(message)s")

stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)

settings.log_dir.mkdir(parents=True, exist_ok=True)
file_handler = logging.FileHandler(settings.log_dir / "registry.log")
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
```

--------------------------------------------------------------------------------
/registry/src/types.py:
--------------------------------------------------------------------------------

```python
from enum import auto
from enum import StrEnum


class HostType(StrEnum):
    internal = auto()
    external = auto()


class ServerStatus(StrEnum):
    active = auto()
    inactive = auto()
    in_processing = auto()
    stopped = auto()
    processing_failed = auto()


class TaskType(StrEnum):
    deploy = auto()
    start = auto()
    stop = auto()


class ServerTransportProtocol(StrEnum):
    SSE = "SSE"
    STREAMABLE_HTTP = "STREAMABLE_HTTP"


class RoutingMode(StrEnum):
    auto = "auto"
    deepresearch = "deepresearch"
```
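
With `StrEnum`, `auto()` yields the lowercased member name, so these members compare equal to plain strings; a quick illustration (not repo code):

```python
from registry.src.types import HostType
from registry.src.types import ServerStatus

assert HostType.internal == "internal"  # auto() -> member name
assert ServerStatus.in_processing.value == "in_processing"
print(f"host={HostType.external}, status={ServerStatus.active}")  # host=external, status=active
```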

--------------------------------------------------------------------------------
/mcp_server/src/logger.py:
--------------------------------------------------------------------------------

```python
import logging

from mcp_server.src.settings import settings

logger = logging.getLogger("registry_mcp")
logger.setLevel(settings.log_level)

formatter = logging.Formatter("[%(asctime)s][%(name)s][%(levelname)s]: %(message)s")

stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)

settings.log_dir.mkdir(parents=True, exist_ok=True)
file_handler = logging.FileHandler(settings.log_dir / "registry_mcp.log")
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
```

--------------------------------------------------------------------------------
/alembic/versions/2025_05_12_1404-735603f5e4d5_creator_id.py:
--------------------------------------------------------------------------------

```python
 1 | """creator_id
 2 | 
 3 | Revision ID: 735603f5e4d5
 4 | Revises: 1c6edbdb4642
 5 | Create Date: 2025-05-12 14:04:17.438611
 6 | 
 7 | """
 8 | from alembic import op
 9 | import sqlalchemy as sa
10 | 
11 | 
12 | # revision identifiers, used by Alembic.
13 | revision = '735603f5e4d5'
14 | down_revision = '1c6edbdb4642'
15 | branch_labels = None
16 | depends_on = None
17 | 
18 | 
19 | def upgrade():
20 |     # ### commands auto generated by Alembic - please adjust! ###
21 |     op.add_column('server', sa.Column('creator_id', sa.String(), nullable=True))
22 |     # ### end Alembic commands ###
23 | 
24 | 
25 | def downgrade():
26 |     # ### commands auto generated by Alembic - please adjust! ###
27 |     op.drop_column('server', 'creator_id')
28 |     # ### end Alembic commands ###
29 | 
```

--------------------------------------------------------------------------------
/alembic/versions/2025_05_28_0632-f2298e6acd39_base_image.py:
--------------------------------------------------------------------------------

```python
 1 | """base_image
 2 | 
 3 | Revision ID: f2298e6acd39
 4 | Revises: 7e8c31ddb2db
 5 | Create Date: 2025-05-28 06:32:44.781544
 6 | 
 7 | """
 8 | from alembic import op
 9 | import sqlalchemy as sa
10 | 
11 | 
12 | # revision identifiers, used by Alembic.
13 | revision = 'f2298e6acd39'
14 | down_revision = 'aa9a14a698c5'
15 | branch_labels = None
16 | depends_on = None
17 | 
18 | 
19 | def upgrade():
20 |     # ### commands auto generated by Alembic - please adjust! ###
21 |     op.add_column('server', sa.Column('base_image', sa.String(), nullable=True))
22 |     # ### end Alembic commands ###
23 | 
24 | 
25 | def downgrade():
26 |     # ### commands auto generated by Alembic - please adjust! ###
27 |     op.drop_column('server', 'base_image')
28 |     # ### end Alembic commands ###
29 | 
```

--------------------------------------------------------------------------------
/alembic/versions/2025_05_30_1231-4b5c0d56e1ca_build_instructions.py:
--------------------------------------------------------------------------------

```python
 1 | """build_instructions
 2 | 
 3 | Revision ID: 4b5c0d56e1ca
 4 | Revises: f2298e6acd39
 5 | Create Date: 2025-05-30 12:31:08.515098
 6 | 
 7 | """
 8 | from alembic import op
 9 | import sqlalchemy as sa
10 | 
11 | 
12 | # revision identifiers, used by Alembic.
13 | revision = '4b5c0d56e1ca'
14 | down_revision = 'f2298e6acd39'
15 | branch_labels = None
16 | depends_on = None
17 | 
18 | 
19 | def upgrade():
20 |     # ### commands auto generated by Alembic - please adjust! ###
21 |     op.add_column('server', sa.Column('build_instructions', sa.String(), nullable=True))
22 |     # ### end Alembic commands ###
23 | 
24 | 
25 | def downgrade():
26 |     # ### commands auto generated by Alembic - please adjust! ###
27 |     op.drop_column('server', 'build_instructions')
28 |     # ### end Alembic commands ###
29 | 
```

--------------------------------------------------------------------------------
/worker/src/main.py:
--------------------------------------------------------------------------------

```python
import asyncio

from kafka import KafkaAdminClient
from kafka.admin import NewTopic
from kafka.errors import TopicAlreadyExistsError

from registry.src.logger import logger
from registry.src.settings import settings
from worker.src.consumer import Consumer


if __name__ == "__main__":
    try:
        admin = KafkaAdminClient(bootstrap_servers=settings.broker_url)

        worker_topic = NewTopic(
            name=settings.worker_topic, num_partitions=1, replication_factor=1
        )
        admin.create_topics([worker_topic])
    except TopicAlreadyExistsError:
        pass

    consumer = Consumer()
    logger.info("Starting consumer")
    asyncio.run(consumer.start())
```

--------------------------------------------------------------------------------
/mcp_server/src/registry_client.py:
--------------------------------------------------------------------------------

```python
from httpx import AsyncClient

from mcp_server.src.schemas import RegistryServer
from mcp_server.src.settings import settings


class RegistryClient:
    def __init__(self):
        self.client = AsyncClient(base_url=settings.registry_url)
        self.timeout = 90

    async def search(self, request: str) -> list[RegistryServer]:
        response = await self.client.get(
            "/servers/search", params=dict(query=request), timeout=self.timeout
        )
        assert (
            response.status_code == 200
        ), f"{response.status_code=} {response.content=}"
        data = response.json()
        return [RegistryServer.model_validate(server) for server in data]
```
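
A usage sketch for the client above (an assumption, not repo code; it requires the registry service to be reachable at `settings.registry_url`):

```python
import asyncio

from mcp_server.src.registry_client import RegistryClient


async def main() -> None:
    client = RegistryClient()
    servers = await client.search("Analyze https://github.com/BenderV/autochat")
    for server in servers:
        print(server.name, server.url)


asyncio.run(main())
```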

--------------------------------------------------------------------------------
/mcp_server/src/schemas.py:
--------------------------------------------------------------------------------

```python
from typing import Any

from openai.types.chat import ChatCompletionMessage
from openai.types.chat import ChatCompletionToolMessageParam
from pydantic import BaseModel
from pydantic import ConfigDict


class BaseSchema(BaseModel):
    model_config = ConfigDict(from_attributes=True)


class Tool(BaseSchema):
    name: str
    description: str
    input_schema: dict[str, Any]


class RegistryServer(BaseSchema):
    name: str
    description: str
    url: str
    logo: str | None
    id: int
    tools: list[Tool]


class ToolInfo(BaseSchema):
    tool_name: str
    server: RegistryServer


class RoutingResponse(BaseSchema):
    conversation: list[ChatCompletionMessage | ChatCompletionToolMessageParam]
```

--------------------------------------------------------------------------------
/alembic/versions/2025_04_22_0854-1c6edbdb4642_add_status_to_servers.py:
--------------------------------------------------------------------------------

```python
 1 | """Add status to servers.
 2 | 
 3 | Revision ID: 1c6edbdb4642
 4 | Revises: 49e1ab420276
 5 | Create Date: 2025-04-22 08:54:20.892397
 6 | 
 7 | """
 8 | 
 9 | from alembic import op
10 | import sqlalchemy as sa
11 | 
12 | 
13 | # revision identifiers, used by Alembic.
14 | revision = "1c6edbdb4642"
15 | down_revision = "49e1ab420276"
16 | branch_labels = None
17 | depends_on = None
18 | 
19 | 
20 | def upgrade():
21 |     op.add_column(
22 |         "server",
23 |         sa.Column(
24 |             "status",
25 |             sa.String(),
26 |             nullable=True,
27 |         ),
28 |     )
29 |     op.execute("""UPDATE server SET status = 'active'""")
30 |     op.alter_column("server", "status", nullable=False)
31 | 
32 | 
33 | def downgrade():
34 |     # ### commands auto generated by Alembic - please adjust! ###
35 |     op.drop_column("server", "status")
36 |     # ### end Alembic commands ###
37 | 
```

--------------------------------------------------------------------------------
/alembic/versions/2025_04_30_1049-387bed3ce6ae_server_title.py:
--------------------------------------------------------------------------------

```python
 1 | """server title
 2 | 
 3 | Revision ID: 387bed3ce6ae
 4 | Revises: 49e1ab420276
 5 | Create Date: 2025-04-30 10:49:34.277571
 6 | 
 7 | """
 8 | from alembic import op
 9 | import sqlalchemy as sa
10 | 
11 | 
12 | # revision identifiers, used by Alembic.
13 | revision = '387bed3ce6ae'
14 | down_revision = '7a3913a3bb4c'
15 | branch_labels = None
16 | depends_on = None
17 | 
18 | 
19 | def upgrade():
20 |     # ### commands auto generated by Alembic - please adjust! ###
21 |     op.add_column('server', sa.Column('title', sa.String(), nullable=True))
22 |     op.execute("""UPDATE server SET title = name""")
23 |     op.alter_column("server", "title", nullable=False)
24 |     # ### end Alembic commands ###
25 | 
26 | 
27 | def downgrade():
28 |     # ### commands auto generated by Alembic - please adjust! ###
29 |     op.drop_column('server', 'title')
30 |     # ### end Alembic commands ###
31 | 
```

--------------------------------------------------------------------------------
/registry/src/search/prompts.py:
--------------------------------------------------------------------------------

````python
get_top_servers: str = (
    """
You are part of MCP search engine. You are given a list of MCP server descriptions and a user's query. Your task is to select a minimal list of MCP servers that have enough tools to satisfy the query. Results of this request will be returned to the user's system which will make a separate request to LLM with the selected tools.

Consider if it is possible to satisfy the request in one step or in several steps. Consider if some tools don't have enough data if they are enough to completely perform one step or if you need to select several alternatives for the step. It is important that you follow the output format precisely.

Input data for you:

```
mcp_servers = {servers_list!r}
user_request = {request!r}
```
""".strip()
)
````
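
The `{servers_list!r}` and `{request!r}` placeholders suggest the template is rendered with plain `str.format`, which applies `repr()` to each value; a sketch of that assumption (the example server entry is made up):

```python
from registry.src.search.prompts import get_top_servers

prompt = get_top_servers.format(
    servers_list=[{"name": "code_analysis", "description": "Analyze gitlab repos"}],
    request="Analyze https://github.com/BenderV/autochat",
)
print(prompt)
```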

--------------------------------------------------------------------------------
/alembic/versions/2025_06_17_1353-667874dd7dee_added_alias.py:
--------------------------------------------------------------------------------

```python
 1 | """added llm_tool_name
 2 | 
 3 | Revision ID: 667874dd7dee
 4 | Revises: c98f44cea12f
 5 | Create Date: 2025-06-17 13:53:18.588902
 6 | 
 7 | """
 8 | from alembic import op
 9 | import sqlalchemy as sa
10 | 
11 | 
12 | # revision identifiers, used by Alembic.
13 | revision = '667874dd7dee'
14 | down_revision = 'c98f44cea12f'
15 | branch_labels = None
16 | depends_on = None
17 | 
18 | 
19 | def upgrade():
20 |     op.add_column('tool', sa.Column('alias', sa.String(), nullable=True))
21 |     op.execute("""
22 |         UPDATE tool
23 |         SET alias = tool.name || '__' || server.name
24 |         FROM server
25 |         WHERE server.url = tool.server_url;
26 |     """)
27 |     op.alter_column("tool", "alias", nullable=False)
28 | 
29 | def downgrade():
30 |     # ### commands auto generated by Alembic - please adjust! ###
31 |     op.drop_column('tool', 'alias')
32 |     # ### end Alembic commands ###
33 | 
```

--------------------------------------------------------------------------------
/alembic/versions/2025_04_25_1158-49e1ab420276_host_type.py:
--------------------------------------------------------------------------------

```python
 1 | """host_type
 2 | 
 3 | Revision ID: 49e1ab420276
 4 | Revises: e25328dbfe80
 5 | Create Date: 2025-04-25 11:58:17.768233
 6 | 
 7 | """
 8 | from alembic import op
 9 | import sqlalchemy as sa
10 | 
11 | 
12 | # revision identifiers, used by Alembic.
13 | revision = '49e1ab420276'
14 | down_revision = 'e25328dbfe80'
15 | branch_labels = None
16 | depends_on = None
17 | 
18 | 
19 | def upgrade():
20 |     # ### commands auto generated by Alembic - please adjust! ###
21 |     op.add_column('server', sa.Column('host_type', sa.String(), nullable=True))
22 |     op.execute("UPDATE server SET host_type = 'internal'")
23 |     op.alter_column("server", "host_type", nullable=False)
24 |     # ### end Alembic commands ###
25 | 
26 | 
27 | def downgrade():
28 |     # ### commands auto generated by Alembic - please adjust! ###
29 |     op.drop_column('server', 'host_type')
30 |     # ### end Alembic commands ###
31 | 
```

--------------------------------------------------------------------------------
/mcp_server/src/main.py:
--------------------------------------------------------------------------------

```python
from mcp.server.fastmcp import FastMCP

from mcp_server.src.schemas import RoutingResponse
from mcp_server.src.settings import settings
from mcp_server.src.tools import Tools


mcp: FastMCP = FastMCP(settings.server_json.name, port=settings.mcp_port)
tools: Tools = Tools()


@mcp.tool()
async def search_urls(request: str) -> list[str]:
    """Return URLs of the best MCP servers for processing the given request."""
    return await tools.search(request=request)


@mcp.tool()
async def routing(
    request: str,
) -> str:
    """Respond to any user request using MCP tools selected specifically for it."""
    response = await tools.routing(request=request)
    return RoutingResponse(conversation=response).model_dump_json()


if __name__ == "__main__":
    mcp.run(transport="sse")
```
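
Client-side, the two tools registered above can be invoked over SSE with `ClientSession.call_tool`; a sketch (not repo code), assuming the server runs on port 4689 as in the README:

```python
import asyncio

from mcp import ClientSession
from mcp.client.sse import sse_client


async def main() -> None:
    async with sse_client("http://localhost:4689/sse") as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.call_tool("search_urls", {"request": "analyze a github repo"})
            print(result.content)


asyncio.run(main())
```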

--------------------------------------------------------------------------------
/alembic/versions/2025_05_31_1156-c98f44cea12f_add_transport_protocol_column_to_servers.py:
--------------------------------------------------------------------------------

```python
 1 | """Add transport-protocol column to servers
 2 | 
 3 | Revision ID: c98f44cea12f
 4 | Revises: f2298e6acd39
 5 | Create Date: 2025-05-31 11:56:05.469429
 6 | 
 7 | """
 8 | 
 9 | from alembic import op
10 | import sqlalchemy as sa
11 | 
12 | # revision identifiers, used by Alembic.
13 | revision = "c98f44cea12f"
14 | down_revision = "4b5c0d56e1ca"
15 | branch_labels = None
16 | depends_on = None
17 | 
18 | 
19 | def upgrade():
20 |     # ### commands auto generated by Alembic - please adjust! ###
21 |     op.add_column("server", sa.Column("transport_protocol", sa.String(), nullable=True))
22 |     op.execute("""UPDATE server SET transport_protocol = 'SSE' WHERE status = 'active'""")
23 |     # ### end Alembic commands ###
24 | 
25 | 
26 | def downgrade():
27 |     # ### commands auto generated by Alembic - please adjust! ###
28 |     op.drop_column("server", "transport_protocol")
29 |     # ### end Alembic commands ###
30 | 
```

--------------------------------------------------------------------------------
/registry/requirements.txt:
--------------------------------------------------------------------------------

```
 1 | alembic==1.15.1
 2 | annotated-types==0.7.0
 3 | anyio==4.9.0
 4 | asyncpg==0.30.0
 5 | certifi==2025.1.31
 6 | cfgv==3.4.0
 7 | chardet==5.2.0
 8 | charset-normalizer==3.4.1
 9 | click==8.1.8
10 | distlib==0.3.9
11 | fastapi==0.115.11
12 | fastapi-pagination==0.12.34
13 | filelock==3.18.0
14 | greenlet==3.1.1
15 | h11==0.14.0
16 | httpcore==1.0.7
17 | httpx==0.28.1
18 | httpx-sse==0.4.0
19 | identify==2.6.9
20 | idna==3.10
21 | instructor==1.7.3
22 | Mako==1.3.9
23 | MarkupSafe==3.0.2
24 | mcp==1.9.2
25 | nodeenv==1.9.1
26 | openai==1.65.4
27 | platformdirs==4.3.7
28 | pre_commit==4.2.0
29 | psycopg2-binary==2.9.10
30 | pydantic==2.10.6
31 | pydantic-settings==2.8.1
32 | pydantic_core==2.27.2
33 | cryptography==44.0.2
34 | python-dotenv==1.0.1
35 | PyYAML==6.0.2
36 | requests==2.32.3
37 | setuptools==75.8.0
38 | sniffio==1.3.1
39 | SQLAlchemy==2.0.39
40 | sse-starlette==2.2.1
41 | starlette==0.46.1
42 | typing_extensions==4.12.2
43 | urllib3==2.3.0
44 | uvicorn==0.34.0
45 | virtualenv==20.29.3
46 | wheel==0.45.1
47 | jinja2==3.1.6
48 | aiokafka==0.12.0
49 | 
```

--------------------------------------------------------------------------------
/alembic/versions/2025_06_07_1322-aa9a14a698c5_jsonb_logs.py:
--------------------------------------------------------------------------------

```python
 1 | """jsonb_logs
 2 | 
 3 | Revision ID: aa9a14a698c5
 4 | Revises: 7e8c31ddb2db
 5 | Create Date: 2025-06-07 13:22:23.229230
 6 | 
 7 | """
 8 | from alembic import op
 9 | import sqlalchemy as sa
10 | from sqlalchemy.dialects import postgresql
11 | 
12 | # revision identifiers, used by Alembic.
13 | revision = 'aa9a14a698c5'
14 | down_revision = '7e8c31ddb2db'
15 | branch_labels = None
16 | depends_on = None
17 | 
18 | 
19 | def upgrade():
20 |     op.drop_column('server_logs', 'deployment_logs')
21 |     op.add_column('server_logs', sa.Column(
22 |         'deployment_logs',
23 |         postgresql.JSONB(),
24 |         nullable=False,
25 |         server_default=sa.text("'[]'::jsonb")
26 |     ))
27 | 
28 | 
29 | def downgrade():
30 |     op.drop_column('server_logs', 'deployment_logs')
31 |     op.add_column('server_logs', sa.Column(
32 |         'deployment_logs',
33 |         sa.VARCHAR(),
34 |         nullable=False,
35 |         server_default=sa.text("'[]'::character varying")
36 |     ))
37 | 
```

--------------------------------------------------------------------------------
/worker/requirements.txt:
--------------------------------------------------------------------------------

```
 1 | alembic==1.15.1
 2 | annotated-types==0.7.0
 3 | anyio==4.9.0
 4 | asyncpg==0.30.0
 5 | certifi==2025.1.31
 6 | cfgv==3.4.0
 7 | chardet==5.2.0
 8 | charset-normalizer==3.4.1
 9 | click==8.1.8
10 | distlib==0.3.9
11 | fastapi==0.115.11
12 | fastapi-pagination==0.12.34
13 | filelock==3.18.0
14 | greenlet==3.1.1
15 | h11==0.14.0
16 | httpcore==1.0.7
17 | httpx==0.28.1
18 | httpx-sse==0.4.0
19 | identify==2.6.9
20 | idna==3.10
21 | instructor==1.7.3
22 | Mako==1.3.9
23 | MarkupSafe==3.0.2
24 | mcp==1.9.2
25 | nodeenv==1.9.1
26 | openai==1.65.4
27 | platformdirs==4.3.7
28 | pre_commit==4.2.0
29 | psycopg2-binary==2.9.10
30 | pydantic==2.10.6
31 | pydantic-settings==2.8.1
32 | pydantic_core==2.27.2
33 | cryptography==44.0.2
34 | python-dotenv==1.0.1
35 | PyYAML==6.0.2
36 | requests==2.32.3
37 | setuptools==75.8.0
38 | sniffio==1.3.1
39 | SQLAlchemy==2.0.39
40 | sse-starlette==2.2.1
41 | starlette==0.46.1
42 | typing_extensions==4.12.2
43 | urllib3==2.3.0
44 | uvicorn==0.34.0
45 | virtualenv==20.29.3
46 | wheel==0.45.1
47 | jinja2==3.1.6
48 | aiokafka==0.12.0
49 | kafka-python==2.0.3
50 | 
```

--------------------------------------------------------------------------------
/alembic/versions/2025_04_09_1117-e25328dbfe80_set_logo_to_none.py:
--------------------------------------------------------------------------------

```python
 1 | """set logo to none
 2 | 
 3 | Revision ID: e25328dbfe80
 4 | Revises: ffe7a88d5b15
 5 | Create Date: 2025-04-09 11:17:59.990186
 6 | 
 7 | """
 8 | from alembic import op
 9 | import sqlalchemy as sa
10 | 
11 | 
12 | # revision identifiers, used by Alembic.
13 | revision = 'e25328dbfe80'
14 | down_revision = 'ffe7a88d5b15'
15 | branch_labels = None
16 | depends_on = None
17 | 
18 | 
19 | def upgrade():
20 |     op.execute("UPDATE server SET logo = NULL WHERE logo = ''")
21 |     # ### commands auto generated by Alembic - please adjust! ###
22 |     op.alter_column('server', 'logo',
23 |                existing_type=sa.VARCHAR(),
24 |                nullable=True)
25 |     # ### end Alembic commands ###
26 | 
27 | 
28 | def downgrade():
29 |     op.execute("UPDATE server SET logo = '' WHERE logo IS NULL")
30 |     # ### commands auto generated by Alembic - please adjust! ###
31 |     op.alter_column('server', 'logo',
32 |                existing_type=sa.VARCHAR(),
33 |                nullable=False)
34 |     # ### end Alembic commands ###
35 | 
```

--------------------------------------------------------------------------------
/registry/src/validator/prompts.py:
--------------------------------------------------------------------------------

```python
 1 | from jinja2 import Template
 2 | 
 3 | SENS_TOPIC_CHECK_SYSTEM_PROMPT: Template = Template(
 4 |     """
 5 | Analyze the description of an MCP server and its tools.
 6 | 
 7 | Your task is to determine whether the server likely works with sensitive data.
 8 | 
 9 | Consider the following rules:
10 | 1.	Sensitive data indicators:
11 | •	Mentions of emails, phone numbers, SSNs, credit card numbers, access tokens, passwords, or other personally identifiable information (PII).
12 | •	Tools or functionality related to authentication, authorization, payments, identity verification, document uploads, etc.
13 | 2.	Sensitive domains:
14 | •	Banking
15 | •	Finance
16 | •	Government services
17 | •	Or any tool that may require sending sensitive user data
18 | """
19 | )
20 | 
21 | SENS_TOPIC_CHECK_USER_PROMPT: Template = Template(
22 |     """
23 |     '''
24 |     Server's data: {{ server_data.model_dump_json() }}
25 |     --------------
26 |     Server's tools: {{ tools_data.model_dump_json() }}
27 |     '''
28 |     """
29 | )
30 | 
```
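
A short sketch of how these templates are presumably rendered; `server_with_tools` is assumed to be a `ServerWithTools` instance and `Tools` the `RootModel` from `registry/src/validator/schemas.py`:

```python
# Hedged sketch: render the validator prompts with jinja2.
system_prompt = SENS_TOPIC_CHECK_SYSTEM_PROMPT.render()
user_prompt = SENS_TOPIC_CHECK_USER_PROMPT.render(
    server_data=server_with_tools,
    tools_data=Tools(server_with_tools.tools),
)
```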

--------------------------------------------------------------------------------
/registry/src/validator/schemas.py:
--------------------------------------------------------------------------------

```python
 1 | from enum import Enum
 2 | 
 3 | from pydantic import BaseModel
 4 | from pydantic import Field
 5 | from pydantic import RootModel
 6 | 
 7 | from registry.src.servers.schemas import ServerCreate
 8 | from registry.src.servers.schemas import Tool
 9 | 
10 | 
11 | class SSLAuthorityLevel(Enum):
12 |     NO_CERTIFICATE = 0
13 |     INVALID_CERTIFICATE = 1
14 |     SELF_SIGNED_CERTIFICATE = 2
15 |     CERTIFICATE_OK = 3
16 |     EXTENDED_CERTIFICATE = 4
17 | 
18 | 
19 | class ServerNeedsSensitiveDataResponse(BaseModel):
20 |     reasoning: list[str] = Field(
21 |         ...,
22 |         description="For each argument of each tool, describe whether sensitive data could be sent to this argument, and if so what kind",
23 |     )
24 |     server_needs_sensitive_data: bool
25 | 
26 | 
27 | class ServerWithTools(ServerCreate):
28 |     tools: list[Tool]
29 | 
30 | 
31 | class ValidationResult(BaseModel):
32 |     server_requires_sensitive_data: bool = False
33 |     authority_level: SSLAuthorityLevel = SSLAuthorityLevel.NO_CERTIFICATE
34 | 
35 | 
36 | Tools = RootModel[list[Tool]]
37 | 
```

--------------------------------------------------------------------------------
/registry/src/validator/constants.py:
--------------------------------------------------------------------------------

```python
 1 | EV_OIDS: list[str] = [
 2 |     "2.16.756.5.14.7.4.8",
 3 |     "2.16.156.112554.3",
 4 |     "2.16.840.1.114028.10.1.2",
 5 |     "1.3.6.1.4.1.4788.2.202.1",
 6 |     "2.16.840.1.114404.1.1.2.4.1",
 7 |     "2.16.840.1.114413.1.7.23.3",
 8 |     "2.16.840.1.114412.2.1",
 9 |     "1.3.6.1.4.1.14777.6.1.2",
10 |     "2.16.578.1.26.1.3.3",
11 |     "1.3.6.1.4.1.14777.6.1.1",
12 |     "2.23.140.1.1",
13 |     "1.3.6.1.4.1.7879.13.24.1",
14 |     "1.3.6.1.4.1.40869.1.1.22.3",
15 |     "1.3.159.1.17.1",
16 |     "1.2.616.1.113527.2.5.1.1",
17 |     "2.16.840.1.114414.1.7.23.3",
18 |     "1.3.6.1.4.1.34697.2.1",
19 |     "2.16.756.1.89.1.2.1.1",
20 |     "1.3.6.1.4.1.6449.1.2.1.5.1",
21 |     "1.3.6.1.4.1.34697.2.3",
22 |     "1.3.6.1.4.1.13769.666.666.666.1.500.9.1",
23 |     "1.3.6.1.4.1.34697.2.2",
24 |     "1.2.156.112559.1.1.6.1",
25 |     "1.3.6.1.4.1.8024.0.2.100.1.2",
26 |     "1.3.6.1.4.1.34697.2.4",
27 |     "1.2.392.200091.100.721.1",
28 | ]
29 | SELF_SIGNED_CERTIFICATE_ERR_CODE: int = 18
30 | HTTPS_PORT: int = 443
31 | HTTP_PORT: int = 80
32 | 
```
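
One plausible way these constants are consumed, using the `cryptography` package pinned in the requirements above (loading the certificate from PEM bytes is an assumption):

```python
# Hedged sketch: treat a certificate as EV if any of its policy OIDs
# appears in EV_OIDS.
from cryptography import x509


def is_extended_validation(pem_data: bytes) -> bool:
    cert = x509.load_pem_x509_certificate(pem_data)
    try:
        policies = cert.extensions.get_extension_for_class(x509.CertificatePolicies)
    except x509.ExtensionNotFound:
        return False
    return any(
        policy.policy_identifier.dotted_string in EV_OIDS
        for policy in policies.value
    )
```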

--------------------------------------------------------------------------------
/alembic/versions/2025_05_27_0757-7e8c31ddb2db_json_logs.py:
--------------------------------------------------------------------------------

```python
 1 | """json_logs
 2 | 
 3 | Revision ID: 7e8c31ddb2db
 4 | Revises: 7a3913a3bb4c
 5 | Create Date: 2025-05-27 07:57:51.044585
 6 | 
 7 | """
 8 | from alembic import op
 9 | import sqlalchemy as sa
10 | from sqlalchemy.dialects import postgresql
11 | 
12 | # revision identifiers, used by Alembic.
13 | revision = '7e8c31ddb2db'
14 | down_revision = '7a3913a3bb4c'
15 | branch_labels = None
16 | depends_on = None
17 | 
18 | 
19 | def upgrade():
20 |     # ### commands auto generated by Alembic - please adjust! ###
21 |     op.execute("UPDATE server_logs SET deployment_logs = '[]'")
22 |     op.alter_column(
23 |         "server_logs",
24 |         "deployment_logs",
25 |         server_default="[]",
26 |         existing_server_default=""
27 |     )
28 |     # ### end Alembic commands ###
29 | 
30 | 
31 | def downgrade():
32 |     # ### commands auto generated by Alembic - please adjust! ###
33 |     op.alter_column(
34 |         "server_logs",
35 |         "deployment_logs",
36 |         server_default="",
37 |         existing_server_default="[]"
38 |     )
39 |     op.execute("UPDATE server_logs SET deployment_logs = ''")
40 |     # ### end Alembic commands ###
41 | 
```

--------------------------------------------------------------------------------
/alembic/versions/2025_05_13_1326-4b751cb703e2_timestamps.py:
--------------------------------------------------------------------------------

```python
 1 | """timestamps
 2 | 
 3 | Revision ID: 4b751cb703e2
 4 | Revises: 3b8b8a5b99ad
 5 | Create Date: 2025-05-13 13:26:42.259154
 6 | 
 7 | """
 8 | from datetime import datetime
 9 | 
10 | from alembic import op
11 | import sqlalchemy as sa
12 | 
13 | 
14 | # revision identifiers, used by Alembic.
15 | revision = '4b751cb703e2'
16 | down_revision = '3b8b8a5b99ad'
17 | branch_labels = None
18 | depends_on = None
19 | 
20 | 
21 | def upgrade():
22 |     op.add_column('server', sa.Column('created_at', sa.DateTime(), nullable=True))
23 |     op.add_column('server', sa.Column('updated_at', sa.DateTime(), nullable=True))
24 | 
25 |     current_time = f"timestamp '{datetime.now().isoformat()}'"
26 |     op.execute(f"""UPDATE server SET created_at = {current_time}, updated_at = {current_time}""")
27 |     op.alter_column("server", "created_at", nullable=False)
28 |     op.alter_column("server", "updated_at", nullable=False)
29 | 
30 | 
31 | def downgrade():
32 |     # ### commands auto generated by Alembic - please adjust! ###
33 |     op.drop_column('server', 'updated_at')
34 |     op.drop_column('server', 'created_at')
35 |     # ### end Alembic commands ###
36 | 
```

--------------------------------------------------------------------------------
/registry/src/database/models/tool.py:
--------------------------------------------------------------------------------

```python
 1 | from sqlalchemy import ForeignKey
 2 | from sqlalchemy import String
 3 | from sqlalchemy import UniqueConstraint
 4 | from sqlalchemy.dialects.postgresql import JSONB
 5 | from sqlalchemy.orm import Mapped
 6 | from sqlalchemy.orm import mapped_column
 7 | from sqlalchemy.orm import relationship
 8 | 
 9 | from ...database import models
10 | from .base import Base
11 | 
12 | 
13 | class Tool(Base):
14 |     __tablename__ = "tool"
15 |     __table_args__ = (
16 |         UniqueConstraint("name", "server_url", name="uq_tool_name_server_url"),
17 |     )
18 | 
19 |     id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True)
20 |     name: Mapped[str] = mapped_column(String(), nullable=False)
21 |     description: Mapped[str] = mapped_column(String(), nullable=False)
22 |     input_schema: Mapped[dict] = mapped_column(JSONB, nullable=False)
23 |     alias: Mapped[str] = mapped_column(String(), nullable=False)
24 |     server_url: Mapped[str] = mapped_column(
25 |         ForeignKey("server.url", ondelete="CASCADE")
26 |     )
27 |     server: Mapped["models.server.Server"] = relationship(back_populates="tools")
28 | 
```

--------------------------------------------------------------------------------
/alembic/versions/2025_05_15_1414-7a3913a3bb4c_logs.py:
--------------------------------------------------------------------------------

```python
 1 | """logs
 2 | 
 3 | Revision ID: 7a3913a3bb4c
 4 | Revises: 4b751cb703e2
 5 | Create Date: 2025-05-15 14:14:22.223452
 6 | 
 7 | """
 8 | from alembic import op
 9 | import sqlalchemy as sa
10 | 
11 | 
12 | # revision identifiers, used by Alembic.
13 | revision = '7a3913a3bb4c'
14 | down_revision = '4b751cb703e2'
15 | branch_labels = None
16 | depends_on = None
17 | 
18 | 
19 | def upgrade():
20 |     # ### commands auto generated by Alembic - please adjust! ###
21 |     op.create_table(
22 |         'server_logs',
23 |         sa.Column('deployment_logs', sa.String(), server_default='', nullable=False),
24 |         sa.Column('created_at', sa.DateTime(), nullable=False),
25 |         sa.Column('updated_at', sa.DateTime(), nullable=False),
26 |         sa.Column('server_id', sa.Integer(), nullable=False),
27 |         sa.ForeignKeyConstraint(['server_id'], ['server.id'], ondelete='CASCADE'),
28 |         sa.PrimaryKeyConstraint('server_id')
29 |     )
30 |     # ### end Alembic commands ###
31 | 
32 | 
33 | def downgrade():
34 |     # ### commands auto generated by Alembic - please adjust! ###
35 |     op.drop_table('server_logs')
36 |     # ### end Alembic commands ###
37 | 
```

--------------------------------------------------------------------------------
/worker/src/schemas.py:
--------------------------------------------------------------------------------

```python
 1 | from datetime import datetime
 2 | from enum import auto
 3 | from enum import StrEnum
 4 | 
 5 | from pydantic import Field
 6 | from pydantic import field_serializer
 7 | 
 8 | from registry.src.base_schema import BaseSchema
 9 | from registry.src.types import TaskType
10 | 
11 | 
12 | class Task(BaseSchema):
13 |     task_type: TaskType
14 |     server_id: int
15 | 
16 | 
17 | class LogLevel(StrEnum):
18 |     info = auto()
19 |     warning = auto()
20 |     error = auto()
21 | 
22 | 
23 | class LoggingEvent(StrEnum):
24 |     command_start = auto()
25 |     command_result = auto()
26 |     general_message = auto()
27 | 
28 | 
29 | class CommandStart(BaseSchema):
30 |     command: str
31 | 
32 | 
33 | class CommandResult(BaseSchema):
34 |     output: str
35 |     success: bool
36 | 
37 | 
38 | class GeneralLogMessage(BaseSchema):
39 |     log_level: LogLevel
40 |     message: str
41 | 
42 | 
43 | class LogMessage(BaseSchema):
44 |     event_type: LoggingEvent
45 |     data: CommandStart | CommandResult | GeneralLogMessage
46 |     timestamp: datetime = Field(default_factory=datetime.now)
47 | 
48 |     @field_serializer("timestamp")
49 |     def serialize_datetime(self, value: datetime, _info) -> str:
50 |         return value.isoformat()
51 | 
```
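
A minimal usage sketch for these schemas, mirroring how `UserLogger` (later in this dump) builds entries; the timestamp default is applied automatically and serialized to ISO-8601:

```python
# Minimal sketch: a command-result log entry ready for storage.
log = LogMessage(
    event_type=LoggingEvent.command_result,
    data=CommandResult(output="npm install completed", success=True),
)
print(log.model_dump_json())
```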

--------------------------------------------------------------------------------
/worker/src/dockerfile_generators/user_provided.py:
--------------------------------------------------------------------------------

```python
 1 | from typing import override
 2 | from uuid import uuid4
 3 | 
 4 | from ..user_logger import UserLogger
 5 | from .base import DockerfileGenerator
 6 | from worker.src.constants import BaseImage
 7 | 
 8 | 
 9 | class UserDockerfileGenerator(DockerfileGenerator):
10 |     def __init__(
11 |         self,
12 |         base_image: BaseImage,
13 |         repo_folder: str,
14 |         user_logger: UserLogger,
15 |         build_instructions: str,
16 |     ) -> None:
17 |         self.build_instructions = build_instructions
18 |         super().__init__(
19 |             base_image=base_image, repo_folder=repo_folder, user_logger=user_logger
20 |         )
21 | 
22 |     @override
23 |     async def generate_dockerfile(self) -> str:
24 |         await self._image_setup()
25 |         await self._code_setup()
26 |         return self._save_dockerfile()
27 | 
28 |     @override
29 |     async def _code_setup(self) -> None:
30 |         heredoc_identifier = str(uuid4()).replace("-", "")
31 |         lines = [
32 |             "COPY ./ ./",
33 |             f"RUN <<{heredoc_identifier}",
34 |             self.build_instructions,
35 |             heredoc_identifier,
36 |         ]
37 |         self._add_to_file(lines)
38 | 
```

--------------------------------------------------------------------------------
/mcp_server/src/settings.py:
--------------------------------------------------------------------------------

```python
 1 | from pathlib import Path
 2 | 
 3 | from pydantic import BaseModel
 4 | from pydantic import Field
 5 | from pydantic import field_validator
 6 | from pydantic_settings import BaseSettings
 7 | from pydantic_settings import SettingsConfigDict
 8 | 
 9 | 
10 | parent_folder = Path(__file__).parent.parent
11 | 
12 | 
13 | class ServerJson(BaseModel):
14 |     name: str
15 |     description: str
16 |     url: str = Field(alias="endpoint")
17 |     logo: str | None = None
18 | 
19 |     @field_validator("url")  # noqa
20 |     @classmethod
21 |     def url_ignore_anchor(cls, value: str) -> str:
22 |         return value.split("#")[0]
23 | 
24 | 
25 | class Settings(BaseSettings):
26 |     model_config = SettingsConfigDict(extra="ignore", env_ignore_empty=True)
27 | 
28 |     registry_url: str = "http://registry:80"
29 |     log_dir: Path = Path("logs")
30 |     mcp_port: int = 80
31 |     llm_proxy: str | None = None
32 |     openai_base_url: str = "https://openrouter.ai/api/v1"
33 |     openai_api_key: str
34 |     llm_model: str = "openai/gpt-4o-mini"
35 |     server_json: ServerJson = ServerJson.model_validate_json(
36 |         Path(parent_folder / "server.json").read_text()
37 |     )
38 |     log_level: str = "INFO"
39 | 
40 | 
41 | settings = Settings()
42 | 
```
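
For reference, the shape `server.json` must have can be inferred from the `ServerJson` model: `endpoint` is aliased to `url`, and any `#` fragment is stripped by the validator. The values below are made up:

```python
# Hedged sketch: validating an illustrative server.json payload.
example = ServerJson.model_validate_json(
    '{"name": "registry", "description": "Registry MCP server",'
    ' "endpoint": "http://registry_mcp_server:80/sse#tools"}'
)
assert example.url == "http://registry_mcp_server:80/sse"
```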

--------------------------------------------------------------------------------
/registry/src/database/session.py:
--------------------------------------------------------------------------------

```python
 1 | from collections.abc import AsyncGenerator
 2 | from typing import Any
 3 | 
 4 | from sqlalchemy.ext.asyncio import async_sessionmaker
 5 | from sqlalchemy.ext.asyncio import AsyncSession
 6 | from sqlalchemy.ext.asyncio import create_async_engine
 7 | 
 8 | from registry.src.errors import FastApiError
 9 | from registry.src.logger import logger
10 | from registry.src.settings import settings
11 | 
12 | async_engine = create_async_engine(
13 |     settings.database_url_async,
14 |     pool_size=settings.db_pool_size,
15 |     max_overflow=settings.db_max_overflow,
16 |     pool_pre_ping=True,
17 | )
18 | 
19 | session_maker = async_sessionmaker(bind=async_engine, expire_on_commit=False)
20 | 
21 | 
22 | async def get_unmanaged_session() -> AsyncSession:
23 |     return session_maker()
24 | 
25 | 
26 | async def get_session() -> AsyncGenerator[AsyncSession, Any]:
27 |     session = session_maker()
28 | 
29 |     try:
30 |         yield session
31 |         await session.commit()
32 |     except FastApiError as error:
33 |         await session.rollback()
34 |         raise error
35 |     except Exception as error:
36 |         logger.error(f"Encountered an exception while processing request:\n{error}")
37 |         await session.rollback()
38 |         raise
39 |     finally:
40 |         await session.close()
41 | 
```
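
A minimal sketch of how `get_session` is consumed as a FastAPI dependency (the route itself is hypothetical); commit, rollback, and close all happen inside the generator:

```python
# Hypothetical route using the managed session dependency.
from fastapi import APIRouter
from fastapi import Depends
from sqlalchemy import text

router = APIRouter()


@router.get("/db-ping")
async def db_ping(session: AsyncSession = Depends(get_session)) -> dict:
    await session.execute(text("SELECT 1"))
    return {"ok": True}
```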

--------------------------------------------------------------------------------
/worker/src/dockerfile_generators/factory.py:
--------------------------------------------------------------------------------

```python
 1 | from ..constants import BaseImage
 2 | from ..user_logger import UserLogger
 3 | from .base import DockerfileGenerator
 4 | from .python import PythonGenerator
 5 | from .typescript import TypeScriptGenerator
 6 | from .user_provided import UserDockerfileGenerator
 7 | 
 8 | 
 9 | async def get_dockerfile_generator(
10 |     base_image: BaseImage,
11 |     repo_folder: str,
12 |     build_instructions: str | None,
13 |     user_logger: UserLogger,
14 | ) -> DockerfileGenerator:
15 |     if build_instructions:
16 |         await user_logger.info(message="Build instructions found.")
17 |         return UserDockerfileGenerator(
18 |             base_image=base_image,
19 |             repo_folder=repo_folder,
20 |             build_instructions=build_instructions,
21 |             user_logger=user_logger,
22 |         )
23 |     await user_logger.warning(
24 |         message="No build instructions found. Attempting to generate from scratch."
25 |     )
26 |     if base_image == BaseImage.node_lts:
27 |         return TypeScriptGenerator(
28 |             base_image=base_image, repo_folder=repo_folder, user_logger=user_logger
29 |         )
30 |     else:
31 |         return PythonGenerator(
32 |             base_image=base_image, repo_folder=repo_folder, user_logger=user_logger
33 |         )
34 | 
```

--------------------------------------------------------------------------------
/alembic/versions/2025_05_13_0634-3b8b8a5b99ad_deployment_fields.py:
--------------------------------------------------------------------------------

```python
 1 | """deployment fields
 2 | 
 3 | Revision ID: 3b8b8a5b99ad
 4 | Revises: 735603f5e4d5
 5 | Create Date: 2025-05-13 06:34:52.115171
 6 | 
 7 | """
 8 | from alembic import op
 9 | import sqlalchemy as sa
10 | 
11 | 
12 | # revision identifiers, used by Alembic.
13 | revision = '3b8b8a5b99ad'
14 | down_revision = '735603f5e4d5'
15 | branch_labels = None
16 | depends_on = None
17 | 
18 | 
19 | def upgrade():
20 |     # ### commands auto generated by Alembic - please adjust! ###
21 |     op.add_column('server', sa.Column('repo_url', sa.String(), nullable=True))
22 |     op.add_column('server', sa.Column('command', sa.String(), nullable=True))
23 |     op.alter_column('server', 'url',
24 |                existing_type=sa.VARCHAR(),
25 |                nullable=True)
26 |     op.create_unique_constraint("server_repo_url_unique", 'server', ['repo_url'])
27 |     # ### end Alembic commands ###
28 | 
29 | 
30 | def downgrade():
31 |     # ### commands auto generated by Alembic - please adjust! ###
32 |     op.drop_constraint("server_repo_url_unique", 'server', type_='unique')
33 |     op.alter_column('server', 'url',
34 |                existing_type=sa.VARCHAR(),
35 |                nullable=False)
36 |     op.drop_column('server', 'command')
37 |     op.drop_column('server', 'repo_url')
38 |     # ### end Alembic commands ###
39 | 
```

--------------------------------------------------------------------------------
/registry/src/search/schemas.py:
--------------------------------------------------------------------------------

```python
 1 | from pydantic import Field
 2 | 
 3 | from registry.src.base_schema import BaseSchema
 4 | 
 5 | 
 6 | class SearchServerURL(BaseSchema):
 7 |     url: str
 8 | 
 9 | 
10 | class SearchServer(SearchServerURL):
11 |     id: int
12 |     name: str
13 |     description: str
14 | 
15 | 
16 | class SolutionStep(BaseSchema):
17 |     step_description: str
18 |     best_server_description: str = Field(
19 |         ...,
20 |         description="Description of the best server for this step, copy this from `mcp_servers` entry",
21 |     )
22 |     best_server_name: str = Field(
23 |         ...,
24 |         description="Name of the best server for this step, copy this from `mcp_servers` entry",
25 |     )
26 |     best_server: SearchServerURL = Field(
27 |         ..., description="The best server for this step"
28 |     )
29 |     confidence: str = Field(
30 |         ...,
31 |         description="How confident you are that this server is sufficient for this step",
32 |     )
33 |     additional_servers: list[SearchServerURL] = Field(
34 |         ...,
35 |         description="Alternative servers if you think the `best_server` may not be enough",
36 |     )
37 | 
38 | 
39 | class SearchResponse(BaseSchema):
40 |     solution_steps: list[SolutionStep] = Field(
41 |         ..., description="List of solution steps and servers for each step"
42 |     )
43 | 
```

--------------------------------------------------------------------------------
/scripts/darp-add.py:
--------------------------------------------------------------------------------

```python
 1 | #!/usr/bin/env python3
 2 | """
 3 | Add new MCP servers to the registry
 4 | """
 5 | import argparse
 6 | import json
 7 | 
 8 | import requests
 9 | 
10 | default_registry_url: str = "http://localhost:80"
11 | 
12 | 
13 | def main() -> None:
14 |     parser = argparse.ArgumentParser(description="Add a new MCP server to the registry")
15 |     parser.add_argument("--url", required=True, help="Endpoint URL for the server")
16 |     parser.add_argument("--name", required=True, help="Unique server name")
17 |     parser.add_argument("--description", required=True, help="Server description")
18 |     parser.add_argument("--logo", default=None, help="Logo URL")
19 |     parser.add_argument("--verbose", action="store_true", help="Verbose output")
20 |     parser.add_argument(
21 |         "--registry-url", default=default_registry_url, help="Registry API endpoint URL"
22 |     )
23 | 
24 |     args = parser.parse_args()
25 | 
26 |     server_data = {
27 |         "name": args.name,
28 |         "description": args.description,
29 |         "url": args.url,
30 |         "logo": args.logo,
31 |     }
32 | 
33 |     response = requests.post(f"{args.registry_url}/servers/", json=server_data)
34 |     response.raise_for_status()
35 |     if args.verbose:
36 |         print(json.dumps(response.json(), indent=2))
37 | 
38 | 
39 | if __name__ == "__main__":
40 |     main()
41 | 
```
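
An illustrative invocation (values are placeholders): `python scripts/darp-add.py --url http://localhost:4689/sse --name my_server --description "Example server" --verbose`.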

--------------------------------------------------------------------------------
/registry/src/deployments/schemas.py:
--------------------------------------------------------------------------------

```python
 1 | from datetime import datetime
 2 | from typing import Annotated
 3 | 
 4 | from pydantic import Field
 5 | from pydantic import field_serializer
 6 | from pydantic import HttpUrl
 7 | 
 8 | from registry.src.base_schema import BaseSchema
 9 | from registry.src.servers.schemas import server_name_regex_pattern
10 | from registry.src.types import HostType
11 | from registry.src.types import ServerStatus
12 | from worker.src.constants import BaseImage
13 | from worker.src.schemas import LogMessage
14 | 
15 | 
16 | class ServerLogsSchema(BaseSchema):
17 |     server_id: int
18 |     deployment_logs: list[LogMessage]
19 |     created_at: datetime
20 |     updated_at: datetime
21 | 
22 | 
23 | class DeploymentCreateData(BaseSchema):
24 |     name: Annotated[str, Field(max_length=30, pattern=server_name_regex_pattern)]
25 |     title: str = Field(max_length=30)
26 |     description: str
27 |     repo_url: HttpUrl
28 |     command: str | None = None
29 |     logo: str | None = None
30 |     base_image: BaseImage | None = None
31 |     build_instructions: str | None = None
32 |     host_type: HostType = HostType.internal
33 |     status: ServerStatus = ServerStatus.in_processing
34 | 
35 |     @field_serializer("repo_url")
36 |     def serialize_url(self, repo_url: HttpUrl) -> str:
37 |         return str(repo_url)
38 | 
39 | 
40 | class DeploymentCreate(BaseSchema):
41 |     current_user_id: str
42 |     data: DeploymentCreateData
43 | 
44 | 
45 | class UserId(BaseSchema):
46 |     current_user_id: str
47 | 
```

--------------------------------------------------------------------------------
/registry/src/errors.py:
--------------------------------------------------------------------------------

```python
 1 | from fastapi import HTTPException
 2 | from fastapi import status
 3 | 
 4 | 
 5 | class FastApiError(HTTPException):
 6 | 
 7 |     def __init__(self, message: str, **kwargs) -> None:
 8 |         self.detail = {"message": message, **kwargs}
 9 | 
10 | 
11 | class InvalidServerNameError(FastApiError):
12 |     status_code: int = status.HTTP_400_BAD_REQUEST
13 | 
14 |     def __init__(self, name: str) -> None:
15 |         message = "Name must contain only letters, digits, and underscores, and must not start with a digit."
16 |         super().__init__(message=message, name=name)
17 | 
18 | 
19 | class ServerAlreadyExistsError(FastApiError):
20 |     status_code: int = status.HTTP_400_BAD_REQUEST
21 | 
22 |     def __init__(self, dict_servers: list[dict]) -> None:
23 |         servers_str = ", ".join(server["name"] for server in dict_servers)
24 |         message = f"Server already exists: {servers_str}"
25 |         super().__init__(message=message, servers=dict_servers)
26 | 
27 | 
28 | class ServersNotFoundError(FastApiError):
29 |     status_code = status.HTTP_404_NOT_FOUND
30 | 
31 |     def __init__(self, ids: list[int]) -> None:
32 |         super().__init__(message=f"Server(s) not found: {ids}", ids=ids)
33 | 
34 | 
35 | class NotAllowedError(FastApiError):
36 |     status_code = status.HTTP_403_FORBIDDEN
37 | 
38 | 
39 | class InvalidData(FastApiError):
40 |     status_code = status.HTTP_400_BAD_REQUEST
41 | 
42 | 
43 | class RemoteServerError(FastApiError):
44 |     status_code = status.HTTP_503_SERVICE_UNAVAILABLE
45 | 
```
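
Extending this hierarchy follows the same pattern: set `status_code` as a class attribute and forward context kwargs into the detail payload. A hypothetical example:

```python
# Hypothetical sketch: a new error type in the same style.
class ServerNotReadyError(FastApiError):
    status_code: int = status.HTTP_409_CONFLICT

    def __init__(self, server_id: int) -> None:
        super().__init__(message="Server is not ready yet", server_id=server_id)
```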

--------------------------------------------------------------------------------
/worker/src/dockerfile_generators/typescript/generic.py:
--------------------------------------------------------------------------------

```python
 1 | from pathlib import Path
 2 | from typing import override
 3 | 
 4 | from ...errors import DependenciesError
 5 | from ..base import DockerfileGenerator
 6 | 
 7 | 
 8 | class TypeScriptGenerator(DockerfileGenerator):
 9 |     @override
10 |     async def _dependencies_installation(self) -> None:
11 |         repo_path = Path(self.repo_folder)
12 |         package_json = self._find_file(repo_path, "package.json")
13 |         if not package_json:
14 |             await self.user_logger.error(message="Error: no package.json found")
15 |             raise DependenciesError("No package.json found")
16 |         await self.user_logger.info(message="package.json found. Adding to Dockerfile")
17 |         package_json_folder = self._relative_path(package_json.parent.absolute())
18 |         lines = [
19 |             f"COPY {package_json_folder}/package*.json ./",
20 |         ]
21 |         ts_config = self._find_file(repo_path, "tsconfig.json")
22 |         if ts_config:
23 |             await self.user_logger.info(
24 |                 message="tsconfig.json found. Adding to Dockerfile"
25 |             )
26 |             lines.append(f"COPY {self._relative_path(ts_config.absolute())} ./")
27 |         lines.append("RUN npm install --ignore-scripts")
28 |         self._add_to_file(lines)
29 | 
30 |     @override
31 |     async def _code_setup(self) -> None:
32 |         lines = [
33 |             "COPY ./ ./",
34 |             "RUN npm run build",
35 |         ]
36 |         self._add_to_file(lines)
37 | 
```

--------------------------------------------------------------------------------
/docker-compose-debug.yaml:
--------------------------------------------------------------------------------

```yaml
 1 | services:
 2 | 
 3 |   registry:
 4 |     image: registry
 5 |     pull_policy: never
 6 |     restart: unless-stopped
 7 |     ports:
 8 |       - "${REGISTRY_PORT:-80}:80"
 9 |     profiles:
10 |       - ''
11 |     volumes:
12 |       - ./registry:/workspace/registry
13 |       - ./alembic:/workspace/alembic
14 |       - ./common:/workspace/common
15 |       - ./worker:/workspace/worker
16 |     command: start-debug.sh
17 | 
18 |   registry_mcp_server:
19 |     image: registry_mcp_server
20 |     pull_policy: never
21 |     restart: unless-stopped
22 |     ports:
23 |       - "${REGISTRY_MCP_PORT:-4689}:80"
24 |     profiles:
25 |       - ''
26 |     volumes:
27 |       - ./mcp_server:/workspace/mcp_server
28 |       - ./common:/workspace/common
29 |     command: start-debug.sh
30 | 
31 |   registry_deploy_worker:
32 |     pull_policy: never
33 |     profiles:
34 |       - ''
35 |     volumes:
36 |       - ./worker:/workspace/worker
37 |       - ./registry:/workspace/registry
38 |     restart: unless-stopped
39 |     command: worker-start-debug.sh
40 | 
41 |   portal_zookeeper:
42 |     profiles:
43 |       - ''
44 |     restart: unless-stopped
45 | 
46 |   portal_kafka:
47 |     profiles:
48 |       - ''
49 |     restart: unless-stopped
50 | 
51 |   registry_postgres:
52 |     ports:
53 |       - "${REGISTRY_POSTGRES_PORT:-5432}:5432"
54 |     profiles:
55 |       - ''
56 |     restart: unless-stopped
57 | 
58 |   registry_healthchecker:
59 |     pull_policy: never
60 |     profiles:
61 |       - ''
62 |     volumes:
63 |       - ./registry:/workspace/registry
64 |       - ./healthchecker:/workspace/healthchecker
65 |     restart: unless-stopped
66 |     command: worker-start-debug.sh
```

--------------------------------------------------------------------------------
/registry/src/settings.py:
--------------------------------------------------------------------------------

```python
 1 | from pathlib import Path
 2 | 
 3 | from pydantic_settings import BaseSettings
 4 | from pydantic_settings import SettingsConfigDict
 5 | 
 6 | 
 7 | class Settings(BaseSettings):
 8 |     model_config = SettingsConfigDict(extra="ignore", env_ignore_empty=True)
 9 | 
10 |     postgres_user: str
11 |     postgres_db: str
12 |     postgres_password: str
13 |     postgres_host: str
14 |     postgres_port: int = 5432
15 | 
16 |     db_pool_size: int = 50
17 |     db_max_overflow: int = 25
18 |     log_dir: Path = Path("logs")
19 |     llm_proxy: str | None = None
20 |     openai_api_key: str
21 |     llm_model: str = "openai/gpt-4o-mini"
22 |     openai_base_url: str = "https://openrouter.ai/api/v1"
23 | 
24 |     worker_topic: str = "deployment"
25 |     deployment_server: str = ""
26 |     deployment_network: str = ""
27 |     broker_url: str = "portal_kafka:19092"
28 |     dockerhub_login: str = ""
29 |     dockerhub_password: str = ""
30 |     registry_url: str = "http://registry/"
31 |     healthcheck_running_interval: int = 3600
32 |     tool_retry_count: int = 60
33 |     tool_wait_interval: int = 5
34 | 
35 |     deep_research_server_name: str = "deepresearch_darpserver"
36 | 
37 |     @property
38 |     def database_url(self) -> str:
39 |         auth_data = f"{self.postgres_user}:{self.postgres_password}"
40 |         host = f"{self.postgres_host}:{self.postgres_port}"
41 |         return f"{auth_data}@{host}/{self.postgres_db}"
42 | 
43 |     @property
44 |     def database_url_sync(self) -> str:
45 |         return f"postgresql+psycopg2://{self.database_url}"
46 | 
47 |     @property
48 |     def database_url_async(self) -> str:
49 |         return f"postgresql+asyncpg://{self.database_url}"
50 | 
51 | 
52 | settings = Settings()
53 | 
```
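
Illustrative only: with placeholder credentials, the URL properties compose like this (the values are not real configuration):

```python
# Hedged sketch: init kwargs override environment variables in BaseSettings.
s = Settings(
    postgres_user="registry",
    postgres_password="secret",
    postgres_db="registry",
    postgres_host="registry_postgres",
    openai_api_key="sk-placeholder",
)
assert s.database_url_async == (
    "postgresql+asyncpg://registry:secret@registry_postgres:5432/registry"
)
```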

--------------------------------------------------------------------------------
/mcp_server/src/decorators.py:
--------------------------------------------------------------------------------

```python
 1 | import functools
 2 | import inspect
 3 | import sys
 4 | import traceback
 5 | from collections.abc import Awaitable
 6 | from collections.abc import Callable
 7 | from collections.abc import Coroutine
 8 | from typing import Any
 9 | from typing import overload
10 | from typing import ParamSpec
11 | from typing import TypeVar
12 | 
13 | from mcp_server.src.logger import logger
14 | 
15 | X = TypeVar("X")
16 | P = ParamSpec("P")
17 | 
18 | 
19 | @overload
20 | def log_errors(  # noqa
21 |     func: Callable[P, Coroutine[Any, Any, X]]
22 | ) -> Callable[P, Coroutine[Any, Any, X]]: ...
23 | 
24 | 
25 | @overload
26 | def log_errors(func: Callable[P, X]) -> Callable[P, X]: ...  # noqa
27 | 
28 | 
29 | def log_errors(func: Callable[P, Any]) -> Callable[P, Any]:
30 | 
31 |     def log_error(args: tuple, kwargs: dict) -> None:
32 |         info = sys.exc_info()[2]
33 |         assert (
34 |             info is not None and info.tb_next is not None
35 |         ), f"No traceback available {sys.exc_info()[2]=}"
36 |         locals_vars = info.tb_frame.f_locals
37 |         logger.error(f"{traceback.format_exc()} \n{locals_vars=} \n{args=} \n{kwargs=}")
38 | 
39 |     @functools.wraps(func)
40 |     def sync_wrapped(*args: P.args, **kwargs: P.kwargs) -> Any:
41 |         try:
42 |             return func(*args, **kwargs)
43 |         except Exception:
44 |             log_error(args, kwargs)
45 |             raise
46 | 
47 |     @functools.wraps(func)
48 |     async def async_wrapped(*args: P.args, **kwargs: P.kwargs) -> Any:
49 |         try:
50 |             return await func(*args, **kwargs)
51 |         except Exception:
52 |             log_error(args, kwargs)
53 |             raise
54 | 
55 |     if inspect.iscoroutinefunction(func):
56 |         return async_wrapped
57 | 
58 |     return sync_wrapped
59 | 
```
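
Usage is identical for sync and async callables; a minimal sketch:

```python
# Minimal sketch: the decorator logs the traceback, locals, args and kwargs,
# then re-raises, so callers still see the original exception.
@log_errors
async def fetch_tools(server_url: str) -> list[str]:
    raise RuntimeError("boom")
```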

--------------------------------------------------------------------------------
/alembic/versions/2025_03_24_0945-ffe7a88d5b15_initial_migration.py:
--------------------------------------------------------------------------------

```python
 1 | """Initial migration
 2 | 
 3 | Revision ID: ffe7a88d5b15
 4 | Revises: 
 5 | Create Date: 2025-03-24 09:45:42.840081
 6 | 
 7 | """
 8 | from alembic import op
 9 | import sqlalchemy as sa
10 | from sqlalchemy.dialects import postgresql
11 | 
12 | # revision identifiers, used by Alembic.
13 | revision = 'ffe7a88d5b15'
14 | down_revision = None
15 | branch_labels = None
16 | depends_on = None
17 | 
18 | 
19 | def upgrade():
20 |     # ### commands auto generated by Alembic - please adjust! ###
21 |     op.create_table('server',
22 |     sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
23 |     sa.Column('url', sa.String(), nullable=False),
24 |     sa.Column('name', sa.String(), nullable=False),
25 |     sa.Column('description', sa.String(), nullable=False),
26 |     sa.Column('logo', sa.String(), nullable=False),
27 |     sa.PrimaryKeyConstraint('id'),
28 |     sa.UniqueConstraint('name'),
29 |     sa.UniqueConstraint('url')
30 |     )
31 |     op.create_table('tool',
32 |     sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
33 |     sa.Column('name', sa.String(), nullable=False),
34 |     sa.Column('description', sa.String(), nullable=False),
35 |     sa.Column('input_schema', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
36 |     sa.Column('server_url', sa.String(), nullable=False),
37 |     sa.ForeignKeyConstraint(['server_url'], ['server.url'], ondelete='CASCADE'),
38 |     sa.PrimaryKeyConstraint('id'),
39 |     sa.UniqueConstraint('name', 'server_url', name='uq_tool_name_server_url')
40 |     )
41 |     # ### end Alembic commands ###
42 | 
43 | 
44 | def downgrade():
45 |     # ### commands auto generated by Alembic - please adjust! ###
46 |     op.drop_table('tool')
47 |     op.drop_table('server')
48 |     # ### end Alembic commands ###
49 | 
```

--------------------------------------------------------------------------------
/healthchecker/src/__main__.py:
--------------------------------------------------------------------------------

```python
 1 | import asyncio
 2 | import logging
 3 | 
 4 | from sqlalchemy.ext.asyncio import AsyncSession
 5 | 
 6 | from common.notifications.client import NotificationsClient
 7 | from healthchecker.src.checker import HealthChecker
 8 | from registry.src.database.session import get_unmanaged_session
 9 | from registry.src.servers.repository import ServerRepository
10 | from registry.src.settings import settings
11 | 
12 | logger: logging.Logger = logging.getLogger("Healthchecker")
13 | 
14 | 
15 | def setup_logging() -> None:
16 |     logging.basicConfig(level=logging.INFO)
17 |     logger.info("Starting healthchecker")
18 |     logger.info(
19 |         "Healthchecker will run every %d seconds", settings.healthcheck_running_interval
20 |     )
21 | 
22 | 
23 | async def perform_healthcheck_step() -> None:
24 |     logger.info("Running health-checks")
25 |     session: AsyncSession = await get_unmanaged_session()
26 |     repo: ServerRepository = ServerRepository(session)
27 |     notifications_client: NotificationsClient = NotificationsClient()
28 |     healthchecker: HealthChecker = HealthChecker(
29 |         session=session,
30 |         servers_repo=repo,
31 |         notifications_client=notifications_client,
32 |     )
33 | 
34 |     try:
35 |         await healthchecker.run_once()
36 |     except Exception as error:
37 |         logger.error("Error in healthchecker", exc_info=error)
38 | 
39 |     logger.info("Health-checks finished")
40 |     logger.info("Sleeping for %d seconds", settings.healthcheck_running_interval)
41 | 
42 |     await session.commit()
43 |     await session.close()
44 | 
45 | 
46 | async def main() -> None:
47 |     setup_logging()
48 | 
49 |     while True:
50 |         await perform_healthcheck_step()
51 |         await asyncio.sleep(settings.healthcheck_running_interval)
52 | 
53 | 
54 | if __name__ == "__main__":
55 |     asyncio.run(main())
56 | 
```

--------------------------------------------------------------------------------
/worker/src/dockerfile_generators/base.py:
--------------------------------------------------------------------------------

```python
 1 | from pathlib import Path
 2 | 
 3 | from ..constants import BaseImage
 4 | from ..user_logger import UserLogger
 5 | 
 6 | 
 7 | class DockerfileGenerator:
 8 |     def __init__(
 9 |         self, base_image: BaseImage, repo_folder: str, user_logger: UserLogger
10 |     ):
11 |         self.base_image = base_image
12 |         self.lines: list[str] = []
13 |         self.repo_folder = repo_folder
14 |         self.user_logger = user_logger
15 | 
16 |     async def generate_dockerfile(self) -> str:
17 |         await self._image_setup()
18 |         await self._dependencies_installation()
19 |         await self._code_setup()
20 |         return self._save_dockerfile()
21 | 
22 |     async def _image_setup(self) -> None:
23 |         lines = [
24 |             f"FROM {self.base_image.value}",
25 |             "WORKDIR /workspace",
26 |         ]
27 |         self._add_to_file(lines)
28 | 
29 |     async def _dependencies_installation(self) -> None:
30 |         raise NotImplementedError
31 | 
32 |     async def _code_setup(self) -> None:
33 |         raise NotImplementedError
34 | 
35 |     def _add_to_file(self, lines: list[str]) -> None:
36 |         self.lines.extend(lines)
37 | 
38 |     @staticmethod
39 |     def _find_file(repo_folder: Path, file: str) -> Path | None:
40 |         files = repo_folder.rglob(file)
41 |         try:
42 |             return next(files)
43 |         except StopIteration:
44 |             return None
45 | 
46 |     def _save_dockerfile(self) -> str:
47 |         dockerfile_text = "\n".join(self.lines)
48 |         with open(f"{self.repo_folder}/Dockerfile", "w") as dockerfile:
49 |             dockerfile.write(dockerfile_text)
50 |         return dockerfile_text
51 | 
52 |     def _relative_path(self, path: Path) -> str:
53 |         str_path = str(path)
54 |         if str_path.startswith(self.repo_folder):
55 |             return "." + str_path[len(self.repo_folder) :]
56 |         return str_path
57 | 
```
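
A new generator only needs to implement the two hooks; `generate_dockerfile` drives image setup, dependency installation, then code setup. A hypothetical example:

```python
# Hypothetical sketch: a Go-based generator in the same style.
class GoGenerator(DockerfileGenerator):
    async def _dependencies_installation(self) -> None:
        self._add_to_file(["COPY go.mod go.sum ./", "RUN go mod download"])

    async def _code_setup(self) -> None:
        self._add_to_file(["COPY ./ ./", "RUN go build -o server ."])
```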

--------------------------------------------------------------------------------
/registry/src/deployments/router.py:
--------------------------------------------------------------------------------

```python
 1 | from fastapi import APIRouter
 2 | from fastapi import Depends
 3 | from fastapi import status
 4 | 
 5 | from .schemas import DeploymentCreate
 6 | from .schemas import ServerLogsSchema
 7 | from .schemas import UserId
 8 | from .service import DeploymentService
 9 | from registry.src.database import Server
10 | from registry.src.database import ServerLogs
11 | from registry.src.servers.schemas import ServerRead
12 | 
13 | router = APIRouter(prefix="/deployments")
14 | 
15 | 
16 | @router.post("/", status_code=status.HTTP_201_CREATED, response_model=ServerRead)
17 | async def create_server(
18 |     data: DeploymentCreate,
19 |     service: DeploymentService = Depends(DeploymentService.get_new_instance),
20 | ) -> Server:
21 |     server = await service.create_server(data)
22 |     return server
23 | 
24 | 
25 | @router.get("/{server_id}/logs", response_model=ServerLogsSchema)
26 | async def get_logs(
27 |     server_id: int,
28 |     current_user_id: str,
29 |     service: DeploymentService = Depends(DeploymentService.get_new_instance),
30 | ) -> ServerLogs:
31 |     logs = await service.get_logs(server_id=server_id, current_user_id=current_user_id)
32 |     return logs
33 | 
34 | 
35 | @router.post("/{server_id}/stop", status_code=status.HTTP_202_ACCEPTED)
36 | async def stop_server(
37 |     server_id: int,
38 |     user_id: UserId,
39 |     service: DeploymentService = Depends(DeploymentService.get_new_instance),
40 | ) -> None:
41 |     await service.stop_server(
42 |         server_id=server_id, current_user_id=user_id.current_user_id
43 |     )
44 | 
45 | 
46 | @router.post("/{server_id}/start", status_code=status.HTTP_202_ACCEPTED)
47 | async def start_server(
48 |     server_id: int,
49 |     user_id: UserId,
50 |     service: DeploymentService = Depends(DeploymentService.get_new_instance),
51 | ) -> None:
52 |     await service.start_server(
53 |         server_id=server_id, current_user_id=user_id.current_user_id
54 |     )
55 | 
```
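
A hedged sketch of calling the logs endpoint with httpx; host and port are assumptions from the compose defaults, and the response shape follows `ServerLogsSchema`:

```python
# Hedged sketch: fetch deployment logs for server 42.
import httpx

response = httpx.get(
    "http://localhost:80/deployments/42/logs",
    params={"current_user_id": "user-123"},
)
response.raise_for_status()
print(response.json())
```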

--------------------------------------------------------------------------------
/registry/src/database/models/server.py:
--------------------------------------------------------------------------------

```python
 1 | from sqlalchemy import String
 2 | from sqlalchemy.orm import Mapped
 3 | from sqlalchemy.orm import mapped_column
 4 | from sqlalchemy.orm import relationship
 5 | 
 6 | from ...database import models
 7 | from ...types import HostType
 8 | from ...types import ServerTransportProtocol
 9 | from .base import Base
10 | from .mixins import HasCreatedAt
11 | from .mixins import HasUpdatedAt
12 | from registry.src.types import ServerStatus
13 | 
14 | 
15 | class Server(HasCreatedAt, HasUpdatedAt, Base):
16 |     __tablename__ = "server"
17 | 
18 |     id: Mapped[int] = mapped_column(primary_key=True, autoincrement=True)
19 |     title: Mapped[str] = mapped_column(String(), nullable=False)
20 |     url: Mapped[str | None] = mapped_column(String(), nullable=True, unique=True)
21 |     name: Mapped[str] = mapped_column(String(), nullable=False, unique=True)
22 |     description: Mapped[str] = mapped_column(String(), nullable=False)
23 |     logo: Mapped[str | None] = mapped_column(String(), nullable=True)
24 |     host_type: Mapped[HostType] = mapped_column(String(), nullable=False)
25 |     creator_id: Mapped[str | None] = mapped_column(String(), nullable=True)
26 |     repo_url: Mapped[str | None] = mapped_column(String(), nullable=True, unique=True)
27 |     command: Mapped[str | None] = mapped_column(String(), nullable=True)
28 |     base_image: Mapped[str | None] = mapped_column(String(), nullable=True)
29 |     transport_protocol: Mapped[ServerTransportProtocol | None] = mapped_column(
30 |         String(),
31 |         nullable=True,
32 |     )
33 |     build_instructions: Mapped[str | None] = mapped_column(String(), nullable=True)
34 |     tools: Mapped[list["models.tool.Tool"]] = relationship(
35 |         back_populates="server", cascade="all, delete-orphan"
36 |     )
37 |     status: Mapped[ServerStatus] = mapped_column(
38 |         String(), nullable=False, default=ServerStatus.active
39 |     )
40 | 
```

--------------------------------------------------------------------------------
/common/notifications/client.py:
--------------------------------------------------------------------------------

```python
 1 | from httpx import AsyncClient
 2 | 
 3 | from common.notifications.enums import NotificationEvent
 4 | from common.notifications.exceptions import NotificationsServiceError
 5 | 
 6 | DEFAULT_NOTIFICATIONS_CLIENT_URI: str = "http://mailing_api:8000"
 7 | 
 8 | 
 9 | class NotificationsClient:
10 |     def __init__(self, base_url: str | None = None) -> None:
11 |         if base_url is None:
12 |             base_url = DEFAULT_NOTIFICATIONS_CLIENT_URI
13 | 
14 |         self._client: AsyncClient = AsyncClient(base_url=base_url)
15 | 
16 |     async def _notify(
17 |         self,
18 |         user_id: str,
19 |         template: NotificationEvent,
20 |         data: dict,
21 |     ) -> None:
22 |         response = await self._client.post(
23 |             f"/v1/emails/{template.value}",
24 |             json=dict(user_id=user_id, **data),
25 |         )
26 | 
27 |         if response.status_code != 200:
28 |             raise NotificationsServiceError(f"Failed to send email: {response.text}")
29 | 
30 |     async def send_server_down_alert(
31 |         self,
32 |         user_id: str,
33 |         server_name: str,
34 |         server_logs: str | None = None,
35 |     ):
36 |         await self._notify(
37 |             user_id,
38 |             NotificationEvent.server_healthcheck_failed,
39 |             data={
40 |                 "server_name": server_name,
41 |                 "server_logs": server_logs,
42 |             },
43 |         )
44 | 
45 |     async def notify_deploy_failed(self, user_id: str, server_name: str) -> None:
46 |         await self._notify(
47 |             user_id,
48 |             NotificationEvent.deploy_failed,
49 |             data={
50 |                 "server_name": server_name,
51 |             },
52 |         )
53 | 
54 |     async def notify_deploy_successful(self, user_id: str, server_name: str) -> None:
55 |         await self._notify(
56 |             user_id,
57 |             NotificationEvent.deploy_successful,
58 |             data={
59 |                 "server_name": server_name,
60 |             },
61 |         )
62 | 
```
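
A usage sketch, assuming the mailing_api service behind `DEFAULT_NOTIFICATIONS_CLIENT_URI` is reachable:

```python
# Hedged sketch: notify a user that their deployment failed.
client = NotificationsClient()
await client.notify_deploy_failed(user_id="user-123", server_name="my_server")
```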

--------------------------------------------------------------------------------
/registry/src/search/service.py:
--------------------------------------------------------------------------------

```python
 1 | from typing import Self
 2 | 
 3 | import instructor
 4 | from httpx import AsyncClient
 5 | from openai import AsyncOpenAI
 6 | 
 7 | from .prompts import get_top_servers
 8 | from .schemas import SearchResponse
 9 | from .schemas import SearchServer
10 | from registry.src.logger import logger
11 | from registry.src.settings import settings
12 | 
13 | 
14 | class SearchService:
15 |     def __init__(self) -> None:
16 |         http_client: AsyncClient = (
17 |             AsyncClient()
18 |             if settings.llm_proxy is None
19 |             else AsyncClient(proxy=settings.llm_proxy)
20 |         )
21 |         self.llm = instructor.from_openai(
22 |             AsyncOpenAI(http_client=http_client, base_url=settings.openai_base_url)
23 |         )
24 | 
25 |     async def _get_search_response(
26 |         self, servers: list[SearchServer], query: str
27 |     ) -> SearchResponse:
28 |         prompt = get_top_servers.format(servers_list=servers, request=query)
29 |         completion = await self.llm.chat.completions.create(
30 |             model=settings.llm_model,
31 |             messages=[
32 |                 {
33 |                     "role": "user",
34 |                     "content": prompt,
35 |                 },
36 |             ],
37 |             response_model=SearchResponse,
38 |         )
39 |         logger.debug(f"{completion=}")
40 |         return completion
41 | 
42 |     @staticmethod
43 |     def _collect_urls(search_response: SearchResponse) -> list[str]:
44 |         server_urls = set()
45 |         for solution_step in search_response.solution_steps:
46 |             server_urls.add(solution_step.best_server.url)
47 |             server_urls |= {server.url for server in solution_step.additional_servers}
48 |         return list(server_urls)
49 | 
50 |     async def get_fitting_servers(
51 |         self, servers: list[SearchServer], query: str
52 |     ) -> list[str]:
53 |         search_response = await self._get_search_response(servers=servers, query=query)
54 |         return self._collect_urls(search_response=search_response)
55 | 
56 |     @classmethod
57 |     async def get_new_instance(cls) -> Self:
58 |         return cls()
59 | 
```

--------------------------------------------------------------------------------
/worker/src/user_logger.py:
--------------------------------------------------------------------------------

```python
 1 | from sqlalchemy.ext.asyncio import AsyncSession
 2 | 
 3 | from .schemas import CommandResult
 4 | from .schemas import CommandStart
 5 | from .schemas import GeneralLogMessage
 6 | from .schemas import LoggingEvent
 7 | from .schemas import LogLevel
 8 | from .schemas import LogMessage
 9 | from registry.src.deployments.logs_repository import LogsRepository
10 | 
11 | 
12 | class UserLogger:
13 | 
14 |     def __init__(self, session: AsyncSession, server_id: int):
15 |         self.logs_repo = LogsRepository(session=session)
16 |         self.server_id = server_id
17 | 
18 |     async def clear_logs(self) -> None:
19 |         await self.logs_repo.clear_logs(server_id=self.server_id)
20 | 
21 |     async def command_start(self, command: str) -> None:
22 |         log = LogMessage(
23 |             event_type=LoggingEvent.command_start, data=CommandStart(command=command)
24 |         )
25 |         await self.logs_repo.append_to_deployment_logs(
26 |             server_id=self.server_id, log=log
27 |         )
28 | 
29 |     async def command_result(self, output: str, success: bool) -> None:
30 |         data = CommandResult(output=output, success=success)
31 |         log = LogMessage(event_type=LoggingEvent.command_result, data=data)
32 |         await self.logs_repo.append_to_deployment_logs(
33 |             server_id=self.server_id, log=log
34 |         )
35 | 
36 |     async def info(self, message: str) -> None:
37 |         await self._add_message(log_level=LogLevel.info, message=message)
38 | 
39 |     async def warning(self, message: str) -> None:
40 |         await self._add_message(log_level=LogLevel.warning, message=message)
41 | 
42 |     async def error(self, message: str) -> None:
43 |         await self._add_message(log_level=LogLevel.error, message=message)
44 | 
45 |     async def _add_message(self, log_level: LogLevel, message: str):
46 |         data = GeneralLogMessage(log_level=log_level, message=message)
47 |         log = LogMessage(event_type=LoggingEvent.general_message, data=data)
48 |         await self.logs_repo.append_to_deployment_logs(
49 |             server_id=self.server_id, log=log
50 |         )
51 | 
```

--------------------------------------------------------------------------------
/registry/src/deployments/logs_repository.py:
--------------------------------------------------------------------------------

```python
 1 | from typing import Self
 2 | 
 3 | from fastapi import Depends
 4 | from sqlalchemy import select
 5 | from sqlalchemy import update
 6 | from sqlalchemy.ext.asyncio import AsyncSession
 7 | 
 8 | from registry.src.database import get_session
 9 | from registry.src.database import ServerLogs
10 | from worker.src.schemas import LogMessage
11 | 
12 | 
13 | class LogsRepository:
14 |     def __init__(self, session: AsyncSession) -> None:
15 |         self.session = session
16 | 
17 |     async def create_logs(self, server_id: int) -> ServerLogs:
18 |         server_logs = ServerLogs(server_id=server_id)
19 |         self.session.add(server_logs)
20 |         await self.session.flush()
21 |         await self.session.refresh(server_logs)
22 |         return server_logs
23 | 
24 |     async def get_server_logs(self, server_id: int) -> ServerLogs | None:
25 |         query = select(ServerLogs).where(ServerLogs.server_id == server_id)
26 |         logs = (await self.session.execute(query)).scalar_one_or_none()
27 |         return logs
28 | 
29 |     async def update_server_logs(
30 |         self, server_id: int, deployment_logs: list[dict]
31 |     ) -> None:
32 |         query = (
33 |             update(ServerLogs)
34 |             .where(ServerLogs.server_id == server_id)
35 |             .values(deployment_logs=deployment_logs)
36 |         )
37 |         await self.session.execute(query)
38 |         await self.session.flush()
39 |         await self.session.commit()
40 | 
41 |     async def append_to_deployment_logs(self, server_id: int, log: LogMessage) -> None:
42 |         query = select(ServerLogs).where(ServerLogs.server_id == server_id)
43 |         logs: ServerLogs | None = (await self.session.execute(query)).scalar_one_or_none()
44 |         if not logs:
45 |             raise ValueError(f"No logs found for server_id={server_id}")
46 |         deployment_logs = logs.deployment_logs + [log.model_dump()]
47 |         await self.update_server_logs(
48 |             server_id=server_id, deployment_logs=deployment_logs
49 |         )
50 | 
51 |     async def clear_logs(self, server_id: int) -> None:
52 |         await self.update_server_logs(server_id=server_id, deployment_logs=[])
53 | 
54 |     @classmethod
55 |     async def get_new_instance(
56 |         cls, session: AsyncSession = Depends(get_session)
57 |     ) -> Self:
58 |         return cls(session=session)
59 | 
```
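
Note that `append_to_deployment_logs` re-reads and rewrites the whole JSONB list rather than appending in-database, so concurrent writers can drop entries. A sketch of the call path, reusing the log schemas from `worker/src/schemas.py` (the session is assumed to be an `AsyncSession`):

```python
from registry.src.deployments.logs_repository import LogsRepository
from worker.src.schemas import GeneralLogMessage
from worker.src.schemas import LoggingEvent
from worker.src.schemas import LogLevel
from worker.src.schemas import LogMessage


async def record_info(session, server_id: int) -> None:
    repo = LogsRepository(session=session)
    await repo.create_logs(server_id=server_id)  # one ServerLogs row per server
    log = LogMessage(
        event_type=LoggingEvent.general_message,
        data=GeneralLogMessage(log_level=LogLevel.info, message="hello"),
    )
    await repo.append_to_deployment_logs(server_id=server_id, log=log)
```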

--------------------------------------------------------------------------------
/alembic/env.py:
--------------------------------------------------------------------------------

```python
 1 | from logging.config import fileConfig
 2 | 
 3 | from sqlalchemy import engine_from_config
 4 | from sqlalchemy import pool
 5 | 
 6 | from alembic import context
 7 | from registry.src.database import Base
 8 | from registry.src.settings import settings
 9 | 
10 | # this is the Alembic Config object, which provides
11 | # access to the values within the .ini file in use.
12 | config = context.config
13 | 
14 | 
15 | # Interpret the config file for Python logging.
16 | # This line sets up loggers basically.
17 | if config.config_file_name is not None:
18 |     fileConfig(config.config_file_name)
19 | 
20 | # add your model's MetaData object here
21 | # for 'autogenerate' support
22 | # from myapp import mymodel
23 | # target_metadata = mymodel.Base.metadata
24 | 
25 | target_metadata = Base.metadata
26 | 
27 | # other values from the config, defined by the needs of env.py,
28 | # can be acquired:
29 | # my_important_option = config.get_main_option("my_important_option")
30 | # ... etc.
31 | 
32 | 
33 | config.set_main_option("sqlalchemy.url", settings.database_url_sync)
34 | 
35 | 
36 | def run_migrations_offline() -> None:
37 |     """Run migrations in 'offline' mode.
38 | 
39 |     This configures the context with just a URL
40 |     and not an Engine, though an Engine is acceptable
41 |     here as well.  By skipping the Engine creation
42 |     we don't even need a DBAPI to be available.
43 | 
44 |     Calls to context.execute() here emit the given string to the
45 |     script output.
46 | 
47 |     """
48 |     url = config.get_main_option("sqlalchemy.url")
49 |     context.configure(
50 |         url=url,
51 |         target_metadata=target_metadata,
52 |         literal_binds=True,
53 |         dialect_opts={"paramstyle": "named"},
54 |     )
55 | 
56 |     with context.begin_transaction():
57 |         context.run_migrations()
58 | 
59 | 
60 | def run_migrations_online() -> None:
61 |     """Run migrations in 'online' mode.
62 | 
63 |     In this scenario we need to create an Engine
64 |     and associate a connection with the context.
65 | 
66 |     """
67 |     connectable = engine_from_config(
68 |         config.get_section(config.config_ini_section, {}),
69 |         prefix="sqlalchemy.",
70 |         poolclass=pool.NullPool,
71 |     )
72 | 
73 |     with connectable.connect() as connection:
74 |         context.configure(connection=connection, target_metadata=target_metadata)
75 | 
76 |         with context.begin_transaction():
77 |             context.run_migrations()
78 | 
79 | 
80 | if context.is_offline_mode():
81 |     run_migrations_offline()
82 | else:
83 |     run_migrations_online()
84 | 
```
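
Because the URL is injected from `settings.database_url_sync` at import time, the stock Alembic CLI works without editing `alembic.ini`; a typical cycle looks like:

```
alembic revision --autogenerate -m "describe the change"
alembic upgrade head      # apply pending migrations
alembic downgrade -1      # roll back the latest revision
```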

--------------------------------------------------------------------------------
/mcp_server/src/llm/tool_manager.py:
--------------------------------------------------------------------------------

```python
 1 | import json
 2 | 
 3 | from mcp import ClientSession
 4 | from mcp.client.sse import sse_client
 5 | from mcp.types import CallToolResult
 6 | from openai.types.chat import ChatCompletionMessageToolCall
 7 | from openai.types.chat import ChatCompletionToolMessageParam
 8 | from openai.types.chat import ChatCompletionToolParam
 9 | 
10 | from mcp_server.src.schemas import RegistryServer
11 | from mcp_server.src.schemas import ToolInfo
12 | 
13 | 
14 | class ToolManager:
15 |     def __init__(self, servers: list[RegistryServer]) -> None:
16 |         self.renamed_tools: dict[str, ToolInfo] = {}
17 |         self.tools: list[ChatCompletionToolParam] = self.set_tools(servers)
18 | 
19 |     def rename_and_save(self, tool_name: str, server: RegistryServer) -> str:
20 |         renamed_tool = f"{tool_name}_mcp_{server.name}"
21 |         self.renamed_tools[renamed_tool] = ToolInfo(tool_name=tool_name, server=server)
22 |         return renamed_tool
23 | 
24 |     def set_tools(self, servers: list[RegistryServer]) -> list[ChatCompletionToolParam]:
25 |         return [
26 |             ChatCompletionToolParam(
27 |                 type="function",
28 |                 function={
29 |                     "name": self.rename_and_save(tool.name, server=server),
30 |                     "description": tool.description,
31 |                     "parameters": tool.input_schema,
32 |                 },
33 |             )
34 |             for server in servers
35 |             for tool in server.tools
36 |         ]
37 | 
38 |     @staticmethod
39 |     async def _request_mcp(
40 |         arguments: str,
41 |         server_url: str,
42 |         tool_name: str,
43 |     ) -> CallToolResult:
44 |         async with sse_client(server_url) as (read, write):
45 |             async with ClientSession(read, write) as session:
46 |                 await session.initialize()
47 | 
48 |                 return await session.call_tool(
49 |                     tool_name,
50 |                     arguments=(json.loads(arguments) if arguments else None),
51 |                 )
52 | 
53 |     async def handle_tool_call(
54 |         self, tool_call: ChatCompletionMessageToolCall
55 |     ) -> ChatCompletionToolMessageParam:
56 |         tool_info = self.renamed_tools.get(tool_call.function.name)
57 |         if not tool_info:
58 |             return ChatCompletionToolMessageParam(
59 |                 role="tool",
60 |                 content="Error: Incorrect tool name",
61 |                 tool_call_id=tool_call.id,
62 |             )
63 |         tool_call_result = await self._request_mcp(
64 |             arguments=tool_call.function.arguments,
65 |             server_url=tool_info.server.url,
66 |             tool_name=tool_info.tool_name,
67 |         )
68 |         return ChatCompletionToolMessageParam(
69 |             role="tool",
70 |             content=tool_call_result.content[0].text,
71 |             tool_call_id=tool_call.id,
72 |         )
73 | 
```
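
A sketch of the round trip, assuming `servers` were already fetched from the registry and `tool_call` came back on a chat completion; only names defined in this file are used:

```python
from mcp_server.src.llm.tool_manager import ToolManager
from mcp_server.src.schemas import RegistryServer


async def answer_with_tools(servers: list[RegistryServer], tool_call) -> None:
    manager = ToolManager(servers)
    # manager.tools plugs straight into chat.completions.create(tools=...);
    # each tool_call the model emits is resolved against the owning MCP server.
    tool_message = await manager.handle_tool_call(tool_call)
    print(tool_message["content"])
```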

--------------------------------------------------------------------------------
/registry/src/servers/schemas.py:
--------------------------------------------------------------------------------

```python
  1 | from datetime import datetime
  2 | from typing import Annotated
  3 | from typing import Any
  4 | 
  5 | from pydantic import ConfigDict
  6 | from pydantic import Field
  7 | from pydantic import field_serializer
  8 | 
  9 | from registry.src.base_schema import BaseSchema
 10 | from registry.src.types import HostType
 11 | from registry.src.types import ServerStatus
 12 | from registry.src.types import ServerTransportProtocol
 13 | from worker.src.constants import BaseImage
 14 | 
 15 | 
 16 | tool_name_regex = "^[a-zA-Z0-9_-]{1,64}$"
 17 | server_name_regex_pattern = "^[a-z0-9]+(_[a-z0-9]+)*$"
 18 | 
 19 | 
 20 | class Tool(BaseSchema):
 21 |     model_config = ConfigDict(populate_by_name=True, **BaseSchema.model_config)
 22 |     name: str = Field(pattern=tool_name_regex)
 23 |     alias: str = Field(pattern=tool_name_regex)
 24 |     description: str
 25 |     input_schema: dict[str, Any] = Field(validation_alias="inputSchema")
 26 | 
 27 | 
 28 | class ServerCreateData(BaseSchema):
 29 |     name: Annotated[str, Field(pattern=server_name_regex_pattern, max_length=30)]
 30 |     title: str = Field(max_length=30)
 31 |     description: str
 32 |     url: str
 33 |     logo: str | None = None
 34 |     host_type: HostType = HostType.external
 35 | 
 36 | 
 37 | class ServerCreate(BaseSchema):
 38 |     data: ServerCreateData
 39 |     current_user_id: str | None = None
 40 | 
 41 | 
 42 | class ServerRead(BaseSchema):
 43 |     title: str
 44 |     id: int
 45 |     status: ServerStatus
 46 |     name: str
 47 |     creator_id: str | None
 48 |     description: str
 49 |     repo_url: str | None
 50 |     url: str | None
 51 |     command: str | None
 52 |     logo: str | None
 53 |     host_type: HostType
 54 |     base_image: BaseImage | None = None
 55 |     build_instructions: str | None = None
 56 |     transport_protocol: ServerTransportProtocol | None
 57 |     created_at: datetime
 58 |     updated_at: datetime
 59 | 
 60 |     @field_serializer("created_at", "updated_at")
 61 |     def serialize_datetime(self, value: datetime, _info) -> str:
 62 |         return value.isoformat()
 63 | 
 64 | 
 65 | class ServerWithTools(ServerRead):
 66 |     tools: list[Tool]
 67 | 
 68 | 
 69 | class ServerUpdateData(BaseSchema):
 70 |     title: str | None = Field(max_length=30, default=None)
 71 |     name: Annotated[
 72 |         str | None, Field(pattern=server_name_regex_pattern, max_length=30)
 73 |     ] = None
 74 |     description: str | None = None
 75 |     url: str | None = None
 76 |     logo: str | None = None
 77 | 
 78 | 
 79 | class DeploymentUpdateData(BaseSchema):
 80 |     command: str | None = None
 81 |     repo_url: str | None = None
 82 |     base_image: BaseImage | None = None
 83 |     build_instructions: str | None = None
 84 | 
 85 | 
 86 | class ServerUpdate(BaseSchema):
 87 |     data: ServerUpdateData
 88 |     deployment_data: DeploymentUpdateData
 89 |     current_user_id: str | None = None
 90 | 
 91 | 
 92 | class MCP(BaseSchema):
 93 |     name: str
 94 |     description: str
 95 |     endpoint: str = Field(validation_alias="url")
 96 |     logo: str | None = None
 97 | 
 98 | 
 99 | class MCPJson(BaseSchema):
100 |     version: str = "1.0"
101 |     servers: list[MCP]
102 | 
```
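
Because `Tool.input_schema` declares `validation_alias="inputSchema"` (with `populate_by_name=True`), raw camelCase MCP tool payloads validate without renaming keys; a quick check:

```python
from registry.src.servers.schemas import Tool

tool = Tool.model_validate(
    {
        "name": "get_weather",
        "alias": "get_weather",
        "description": "Current weather by city",
        "inputSchema": {"type": "object", "properties": {}},
    }
)
assert tool.input_schema == {"type": "object", "properties": {}}
```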

--------------------------------------------------------------------------------
/scripts/darp-search.py:
--------------------------------------------------------------------------------

```python
 1 | #!/usr/bin/env python3
 2 | """
 3 | List matching MCP servers from the registry.
 4 | """
 5 | import argparse
 6 | import json
 7 | import sys
 8 | from typing import Any
 9 | from typing import Dict
10 | from typing import List
11 | 
12 | import requests
13 | 
14 | default_registry_url: str = "http://localhost:80"
15 | 
16 | 
17 | def main() -> None:
18 |     parser = argparse.ArgumentParser(description="Search for servers in the registry")
19 |     parser.add_argument("query", help="Search query string")
20 |     parser.add_argument(
21 |         "--registry-url",
22 |         default=default_registry_url,
23 |         help=f"Registry URL (default: {default_registry_url})",
24 |     )
25 |     parser.add_argument(
26 |         "--format",
27 |         choices=["names", "fulltext", "json"],
28 |         default="names",
29 |         help="Output format (default: names)",
30 |     )
31 | 
32 |     args = parser.parse_args()
33 | 
34 |     servers = search_servers(args.query, args.registry_url)
35 |     display_servers(servers, args.format)
36 | 
37 | 
38 | def search_servers(
39 |     query: str, base_url: str = default_registry_url
40 | ) -> List[Dict[str, Any]]:
41 |     url = f"{base_url}/servers/search"
42 |     params = {"query": query}
43 | 
44 |     try:
45 |         response = requests.get(url, params=params, timeout=30)
46 |         response.raise_for_status()
47 |         return response.json()
48 |     except requests.RequestException as error:
49 |         print(f"Error: Failed to search servers - {error}", file=sys.stderr)
50 |         sys.exit(1)
51 | 
52 | 
53 | def display_servers(
54 |     servers: List[Dict[str, Any]], format_output: str = "names"
55 | ) -> None:
56 |     if format_output == "json":
57 |         display_servers_json(servers)
58 |     elif format_output == "names":
59 |         display_servers_names(servers)
60 |     elif format_output == "fulltext":
61 |         display_servers_fulltext(servers)
62 | 
63 | 
64 | def display_servers_json(servers: List[Dict[str, Any]]) -> None:
65 |     print(json.dumps(servers, indent=2))
66 | 
67 | 
68 | def display_servers_names(servers: List[Dict[str, Any]]) -> None:
69 |     print(f"Found {len(servers)} servers:")
70 |     for server in servers:
71 |         print(f"{server['name']}")
72 | 
73 | 
74 | def display_servers_fulltext(servers: List[Dict[str, Any]]) -> None:
75 |     print(f"Found {len(servers)} servers:")
76 |     for server in servers:
77 |         print(f"{server['id']}: {server['name']}")
78 |         print(f"{indent(server['url'], 1)}")
79 |         print(f"{indent(server['description'], 1)}")
80 |         if server["tools"]:
81 |             print(indent("Tools:", 1))
82 |         for tool in server["tools"]:
83 |             print(f"{indent(tool['name'], 2)}")
84 |             print(f"{indent(tool['description'], 2)}")
85 |             print(f"{indent(json.dumps(tool['input_schema'], indent=2), 2)}")
86 | 
87 | 
88 | def indent(text: str, level: int = 1, prefix: str = "  ") -> str:
89 |     return "\n".join(prefix * level + line for line in text.split("\n"))
90 | 
91 | 
92 | if __name__ == "__main__":
93 |     main()
94 | 
```
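
Example invocations (the default registry URL assumes the local compose setup):

```
./scripts/darp-search.py "weather forecast"
./scripts/darp-search.py "weather forecast" --format fulltext
./scripts/darp-search.py "weather" --registry-url http://localhost:8000 --format json
```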

--------------------------------------------------------------------------------
/mcp_server/src/tools.py:
--------------------------------------------------------------------------------

```python
 1 | import logging
 2 | 
 3 | from openai.types.chat import ChatCompletionMessage
 4 | from openai.types.chat import ChatCompletionMessageParam
 5 | from openai.types.chat import ChatCompletionToolMessageParam
 6 | from openai.types.chat import ChatCompletionUserMessageParam
 7 | 
 8 | from common.llm.client import LLMClient
 9 | from mcp_server.src.decorators import log_errors
10 | from mcp_server.src.llm.prompts import default_prompt
11 | from mcp_server.src.llm.tool_manager import ToolManager
12 | from mcp_server.src.registry_client import RegistryClient
13 | from mcp_server.src.settings import settings
14 | 
15 | 
16 | class Tools:
17 |     def __init__(self):
18 |         self.registry_client = RegistryClient()
19 |         logger = logging.getLogger("LLMClient")
20 |         self.llm_client = LLMClient(
21 |             logger, proxy=settings.llm_proxy, base_url=settings.openai_base_url
22 |         )
23 | 
24 |     @log_errors
25 |     async def search(self, request: str) -> list[str]:
26 |         servers = await self.registry_client.search(request=request)
27 |         return [server.url for server in servers]
28 | 
29 |     @log_errors
30 |     async def routing(
31 |         self, request: str
32 |     ) -> list[ChatCompletionMessage | ChatCompletionToolMessageParam]:
33 |         servers = await self.registry_client.search(request=request)
34 |         manager = ToolManager(servers)
35 |         user_message = ChatCompletionUserMessageParam(content=request, role="user")
36 |         resulting_conversation = await self._llm_request(
37 |             messages=[user_message], manager=manager
38 |         )
39 |         return resulting_conversation
40 | 
41 |     async def _llm_request(
42 |         self,
43 |         messages: list[ChatCompletionMessageParam],
44 |         manager: ToolManager,
45 |         response_accumulator: (
46 |             list[ChatCompletionMessage | ChatCompletionToolMessageParam] | None
47 |         ) = None,
48 |     ) -> list[ChatCompletionMessage | ChatCompletionToolMessageParam]:
49 |         if response_accumulator is None:
50 |             response_accumulator = []
51 |         completion = await self.llm_client.request(
52 |             system_prompt=default_prompt,
53 |             model=settings.llm_model,
54 |             messages=messages,
55 |             tools=manager.tools,
56 |         )
57 |         choice = completion.choices[0]
58 |         response_accumulator.append(choice.message)
59 |         if choice.message.tool_calls:
60 |             tool_calls = choice.message.tool_calls
61 |             tool_result_messages = [
62 |                 await manager.handle_tool_call(tool_call) for tool_call in tool_calls
63 |             ]
64 |             response_accumulator += tool_result_messages
65 |             conversation = messages + [choice.message] + tool_result_messages
66 |             response_accumulator = await self._llm_request(
67 |                 messages=conversation,
68 |                 manager=manager,
69 |                 response_accumulator=response_accumulator,
70 |             )
71 |         return response_accumulator
72 | 
```
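
`_llm_request` recurses, appending the assistant message plus the tool results each round, until the model answers without `tool_calls`. A minimal driver, assuming the MCP server settings (registry URL, LLM credentials) are configured:

```python
import asyncio

from mcp_server.src.tools import Tools


async def main() -> None:
    tools = Tools()
    conversation = await tools.routing(request="What's the weather in Paris?")
    for message in conversation:
        print(message)


asyncio.run(main())
```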

--------------------------------------------------------------------------------
/registry/src/validator/service.py:
--------------------------------------------------------------------------------

```python
 1 | import logging
 2 | from typing import Self
 3 | 
 4 | from fastapi import Depends
 5 | from openai.types.chat import ChatCompletionSystemMessageParam
 6 | from openai.types.chat import ChatCompletionUserMessageParam
 7 | 
 8 | from .prompts import SENS_TOPIC_CHECK_SYSTEM_PROMPT
 9 | from .prompts import SENS_TOPIC_CHECK_USER_PROMPT
10 | from common.llm.client import LLMClient
11 | from registry.src.servers.repository import ServerRepository
12 | from registry.src.servers.schemas import ServerCreate
13 | from registry.src.servers.schemas import Tool
14 | from registry.src.settings import settings
15 | from registry.src.validator.schemas import ServerNeedsSensitiveDataResponse
16 | from registry.src.validator.schemas import Tools
17 | from registry.src.validator.schemas import ValidationResult
18 | from registry.src.validator.ssl import SSLHelper
19 | 
20 | 
21 | class ValidationService:
22 |     def __init__(
23 |         self,
24 |         servers_repo: ServerRepository,
25 |         ssl_helper: SSLHelper,
26 |     ) -> None:
27 |         self._servers_repo = servers_repo
28 |         self._ssl_helper = ssl_helper
29 |         logger = logging.getLogger("LLMClient")
30 |         self._llm_client = LLMClient(
31 |             logger, proxy=settings.llm_proxy, base_url=settings.openai_base_url
32 |         )
33 | 
34 |     async def _mcp_needs_sensitive_data(
35 |         self,
36 |         data: ServerCreate,
37 |         tools: list[Tool],
38 |     ) -> bool:
39 |         system_message = ChatCompletionSystemMessageParam(
40 |             content=SENS_TOPIC_CHECK_SYSTEM_PROMPT.render(), role="system"
41 |         )
42 | 
43 |         message = ChatCompletionUserMessageParam(
44 |             content=SENS_TOPIC_CHECK_USER_PROMPT.render(
45 |                 server_data=data,
46 |                 tools_data=Tools.model_validate(tools),
47 |             ),
48 |             role="user",
49 |         )
50 | 
51 |         response = await self._llm_client.request_to_beta(
52 |             model=settings.llm_model,
53 |             messages=[system_message, message],
54 |             response_format=ServerNeedsSensitiveDataResponse,
55 |         )
56 | 
57 |         answer = ServerNeedsSensitiveDataResponse.model_validate_json(
58 |             response.choices[0].message.content,
59 |         )
60 | 
61 |         return answer.server_needs_sensitive_data
62 | 
63 |     async def validate_server(
64 |         self, data: ServerCreate, tools: list[Tool]
65 |     ) -> ValidationResult:
66 |         result = ValidationResult()
67 | 
68 |         result.server_requires_sensitive_data = await self._mcp_needs_sensitive_data(
69 |             data,
70 |             tools,
71 |         )
72 |         result.authority_level = await self._ssl_helper.get_ssl_authority_level(
73 |             data.url,
74 |         )
75 | 
76 |         return result
77 | 
78 |     @classmethod
79 |     def get_new_instance(
80 |         cls,
81 |         servers_repo: ServerRepository = Depends(ServerRepository.get_new_instance),
82 |         ssl_helper: SSLHelper = Depends(SSLHelper.get_new_instance),
83 |     ) -> Self:
84 |         return cls(
85 |             servers_repo=servers_repo,
86 |             ssl_helper=ssl_helper,
87 |         )
88 | 
```
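
A sketch of exposing `ValidationService` through its `get_new_instance` factory; the endpoint itself is illustrative, not part of the repo:

```python
from fastapi import APIRouter
from fastapi import Depends

from registry.src.servers.schemas import ServerCreate
from registry.src.servers.schemas import Tool
from registry.src.validator.schemas import ValidationResult
from registry.src.validator.service import ValidationService

router = APIRouter()


@router.post("/validate", response_model=ValidationResult)
async def validate(
    data: ServerCreate,
    tools: list[Tool],
    service: ValidationService = Depends(ValidationService.get_new_instance),
) -> ValidationResult:
    return await service.validate_server(data=data, tools=tools)
```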

--------------------------------------------------------------------------------
/worker/src/consumer.py:
--------------------------------------------------------------------------------

```python
 1 | from datetime import timedelta
 2 | from typing import Any
 3 | 
 4 | from aiokafka import AIOKafkaConsumer
 5 | 
 6 | from .deployment_service import WorkerDeploymentService
 7 | from registry.src.database import get_unmanaged_session
 8 | from registry.src.logger import logger
 9 | from registry.src.servers.repository import ServerRepository
10 | from registry.src.settings import settings
11 | from worker.src.schemas import Task
12 | from worker.src.schemas import TaskType
13 | 
14 | 
15 | class Consumer:
16 |     def __init__(self) -> None:
17 |         self.max_poll_interval: int = int(timedelta(minutes=30).total_seconds())
18 |         self.session_timeout: int = int(timedelta(minutes=10).total_seconds())
19 |         self.task_handlers: dict[TaskType, Any] = {
20 |             TaskType.deploy: self.deploy_server,
21 |             TaskType.start: self.start_server,
22 |             TaskType.stop: self.stop_server,
23 |         }
24 | 
25 |     async def start(self) -> None:
26 |         consumer = AIOKafkaConsumer(
27 |             settings.worker_topic,
28 |             bootstrap_servers=settings.broker_url,
29 |             auto_offset_reset="earliest",
30 |             group_id="deploy_workers",
31 |             group_instance_id="1",
32 |             max_poll_interval_ms=self.max_poll_interval * 1000,
33 |             session_timeout_ms=self.session_timeout * 1000,
34 |         )
35 |         await consumer.start()
36 |         try:
37 |             await self.process_messages(consumer)
38 |         finally:
39 |             await consumer.stop()
40 | 
41 |     async def process_messages(self, consumer: AIOKafkaConsumer) -> None:
42 |         async for message in consumer:
43 |             incoming_message = self.parse_message(message.value)
44 |             if not incoming_message:
45 |                 continue
46 |             await self.process_message(incoming_message)
47 | 
48 |     async def process_message(self, task: Task) -> None:
49 |         session = await get_unmanaged_session()
50 |         try:
51 |             repo = ServerRepository(session)
52 |             await self.task_handlers[task.task_type](task.server_id, repo)
53 |         except Exception as error:
54 |             logger.error("Error while processing a task", exc_info=error)
55 |         await session.commit()
56 | 
57 |     async def deploy_server(self, server_id: int, repo: ServerRepository) -> None:
58 |         service = WorkerDeploymentService(repo=repo, logger=logger)
59 |         await service.deploy_server(server_id=server_id)
60 | 
61 |     async def stop_server(self, server_id: int, repo: ServerRepository) -> None:
62 |         raise NotImplementedError()
63 | 
64 |     async def start_server(self, server_id: int, repo: ServerRepository) -> None:
65 |         raise NotImplementedError()
66 | 
67 |     @staticmethod
68 |     def parse_message(message: bytes) -> Task | None:
69 |         logger.debug(f"Incoming message - {message.decode()}")
70 |         try:
71 |             task = Task.model_validate_json(message.decode())
72 |             return task
73 |         except Exception as error:
74 |             logger.error("Incorrect message received", exc_info=error)
75 |             return None
76 | 
```
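
Each Kafka message must be a JSON-serialized `Task`; going by the fields the consumer reads (`task_type` and `server_id`, assuming no other required fields), producing one looks roughly like:

```python
from worker.src.schemas import Task
from worker.src.schemas import TaskType

task = Task(task_type=TaskType.deploy, server_id=42)
payload = task.model_dump_json().encode()  # bytes for settings.worker_topic
# e.g. b'{"task_type": "deploy", "server_id": 42}'
```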

--------------------------------------------------------------------------------
/registry/src/validator/ssl.py:
--------------------------------------------------------------------------------

```python
  1 | import asyncio
  2 | import logging
  3 | import ssl
  4 | from typing import Self
  5 | from urllib.parse import urlparse
  6 | 
  7 | from cryptography import x509
  8 | from cryptography.hazmat.backends import default_backend
  9 | from cryptography.x509.oid import ExtensionOID
 10 | 
 11 | from .constants import EV_OIDS
 12 | from .constants import HTTP_PORT
 13 | from .constants import HTTPS_PORT
 14 | from .constants import SELF_SIGNED_CERTIFICATE_ERR_CODE
 15 | from .schemas import SSLAuthorityLevel
 16 | 
 17 | 
 18 | class SSLHelper:
 19 |     def __init__(self) -> None:
 20 |         self._logger = logging.getLogger(f"{__name__}.{self.__class__.__name__}")
 21 | 
 22 |     async def get_ssl_authority_level(self, url: str) -> SSLAuthorityLevel:
 23 |         try:
 24 |             cert = await self._load_certificate(url)
 25 |         except ssl.SSLCertVerificationError as exc:
 26 |             if exc.verify_code == SELF_SIGNED_CERTIFICATE_ERR_CODE:
 27 |                 return SSLAuthorityLevel.SELF_SIGNED_CERTIFICATE
 28 |             return SSLAuthorityLevel.INVALID_CERTIFICATE
 29 |         except Exception as exc:
 30 |             self._logger.error("Failed to fetch ssl cert", exc_info=exc)
 31 |             return SSLAuthorityLevel.NO_CERTIFICATE
 32 | 
 33 |         if self._is_cert_respects_ev(cert):
 34 |             return SSLAuthorityLevel.EXTENDED_CERTIFICATE
 35 | 
 36 |         return SSLAuthorityLevel.CERTIFICATE_OK
 37 | 
 38 |     @classmethod
 39 |     def get_new_instance(cls) -> Self:
 40 |         return cls()
 41 | 
 42 |     @staticmethod
 43 |     def _extract_port(uri: str) -> int:
 44 |         parsed = urlparse(uri)
 45 | 
 46 |         if parsed.port:
 47 |             return parsed.port
 48 | 
 49 |         if parsed.scheme == "https":
 50 |             return HTTPS_PORT
 51 |         elif parsed.scheme == "http":
 52 |             return HTTP_PORT
 53 | 
 54 |         raise ValueError("Invalid URI", uri)
 55 | 
 56 |     @staticmethod
 57 |     def _extract_host(uri: str) -> str:
 58 |         parsed = urlparse(uri)
 59 | 
 60 |         if parsed.hostname is None:
 61 |             raise ValueError("Invalid URL: No hostname found")
 62 | 
 63 |         return parsed.hostname
 64 | 
 65 |     @classmethod
 66 |     async def _load_certificate(cls, url: str) -> x509.Certificate:
 67 |         hostname = cls._extract_host(url)
 68 |         port = cls._extract_port(url)
 69 | 
 70 |         ssl_context = ssl.create_default_context()
 71 |         ssl_context.check_hostname = True
 72 |         ssl_context.verify_mode = ssl.CERT_REQUIRED
 73 | 
 74 |         reader, writer = await asyncio.open_connection(
 75 |             hostname, port, ssl=ssl_context, server_hostname=hostname
 76 |         )
 77 | 
 78 |         ssl_object = writer.get_extra_info("ssl_object")
 79 |         der_cert = ssl_object.getpeercert(binary_form=True)
 80 |         writer.close()
 81 |         await writer.wait_closed()
 82 | 
 83 |         cert_obj = x509.load_der_x509_certificate(der_cert, default_backend())
 84 |         return cert_obj
 85 | 
 86 |     @staticmethod
 87 |     def _is_cert_respects_ev(cert: x509.Certificate) -> bool:
 88 |         try:
 89 |             policies = cert.extensions.get_extension_for_oid(
 90 |                 ExtensionOID.CERTIFICATE_POLICIES
 91 |             ).value
 92 |         except x509.ExtensionNotFound:
 93 |             return False
 94 | 
 95 |         for policy in policies:
 96 |             if policy.policy_identifier.dotted_string in EV_OIDS:
 97 |                 return True
 98 | 
 99 |         return False
100 | 
```
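
Usage is a single awaited call, for example:

```python
import asyncio

from registry.src.validator.ssl import SSLHelper


async def main() -> None:
    helper = SSLHelper.get_new_instance()
    level = await helper.get_ssl_authority_level("https://example.com")
    print(level)


asyncio.run(main())
```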

--------------------------------------------------------------------------------
/worker/src/dockerfile_generators/python/generic.py:
--------------------------------------------------------------------------------

```python
 1 | from pathlib import Path
 2 | from typing import override
 3 | 
 4 | from ...errors import DependenciesError
 5 | from ..base import DockerfileGenerator
 6 | 
 7 | 
 8 | class PythonGenerator(DockerfileGenerator):
 9 |     @override
10 |     async def _dependencies_installation(self) -> None:
11 |         repo_path = Path(self.repo_folder)
12 |         await self._install_node()
13 |         uv_file = self._find_file(repo_path, "uv.lock")
14 |         pyproject_file = self._find_file(repo_path, "pyproject.toml")
15 |         readme_file = self._find_file(repo_path, "README.md")
16 |         if uv_file and pyproject_file and readme_file:
17 |             await self.user_logger.info(
18 |                 message="uv.lock found. Installing dependencies with uv"
19 |             )
20 |             self._uv_dependencies(
21 |                 uv_file=uv_file, pyproject=pyproject_file, readme_file=readme_file
22 |             )
23 |             return
24 |         if pyproject_file and readme_file:
25 |             await self.user_logger.info(
26 |                 message="pyproject.toml found. Installing dependencies with pip"
27 |             )
28 |             self._pip_dependencies(pyproject=pyproject_file, readme_file=readme_file)
29 |             return
30 |         requirements_file = self._find_file(repo_path, "requirements.txt")
31 |         if requirements_file:
32 |             await self.user_logger.info(
33 |                 message="requirements.txt found. Installing dependencies with legacy pip method"
34 |             )
35 |             self._legacy_pip_dependencies(requirements_file)
36 |             return
37 |         raise DependenciesError("No supported dependencies installation method found")
38 | 
39 |     @override
40 |     async def _code_setup(self) -> None:
41 |         lines = [
42 |             "COPY ./ ./",
43 |         ]
44 |         self._add_to_file(lines)
45 | 
46 |     async def _install_node(self) -> None:
47 |         await self.user_logger.info(message="Adding nodejs for supergateway")
48 |         lines = [
49 |             "RUN apt update && apt install -y nodejs npm",
50 |         ]
51 |         self._add_to_file(lines)
52 | 
53 |     def _legacy_pip_dependencies(self, requirements: Path) -> None:
54 |         lines = [
55 |             f"COPY {self._relative_path(requirements.absolute())} ./requirements.txt",
56 |             "RUN pip install -r requirements.txt",
57 |         ]
58 |         self._add_to_file(lines)
59 | 
60 |     def _pip_dependencies(self, pyproject: Path, readme_file: Path) -> None:
61 |         lines = [
62 |             f"COPY {self._relative_path(pyproject.absolute())} ./pyproject.toml",
63 |             f"COPY {self._relative_path(readme_file.absolute())} ./README.md",
64 |             "RUN pip install .",
65 |         ]
66 |         self._add_to_file(lines)
67 | 
68 |     def _uv_dependencies(
69 |         self, uv_file: Path, pyproject: Path, readme_file: Path
70 |     ) -> None:
71 |         lines = [
72 |             "COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /usr/local/bin/",
73 |             f"COPY {self._relative_path(uv_file.absolute())} ./uv.lock",
74 |             f"COPY {self._relative_path(pyproject.absolute())} ./pyproject.toml",
75 |             f"COPY {self._relative_path(readme_file.absolute())} ./README.md",
76 |             "RUN uv pip install . --system",
77 |         ]
78 |         self._add_to_file(lines)
79 | 
```
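
The branches are checked from most to least specific: uv.lock, then pyproject.toml, then requirements.txt. For the uv path the generator emits roughly this Dockerfile fragment (the COPY sources depend on where the files sit in the cloned repo):

```
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /usr/local/bin/
COPY uv.lock ./uv.lock
COPY pyproject.toml ./pyproject.toml
COPY README.md ./README.md
RUN uv pip install . --system
```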

--------------------------------------------------------------------------------
/healthchecker/src/checker.py:
--------------------------------------------------------------------------------

```python
 1 | import logging
 2 | from collections.abc import Sequence
 3 | 
 4 | from sqlalchemy import Select
 5 | from sqlalchemy.ext.asyncio import AsyncSession
 6 | 
 7 | from common.notifications.client import NotificationsClient
 8 | from registry.src.database.models.server import Server
 9 | from registry.src.servers.repository import ServerRepository
10 | from registry.src.servers.schemas import Tool
11 | from registry.src.servers.service import ServerService
12 | from registry.src.types import ServerStatus
13 | 
14 | 
15 | class HealthChecker:
16 |     def __init__(
17 |         self,
18 |         session: AsyncSession,
19 |         servers_repo: ServerRepository,
20 |         notifications_client: NotificationsClient,
21 |     ) -> None:
22 |         self.session = session
23 |         self.repo = servers_repo
24 |         self.notifications_client = notifications_client
25 |         self.logger = logging.getLogger("Healthchecker")
26 | 
27 |     async def run_once(self) -> None:
28 |         servers = await self._fetch_servers()
29 | 
30 |         for server in servers:
31 |             await self._check_server(server)
32 | 
33 |     async def _fetch_servers(self) -> Sequence[Server]:
34 |         query: Select = await self.repo.get_all_servers()
35 |         servers: Sequence[Server] = (
36 |             (await self.session.execute(query)).scalars().fetchall()
37 |         )
38 |         return servers
39 | 
40 |     async def _get_tools(self, server: Server) -> list[Tool]:
41 |         return await ServerService.get_tools(server.url, server.name)
42 | 
43 |     async def _check_server(self, server: Server) -> None:
44 |         self.logger.info("Checking server %s", server.name)
45 | 
46 |         try:
47 |             tools = await self._get_tools(server)
48 |         except Exception as error:
49 |             await self._handle_failed_server(server, error)
50 |             return
51 | 
52 |         if not tools:
53 |             await self._handle_empty_tools(server)
54 |             return
55 | 
56 |         await self._mark_server_healthy(server, tools)
57 | 
58 |     async def _handle_failed_server(self, server: Server, error: Exception) -> None:
59 |         self.logger.error("Failed to fetch server's tools", exc_info=error)
60 |         self.logger.warning("Server %s will be marked as unhealthy", server.name)
61 |         await self.notifications_client.send_server_down_alert(
62 |             server.creator_id,
63 |             server_name=server.name,
64 |         )
65 |         await self._update_server_status(server, [], ServerStatus.inactive)
66 | 
67 |     async def _handle_empty_tools(self, server: Server) -> None:
68 |         self.logger.warning("Server %s is alive, but has no tools", server.name)
69 |         self.logger.warning("Server will be marked as unhealthy.")
70 |         await self.notifications_client.send_server_down_alert(
71 |             server.creator_id,
72 |             server_name=server.name,
73 |             server_logs=(
74 |                 "Server has no tools. It will be marked as unhealthy. "
75 |                 "Please, check it out."
76 |             ),
77 |         )
78 |         await self._update_server_status(server, [], ServerStatus.inactive)
79 | 
80 |     async def _mark_server_healthy(self, server: Server, tools: list[Tool]) -> None:
81 |         await self._update_server_status(server, tools, ServerStatus.active)
82 | 
83 |     async def _update_server_status(
84 |         self, server: Server, tools: list[Tool], status: ServerStatus
85 |     ) -> None:
86 |         await self.repo.update_server(
87 |             id=server.id,
88 |             tools=tools,
89 |             status=status,
90 |         )
91 | 
```
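
A sketch of driving one pass, assuming the worker's session factory and that `NotificationsClient` needs no constructor arguments (its signature is not shown here):

```python
from common.notifications.client import NotificationsClient
from healthchecker.src.checker import HealthChecker
from registry.src.database import get_unmanaged_session
from registry.src.servers.repository import ServerRepository


async def run_healthcheck() -> None:
    session = await get_unmanaged_session()
    checker = HealthChecker(
        session=session,
        servers_repo=ServerRepository(session),
        notifications_client=NotificationsClient(),  # assumed zero-arg constructor
    )
    await checker.run_once()
    await session.commit()
```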

--------------------------------------------------------------------------------
/alembic.ini:
--------------------------------------------------------------------------------

```
  1 | # A generic, single database configuration.
  2 | 
  3 | [alembic]
  4 | # path to migration scripts
  5 | # Use forward slashes (/) also on windows to provide an os agnostic path
  6 | script_location = alembic
  7 | 
  8 | # template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
  9 | # Uncomment the line below if you want the files to be prepended with date and time
 10 | # see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
 11 | # for all available tokens
 12 | file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
 13 | 
 14 | # sys.path path, will be prepended to sys.path if present.
 15 | # defaults to the current working directory.
 16 | prepend_sys_path = .
 17 | 
 18 | # timezone to use when rendering the date within the migration file
 19 | # as well as the filename.
 20 | # If specified, requires the python>=3.9 or backports.zoneinfo library and tzdata library.
 21 | # Any required deps can be installed by adding `alembic[tz]` to the pip requirements
 22 | # string value is passed to ZoneInfo()
 23 | # leave blank for localtime
 24 | # timezone =
 25 | 
 26 | # max length of characters to apply to the "slug" field
 27 | # truncate_slug_length = 40
 28 | 
 29 | # set to 'true' to run the environment during
 30 | # the 'revision' command, regardless of autogenerate
 31 | # revision_environment = false
 32 | 
 33 | # set to 'true' to allow .pyc and .pyo files without
 34 | # a source .py file to be detected as revisions in the
 35 | # versions/ directory
 36 | # sourceless = false
 37 | 
 38 | # version location specification; This defaults
 39 | # to alembic/versions.  When using multiple version
 40 | # directories, initial revisions must be specified with --version-path.
 41 | # The path separator used here should be the separator specified by "version_path_separator" below.
 42 | # version_locations = %(here)s/bar:%(here)s/bat:alembic/versions
 43 | 
 44 | # version path separator; As mentioned above, this is the character used to split
 45 | # version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
 46 | # If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
 47 | # Valid values for version_path_separator are:
 48 | #
 49 | # version_path_separator = :
 50 | # version_path_separator = ;
 51 | # version_path_separator = space
 52 | # version_path_separator = newline
 53 | #
 54 | # Use os.pathsep. Default configuration used for new projects.
 55 | version_path_separator = os
 56 | 
 57 | # set to 'true' to search source files recursively
 58 | # in each "version_locations" directory
 59 | # new in Alembic version 1.10
 60 | # recursive_version_locations = false
 61 | 
 62 | # the output encoding used when revision files
 63 | # are written from script.py.mako
 64 | # output_encoding = utf-8
 65 | 
 66 | sqlalchemy.url = driver://user:pass@localhost/dbname
 67 | 
 68 | 
 69 | [post_write_hooks]
 70 | # post_write_hooks defines scripts or Python functions that are run
 71 | # on newly generated revision scripts.  See the documentation for further
 72 | # detail and examples
 73 | 
 74 | # format using "black" - use the console_scripts runner, against the "black" entrypoint
 75 | # hooks = black
 76 | # black.type = console_scripts
 77 | # black.entrypoint = black
 78 | # black.options = -l 79 REVISION_SCRIPT_FILENAME
 79 | 
 80 | # lint with attempts to fix using "ruff" - use the exec runner, execute a binary
 81 | # hooks = ruff
 82 | # ruff.type = exec
 83 | # ruff.executable = %(here)s/.venv/bin/ruff
 84 | # ruff.options = --fix REVISION_SCRIPT_FILENAME
 85 | 
 86 | # Logging configuration
 87 | [loggers]
 88 | keys = root,sqlalchemy,alembic
 89 | 
 90 | [handlers]
 91 | keys = console
 92 | 
 93 | [formatters]
 94 | keys = generic
 95 | 
 96 | [logger_root]
 97 | level = WARNING
 98 | handlers = console
 99 | qualname =
100 | 
101 | [logger_sqlalchemy]
102 | level = WARNING
103 | handlers =
104 | qualname = sqlalchemy.engine
105 | 
106 | [logger_alembic]
107 | level = INFO
108 | handlers =
109 | qualname = alembic
110 | 
111 | [handler_console]
112 | class = StreamHandler
113 | args = (sys.stderr,)
114 | level = NOTSET
115 | formatter = generic
116 | 
117 | [formatter_generic]
118 | format = %(levelname)-5.5s [%(name)s] %(message)s
119 | datefmt = %H:%M:%S
120 | 
```

--------------------------------------------------------------------------------
/common/llm/client.py:
--------------------------------------------------------------------------------

```python
  1 | from logging import Logger
  2 | from typing import Type
  3 | 
  4 | from httpx import AsyncClient
  5 | from openai import APIError
  6 | from openai import AsyncOpenAI
  7 | from openai import InternalServerError
  8 | from openai import NOT_GIVEN
  9 | from openai import OpenAIError
 10 | from openai.types.chat import ChatCompletion
 11 | from openai.types.chat import ChatCompletionMessageParam
 12 | from openai.types.chat import ChatCompletionToolParam
 13 | from pydantic import BaseModel
 14 | 
 15 | 
 16 | class LLMClient:
 17 |     def __init__(
 18 |         self, logger: Logger, proxy: str | None = None, base_url: str | None = None
 19 |     ) -> None:
 20 |         self._logger = logger
 21 | 
 22 |         if proxy is not None:
 23 |             self._logger.info("Enabled proxy %s", proxy)
 24 |             http_client = AsyncClient(proxy=proxy)
 25 |         else:
 26 |             http_client = AsyncClient()
 27 | 
 28 |         self.openai_client = AsyncOpenAI(http_client=http_client, base_url=base_url)
 29 | 
 30 |     @staticmethod
 31 |     def _get_full_messages(
 32 |         system_prompt: str | None, messages: list[ChatCompletionMessageParam]
 33 |     ) -> list[ChatCompletionMessageParam]:
 34 |         system_message: list[dict] = []
 35 |         if system_prompt:
 36 |             system_message = [
 37 |                 {
 38 |                     "role": "system",
 39 |                     "content": system_prompt,
 40 |                     # Anthropic prompt caching
 41 |                     "cache_control": {"type": "ephemeral"},
 42 |                 }
 43 |             ]
 44 |         full_messages = system_message + messages
 45 |         return full_messages
 46 | 
 47 |     async def request(
 48 |         self,
 49 |         model: str,
 50 |         messages: list[ChatCompletionMessageParam],
 51 |         system_prompt: str | None = None,
 52 |         max_tokens: int | None = None,
 53 |         tools: list[ChatCompletionToolParam] | None = None,
 54 |     ) -> ChatCompletion:
 55 |         full_messages = self._get_full_messages(
 56 |             system_prompt=system_prompt, messages=messages
 57 |         )
 58 |         try:
 59 |             response = await self.openai_client.chat.completions.create(
 60 |                 model=model,
 61 |                 messages=full_messages,
 62 |                 max_tokens=max_tokens,
 63 |                 tools=tools or NOT_GIVEN,
 64 |                 timeout=30,
 65 |             )
 66 |         except InternalServerError as error:
 67 |             self._logger.error(error.response.json())
 68 |             raise
 69 |         except APIError as error:
 70 |             self._logger.error(f"{error.code=} {error.body=}")
 71 |             raise
 72 |         except OpenAIError as error:
 73 |             self._logger.error(
 74 |                 "Request to Provider failed with the following exception",
 75 |                 exc_info=error,
 76 |             )
 77 |             raise
 78 |         return response
 79 | 
 80 |     async def request_to_beta(
 81 |         self,
 82 |         model: str,
 83 |         messages: list[ChatCompletionMessageParam],
 84 |         system_prompt: str | None = None,
 85 |         max_tokens: int | None = None,
 86 |         tools: list[ChatCompletionToolParam] | None = None,
 87 |         response_format: Type[BaseModel] = NOT_GIVEN,
 88 |     ) -> ChatCompletion:
 89 |         full_messages = self._get_full_messages(
 90 |             system_prompt=system_prompt, messages=messages
 91 |         )
 92 |         try:
 93 |             response = await self.openai_client.beta.chat.completions.parse(
 94 |                 model=model,
 95 |                 messages=full_messages,
 96 |                 max_tokens=max_tokens,
 97 |                 tools=tools or NOT_GIVEN,
 98 |                 timeout=30,
 99 |                 response_format=response_format,
100 |             )
101 |         except InternalServerError as error:
102 |             self._logger.error(error.response.json())
103 |             raise
104 |         except APIError as error:
105 |             self._logger.error(f"{error.code=} {error.body=}")
106 |             raise
107 |         except OpenAIError as error:
108 |             self._logger.error(
109 |                 "Request to Provider failed with the following exception",
110 |                 exc_info=error,
111 |             )
112 |             raise
113 |         return response
114 | 
```
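
A minimal call, assuming `OPENAI_API_KEY` is set in the environment; the model name is illustrative:

```python
import asyncio
import logging

from common.llm.client import LLMClient


async def main() -> None:
    client = LLMClient(logging.getLogger("LLMClient"))
    completion = await client.request(
        model="gpt-4o-mini",  # illustrative model name
        messages=[{"role": "user", "content": "Say hi"}],
        system_prompt="You are terse.",
    )
    print(completion.choices[0].message.content)


asyncio.run(main())
```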

--------------------------------------------------------------------------------
/worker/src/docker_service.py:
--------------------------------------------------------------------------------

```python
  1 | import subprocess
  2 | from pathlib import Path
  3 | 
  4 | from .constants import BaseImage
  5 | from .dockerfile_generators import DockerfileGenerator
  6 | from .dockerfile_generators import get_dockerfile_generator
  7 | from .errors import InvalidData
  8 | from .errors import ProcessingError
  9 | from .user_logger import UserLogger
 10 | from registry.src.database import Server
 11 | from registry.src.logger import logger
 12 | from registry.src.settings import settings
 13 | 
 14 | 
 15 | class DockerService:
 16 |     def __init__(
 17 |         self,
 18 |         docker_env: dict,
 19 |         repo_folder: str,
 20 |         server: Server,
 21 |         user_logger: UserLogger,
 22 |     ) -> None:
 23 |         self.docker_env = docker_env
 24 |         self.repo_folder = repo_folder
 25 |         self.server = server
 26 |         self.image_name = f"hipasus/{self.server.name}"
 27 |         self.container_name = f"darp_server_{server.id}"
 28 |         self.user_logger = user_logger
 29 | 
 30 |     async def clone_repo(self) -> None:
 31 |         await self._execute(["git", "clone", self.server.repo_url, self.repo_folder])
 32 | 
 33 |     def dockerfile_exists(self) -> bool:
 34 |         path = Path(f"{self.repo_folder}/Dockerfile")
 35 |         return path.exists()
 36 | 
 37 |     async def generate_dockerfile(self) -> None:
 38 |         if not self.server.base_image:
 39 |             await self.user_logger.error(message="No base image provided.")
 40 |             raise InvalidData
 41 |         generator: DockerfileGenerator = await get_dockerfile_generator(
 42 |             repo_folder=self.repo_folder,
 43 |             base_image=BaseImage(self.server.base_image),
 44 |             build_instructions=self.server.build_instructions,
 45 |             user_logger=self.user_logger,
 46 |         )
 47 |         try:
 48 |             logs = await generator.generate_dockerfile()
 49 |             await self.user_logger.info(message=f"Generated Dockerfile:\n{logs}")
 50 |             logger.info(logs)
 51 |         except ProcessingError as error:
 52 |             await self.user_logger.error(
 53 |                 message=f"Error generating Dockerfile: {error.message or ''}"
 54 |             )
 55 |             raise
 56 | 
 57 |     async def build_image(self) -> None:
 58 |         command = [
 59 |             "docker",
 60 |             "build",
 61 |             "-t",
 62 |             self.image_name,
 63 |             "-f",
 64 |             f"{self.repo_folder}/Dockerfile",
 65 |             self.repo_folder,
 66 |         ]
 67 |         await self._execute(command)
 68 | 
 69 |     async def push_image(self) -> None:
 70 |         docker_push_command = (
 71 |             f"docker login -u {settings.dockerhub_login} -p {settings.dockerhub_password} && docker image push {self.image_name}"
 72 |         )
 73 |         await self.user_logger.info(message="Pushing image...")
 74 |         result = subprocess.run(docker_push_command, shell=True)
 75 |         if result.returncode != 0:
 76 |             await self.user_logger.error(message="Docker push failed")
 77 |             logger.error("docker push failed")
 78 |             raise ProcessingError
 79 | 
 80 |     async def run_container(self) -> None:
 81 |         subprocess.run(
 82 |             ["docker", "rm", self.container_name, "--force"], env=self.docker_env
 83 |         )
 84 |         run_command = [
 85 |             "docker",
 86 |             "run",
 87 |             "--network",
 88 |             settings.deployment_network,
 89 |             "-d",
 90 |             "--restart",
 91 |             "always",
 92 |             "--pull",
 93 |             "always",
 94 |             "--name",
 95 |             self.container_name,
 96 |             self.image_name,
 97 |         ]
 98 |         if self.server.command:
 99 |             run_command.extend(["sh", "-c", self.server.command])
100 |         await self._execute(command=run_command, env=self.docker_env)
101 | 
102 |     async def _execute(self, command: list[str], env: dict | None = None) -> None:
103 |         command_str = " ".join(command)
104 |         logger.info(f"{command_str=}")
105 |         await self.user_logger.command_start(command=command_str)
106 |         arguments: dict = dict(
107 |             args=command,
108 |             stdout=subprocess.PIPE,
109 |             stderr=subprocess.STDOUT,
110 |         )
111 |         if env:
112 |             arguments["env"] = env
113 |         result = subprocess.run(**arguments)
114 |         output = result.stdout.decode() if result.stdout else ""  # noqa
115 |         if result.returncode != 0:
116 |             await self.user_logger.command_result(output=output, success=False)
117 |             logger.error(f"{' '.join(command[:2])} failed")
118 |             raise ProcessingError
119 |         await self.user_logger.command_result(output=output, success=True)
120 | 
```
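
For reference, `run_container` for server id 7 and image `hipasus/foo` assembles the equivalent of the following (angle brackets mark values taken from settings; the trailing `sh -c` part is added only when `server.command` is set):

```
docker rm darp_server_7 --force
docker run --network <deployment_network> -d --restart always --pull always \
    --name darp_server_7 hipasus/foo sh -c "<server.command>"
```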

--------------------------------------------------------------------------------
/registry/src/servers/router.py:
--------------------------------------------------------------------------------

```python
  1 | from typing import Literal
  2 | 
  3 | from fastapi import APIRouter
  4 | from fastapi import Depends
  5 | from fastapi import Query
  6 | from fastapi import status as status_codes
  7 | from fastapi_pagination import add_pagination
  8 | from fastapi_pagination import Page
  9 | from fastapi_pagination import Params
 10 | from fastapi_pagination.ext.sqlalchemy import paginate
 11 | from sqlalchemy import Select
 12 | from sqlalchemy.ext.asyncio import AsyncSession
 13 | 
 14 | from ..deployments.service import DeploymentService
 15 | from ..errors import InvalidData
 16 | from .schemas import MCPJson
 17 | from .schemas import ServerCreate
 18 | from .schemas import ServerRead
 19 | from .schemas import ServerUpdate
 20 | from .schemas import ServerWithTools
 21 | from .service import ServerService
 22 | from registry.src.database import get_session
 23 | from registry.src.database import Server
 24 | from registry.src.search.schemas import SearchServer
 25 | from registry.src.search.service import SearchService
 26 | from registry.src.types import RoutingMode
 27 | from registry.src.types import ServerStatus
 28 | 
 29 | router = APIRouter(prefix="/servers")
 30 | add_pagination(router)
 31 | 
 32 | 
 33 | @router.post(
 34 |     "/", response_model=ServerWithTools, status_code=status_codes.HTTP_201_CREATED
 35 | )
 36 | async def create(
 37 |     data: ServerCreate, service: ServerService = Depends(ServerService.get_new_instance)
 38 | ) -> Server:
 39 |     return await service.create(data)
 40 | 
 41 | 
 42 | @router.get("/search", response_model=list[ServerWithTools])
 43 | async def search(
 44 |     query: str,
 45 |     routing_mode: RoutingMode = RoutingMode.auto,
 46 |     service: ServerService = Depends(ServerService.get_new_instance),
 47 |     search_service: SearchService = Depends(SearchService.get_new_instance),
 48 | ) -> list[Server]:
 49 |     if routing_mode == RoutingMode.auto:
 50 |         servers = await service.get_search_servers()
 51 |         formatted_servers = [SearchServer.model_validate(server) for server in servers]
 52 |         server_urls = await search_service.get_fitting_servers(
 53 |             servers=formatted_servers, query=query
 54 |         )
 55 |         return await service.get_servers_by_urls(server_urls=server_urls)
 56 |     elif routing_mode == RoutingMode.deepresearch:
 57 |         return await service.get_deep_research()
 58 |     raise InvalidData(f"Unsupported {routing_mode=}", routing_mode=routing_mode)
 59 | 
 60 | 
 61 | @router.get("/batch", response_model=list[ServerWithTools])
 62 | async def get_servers_by_ids(
 63 |     ids: list[int] = Query(...),
 64 |     service: ServerService = Depends(ServerService.get_new_instance),
 65 | ) -> list[Server]:
 66 |     return await service.get_servers_by_ids(ids=ids)
 67 | 
 68 | 
 69 | @router.get("/.well-known/mcp.json", response_model=MCPJson)
 70 | async def get_mcp_json(
 71 |     service: ServerService = Depends(ServerService.get_new_instance),
 72 | ) -> MCPJson:
 73 |     return await service.get_mcp_json()
 74 | 
 75 | 
 76 | @router.delete("/{id}")
 77 | async def delete_server(
 78 |     id: int,
 79 |     current_user_id: str,
 80 |     service: ServerService = Depends(ServerService.get_new_instance),
 81 | ) -> None:
 82 |     return await service.delete_server(id=id, current_user_id=current_user_id)
 83 | 
 84 | 
 85 | @router.get("/", response_model=Page[ServerWithTools])
 86 | async def get_all_servers(
 87 |     query: str | None = None,
 88 |     status: Literal["any"] | ServerStatus = Query(
 89 |         title="Server status",
 90 |         default=ServerStatus.active,
 91 |         description="Filter servers by status, by default servers with status `active` will be returned",
 92 |     ),
 93 |     creator_id: str | None = None,
 94 |     params: Params = Depends(),
 95 |     service: ServerService = Depends(ServerService.get_new_instance),
 96 |     session: AsyncSession = Depends(get_session),
 97 | ) -> Page[Server]:
 98 |     status_filter: ServerStatus | None = None
 99 | 
100 |     if isinstance(status, ServerStatus):
101 |         status_filter = status
102 | 
103 |     servers: Select = await service.get_all_servers(
104 |         search_query=query,
105 |         status=status_filter,
106 |         creator_id=creator_id,
107 |     )
108 |     return await paginate(session, servers, params)
109 | 
110 | 
111 | @router.get("/{id}", response_model=ServerWithTools)
112 | async def get_server_by_id(
113 |     id: int,
114 |     service: ServerService = Depends(ServerService.get_new_instance),
115 | ) -> Server:
116 |     return await service.get_server_by_id(id=id)
117 | 
118 | 
119 | @router.put("/{id}", response_model=ServerRead)
120 | async def update_server(
121 |     id: int,
122 |     data: ServerUpdate,
123 |     service: ServerService = Depends(ServerService.get_new_instance),
124 |     deployment_service: DeploymentService = Depends(DeploymentService.get_new_instance),
125 | ) -> Server:
126 |     await deployment_service.update_deployment(server_id=id, data=data)
127 |     server = await service.update_server(id=id, data=data)
128 |     return server
129 | 
```
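
Illustrative requests against this router (host and port depend on the deployment):

```
curl "http://localhost:80/servers/?status=any&page=1&size=50"
curl "http://localhost:80/servers/search?query=weather&routing_mode=auto"
curl -X DELETE "http://localhost:80/servers/7?current_user_id=user-123"
```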

--------------------------------------------------------------------------------
/scripts/darp-router.py:
--------------------------------------------------------------------------------

```python
  1 | import argparse
  2 | import asyncio
  3 | from typing import Any
  4 | from typing import List
  5 | from typing import Literal
  6 | from typing import Union
  7 | 
  8 | from mcp import ClientSession
  9 | from mcp.client.sse import sse_client
 10 | from mcp.types import CallToolResult
 11 | from openai.types.chat import ChatCompletionMessage
 12 | from openai.types.chat.chat_completion_content_part_text_param import (
 13 |     ChatCompletionContentPartTextParam,
 14 | )
 15 | from pydantic import BaseModel
 16 | 
 17 | default_mcp_url: str = "http://localhost:4689/sse"
 18 | tool_name: str = "routing"
 19 | 
 20 | 
 21 | async def main() -> None:
 22 |     parser = argparse.ArgumentParser(description="DARP Router")
 23 |     parser.add_argument("request", type=str, help="Routing request")
 24 |     parser.add_argument(
 25 |         "--format", choices=["text", "json"], default="text", help="Output format"
 26 |     )
 27 |     parser.add_argument("--verbose", action="store_true", help="Verbose output")
 28 |     args = parser.parse_args()
 29 | 
 30 |     response = await request_mcp(
 31 |         server_url=default_mcp_url,
 32 |         tool_name=tool_name,
 33 |         arguments={"request": args.request},
 34 |     )
 35 |     display_response(response.content[0].text, format=args.format, verbose=args.verbose)
 36 | 
 37 | 
 38 | async def request_mcp(
 39 |     server_url: str = default_mcp_url,
 40 |     tool_name: str = tool_name,
 41 |     arguments: dict[str, Any] | None = None,
 42 | ) -> CallToolResult:
 43 |     async with sse_client(server_url) as (read, write):
 44 |         async with ClientSession(read, write) as session:
 45 |             await session.initialize()
 46 | 
 47 |             return await session.call_tool(
 48 |                 tool_name,
 49 |                 arguments=arguments or {},
 50 |             )
 51 | 
 52 | 
 53 | def display_response(response: str, format: str = "text", verbose: bool = False) -> None:
 54 |     if format == "json":
 55 |         print(response)
 56 |     else:
 57 |         routing_response = RoutingResponse.model_validate_json(response)
 58 |         for message in routing_response.conversation:
 59 |             print(message.__str__(verbose))
 60 | 
 61 | 
 62 | class PrintableChatCompletionMessage(ChatCompletionMessage):
 63 |     def __str__(self, verbose: bool = False) -> str:
 64 |         contents = "\n".join(
 65 |             filter(
 66 |                 None,
 67 |                 [
 68 |                     self._format_content("refusal", verbose),
 69 |                     self._format_content("function_call", verbose),
 70 |                     self._format_content("tool_calls", verbose),
 71 |                     self._format_content("content", verbose=True),
 72 |                 ],
 73 |             )
 74 |         )
 75 |         return f"{self.role}: {contents}"
 76 | 
 77 |     def _format_content(self, key: str, verbose: bool = False) -> str | None:
 78 |         value = getattr(self, f"_{key}_content")()
 79 |         if value is None:
 80 |             return None
 81 |         if not verbose:
 82 |             return f"[{key}]"
 83 |         else:
 84 |             return f"{value}"
 85 | 
 86 |     def _refusal_content(self) -> str | None:
 87 |         if self.refusal is not None:
 88 |             return "\n" + indent(self.refusal)
 89 |         return None
 90 | 
 91 |     def _function_call_content(self) -> str | None:
 92 |         if self.function_call is not None:
 93 |             return f"{self.function_call.name}({self.function_call.arguments})"
 94 |         return None
 95 | 
 96 |     def _tool_calls_content(self) -> str | None:
 97 |         if self.tool_calls is not None:
 98 |             return "\n" + indent(
 99 |                 "\n".join(
100 |                     f"{call.function.name}({call.function.arguments})"
101 |                     for call in self.tool_calls
102 |                 )
103 |             )
104 |         return None
105 | 
106 |     def _content_content(self) -> str | None:
107 |         if self.content is not None:
108 |             return "\n" + indent(self.content)
109 |         return None
110 | 
111 | 
112 | class PrintableChatCompletionToolMessageParam(BaseModel):
113 |     role: Literal["tool"]
114 |     content: Union[str, List[ChatCompletionContentPartTextParam]]
115 |     tool_call_id: str
116 | 
117 |     def __str__(self, verbose: bool = False) -> str:
118 |         if not verbose:
119 |             return f"[{self.role}] ..."
120 |         if isinstance(self.content, str):
121 |             return f"{self.role}: {self.content}"
122 |         elif isinstance(self.content, list):
123 |             contents = "\n".join(
124 |                 "{type}: {text}".format(**item) for item in self.content
125 |             )
126 |             return indent(f"{self.role}:\n{indent(contents)}")
127 |         else:
128 |             raise ValueError(f"Invalid content type: {type(self.content)}")
129 | 
130 | 
131 | class RoutingResponse(BaseModel):
132 |     conversation: list[
133 |         PrintableChatCompletionMessage | PrintableChatCompletionToolMessageParam
134 |     ]
135 | 
136 | 
137 | def indent(text: str, indent: int = 1, prefix: str = "  ") -> str:
138 |     return "\n".join(prefix * indent + line for line in text.split("\n"))
139 | 
140 | 
141 | if __name__ == "__main__":
142 |     asyncio.run(main())
143 | 
```
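
The `request_mcp` helper above is reusable outside the CLI. A minimal programmatic sketch, assuming the MCP server is reachable at the default `http://localhost:4689/sse` and that the hyphenated script has been copied into an importable module (the `darp_router` name here is hypothetical):

```python
import asyncio

from darp_router import request_mcp  # hypothetical module name; the script file itself is hyphenated


async def demo() -> None:
    result = await request_mcp(arguments={"request": "find me a weather tool"})
    for part in result.content:
        # CallToolResult.content holds typed content parts; only text parts carry .text
        text = getattr(part, "text", None)
        if text:
            print(text)


asyncio.run(demo())
```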

--------------------------------------------------------------------------------
/worker/src/deployment_service.py:
--------------------------------------------------------------------------------

```python
  1 | import asyncio
  2 | import os
  3 | import tempfile
  4 | from logging import Logger
  5 | 
  6 | from .docker_service import DockerService
  7 | from .user_logger import UserLogger
  8 | from common.notifications.client import NotificationsClient
  9 | from registry.src.database import Server
 10 | from registry.src.errors import InvalidData
 11 | from registry.src.servers.repository import ServerRepository
 12 | from registry.src.servers.schemas import ServerUpdateData
 13 | from registry.src.servers.schemas import Tool
 14 | from registry.src.servers.service import ServerService
 15 | from registry.src.settings import settings
 16 | from registry.src.types import ServerStatus
 17 | from registry.src.types import ServerTransportProtocol
 18 | from worker.src.errors import ProcessingError
 19 | 
 20 | 
 21 | class WorkerDeploymentService:
 22 |     def __init__(self, repo: ServerRepository, logger: Logger):
 23 |         self.repo = repo
 24 |         self.logger = logger
 25 |         self.docker_env = dict(**os.environ)
 26 |         self.docker_env["DOCKER_HOST"] = settings.deployment_server
 27 |         self.notifications_client = NotificationsClient()
 28 | 
 29 |     async def deploy_server(self, server_id: int) -> None:
 30 |         server = await self.repo.get_server(id=server_id)
 31 |         if not server:
 32 |             raise ProcessingError
 33 |         with tempfile.TemporaryDirectory(
 34 |             prefix=f"server_{server.id}", dir="/tmp"
 35 |         ) as repo_folder:
 36 |             user_logger = UserLogger(session=self.repo.session, server_id=server_id)
 37 |             await user_logger.clear_logs()
 38 |             service = DockerService(
 39 |                 docker_env=self.docker_env,
 40 |                 repo_folder=repo_folder,
 41 |                 server=server,
 42 |                 user_logger=user_logger,
 43 |             )
 44 |             server_url = f"http://{service.container_name}/sse"
 45 |             try:
 46 |                 await self._deploy_repo(service=service)
 47 |                 tools = await self._get_tools(
 48 |                     server=server, service=service, server_url=server_url
 49 |                 )
 50 |             except Exception:
 51 |                 await self._handle_deploy_failure(server)
 52 |                 raise
 53 |         await user_logger.info("Deployment complete.")
 54 |         await self._handle_deploy_success(
 55 |             tools=tools,
 56 |             server=server,
 57 |             url=server_url,
 58 |             transport_protocol=ServerTransportProtocol.SSE,
 59 |         )
 60 | 
 61 |     async def _deploy_repo(self, service: DockerService) -> None:
 62 |         await service.clone_repo()
 63 |         if not service.dockerfile_exists():
 64 |             self.logger.info(f"No Dockerfile in repo: {service.server.repo_url}")
 65 |             await self._ensure_command(service)
 66 |             await service.user_logger.info(
 67 |                 message="No Dockerfile found in the project. Attempting to generate..."
 68 |             )
 69 |             await service.generate_dockerfile()
 70 |         await service.build_image()
 71 |         await service.push_image()
 72 |         await service.run_container()
 73 | 
 74 |     async def _handle_deploy_failure(self, server: Server) -> None:
 75 |         await self.repo.update_server(
 76 |             id=server.id,
 77 |             data=ServerUpdateData(),
 78 |             status=ServerStatus.processing_failed,
 79 |         )
 80 | 
 81 |         await self.notifications_client.notify_deploy_failed(
 82 |             user_id=server.creator_id,
 83 |             server_name=server.name,
 84 |         )
 85 | 
 86 |     async def _handle_deploy_success(
 87 |         self,
 88 |         tools: list[Tool],
 89 |         server: Server,
 90 |         url: str,
 91 |         transport_protocol: ServerTransportProtocol,
 92 |     ) -> None:
 93 |         await self.repo.update_server(
 94 |             id=server.id,
 95 |             tools=tools,
 96 |             data=ServerUpdateData(url=url),
 97 |             status=ServerStatus.active,
 98 |             transport_protocol=transport_protocol,
 99 |         )
100 | 
101 |         await self.notifications_client.notify_deploy_successful(
102 |             user_id=server.creator_id,
103 |             server_name=server.name,
104 |         )
105 | 
106 |     async def _get_tools(
107 |         self, server: Server, service: DockerService, server_url: str
108 |     ) -> list[Tool]:
109 |         for _ in range(settings.tool_retry_count):
110 |             tools = await self._fetch_tools(
111 |                 server=server, server_url=server_url, service=service
112 |             )
113 |             if tools:
114 |                 return tools
115 |         await service.user_logger.error("Error getting tools from server")
116 |         raise ProcessingError
117 | 
118 |     async def _fetch_tools(
119 |         self, server: Server, server_url: str, service: DockerService
120 |     ) -> list[Tool] | None:
121 |         try:
122 |             await service.user_logger.info(
123 |                 f"Waiting {settings.tool_wait_interval} seconds for server to start up"
124 |             )
125 |             await asyncio.sleep(settings.tool_wait_interval)
126 |             tools = await ServerService.get_tools(
127 |                 server_url=server_url, server_name=server.name
128 |             )
129 |             await service.user_logger.info(
130 |                 "Successfully got a list of tools from server"
131 |             )
132 |             return tools
133 |         except InvalidData:
134 |             await service.user_logger.warning("Failed to connect to server")
135 |             return None
136 | 
137 |     @staticmethod
138 |     async def _ensure_command(service: DockerService) -> None:
139 |         if not service.server.command:
140 |             await service.user_logger.error(
141 |                 "Command must not be empty if project does not have a Dockerfile"
142 |             )
143 |             raise ProcessingError
144 | 
```
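
`_get_tools` above implements a wait-then-probe loop: sleep `settings.tool_wait_interval` seconds, ask the freshly started container for its tool list, and give up after `settings.tool_retry_count` attempts. The same pattern in isolation (an illustrative sketch; these names are not from the codebase):

```python
import asyncio
from typing import Awaitable, Callable, TypeVar

T = TypeVar("T")


async def wait_and_probe(
    probe: Callable[[], Awaitable[T | None]],
    attempts: int,
    wait_seconds: float,
) -> T:
    """Sleep before each attempt; return the first non-None result, else raise."""
    for _ in range(attempts):
        await asyncio.sleep(wait_seconds)
        result = await probe()
        if result is not None:
            return result
    raise RuntimeError(f"probe failed after {attempts} attempts")
```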

--------------------------------------------------------------------------------
/registry/src/deployments/service.py:
--------------------------------------------------------------------------------

```python
  1 | import re
  2 | from typing import Self
  3 | 
  4 | from fastapi import Depends
  5 | 
  6 | from ..settings import settings
  7 | from ..types import HostType
  8 | from ..types import TaskType
  9 | from .logs_repository import LogsRepository
 10 | from .schemas import DeploymentCreate
 11 | from registry.src.database import Server
 12 | from registry.src.database import ServerLogs
 13 | from registry.src.errors import InvalidData
 14 | from registry.src.errors import InvalidServerNameError
 15 | from registry.src.errors import NotAllowedError
 16 | from registry.src.errors import ServerAlreadyExistsError
 17 | from registry.src.errors import ServersNotFoundError
 18 | from registry.src.producer import get_producer
 19 | from registry.src.servers.repository import ServerRepository
 20 | from registry.src.servers.schemas import ServerRead
 21 | from registry.src.servers.schemas import ServerUpdate
 22 | from registry.src.servers.service import ID_REGEX_PATTERN
 23 | from registry.src.types import ServerStatus
 24 | from worker.src.schemas import Task
 25 | 
 26 | 
 27 | class DeploymentService:
 28 |     def __init__(self, repo: ServerRepository, logs_repo: LogsRepository) -> None:
 29 |         self.repo = repo
 30 |         self.logs_repo = logs_repo
 31 | 
 32 |     async def get_server(self, server_id: int, creator_id: str | None = None) -> Server:
 33 |         await self._assure_server_found(server_id)
 34 |         server = await self.repo.get_server(server_id)
 35 |         if creator_id:
 36 |             await self._assure_server_creator(server=server, user_id=creator_id)
 37 |         return server
 38 | 
 39 |     async def get_logs(self, server_id: int, current_user_id: str) -> ServerLogs:
 40 |         server = await self.get_server(server_id=server_id, creator_id=current_user_id)
 41 |         if server.host_type != HostType.internal:
 42 |             raise InvalidData("Logs are only available for internal servers")
 43 |         logs = await self.logs_repo.get_server_logs(server_id=server_id)
 44 |         assert logs
 45 |         return logs
 46 | 
 47 |     async def create_server(self, data: DeploymentCreate) -> Server:
 48 |         self._assure_server_name_is_valid_id(data.data.name)
 49 |         await self._assure_server_not_exists(
 50 |             name=data.data.name, repo_url=str(data.data.repo_url)
 51 |         )
 52 |         server = await self.repo.create_server(
 53 |             data=data.data, creator_id=data.current_user_id, tools=[]
 54 |         )
 55 |         await self.logs_repo.create_logs(server_id=server.id)
 56 |         await self.repo.session.commit()
 57 |         await self._send_task(server_id=server.id, task_type=TaskType.deploy)
 58 |         return server
 59 | 
 60 |     async def update_deployment(self, server_id: int, data: ServerUpdate) -> Server:
 61 |         server = await self.get_server(
 62 |             server_id=server_id, creator_id=data.current_user_id
 63 |         )
 64 |         if not data.deployment_data.model_dump(exclude_none=True):
 65 |             return server
 66 |         server = await self.repo.update_server(
 67 |             id=server_id,
 68 |             **data.deployment_data.model_dump(),
 69 |             status=ServerStatus.in_processing,
 70 |         )
 71 |         await self.repo.session.commit()
 72 |         await self._send_task(server_id=server.id, task_type=TaskType.deploy)
 73 |         return server
 74 | 
 75 |     async def stop_server(self, server_id: int, current_user_id: str) -> None:
 76 |         server = await self.get_server(server_id=server_id, creator_id=current_user_id)
 77 |         if server.status == ServerStatus.stopped:
 78 |             return
 79 |         if server.status != ServerStatus.active:
 80 |             raise InvalidData("Server is not active", server_status=server.status)
 81 |         await self._send_task(server_id=server_id, task_type=TaskType.stop)
 82 | 
 83 |     async def start_server(self, server_id: int, current_user_id: str) -> None:
 84 |         server = await self.get_server(server_id=server_id, creator_id=current_user_id)
 85 |         if server.status == ServerStatus.active:
 86 |             return
 87 |         if server.status != ServerStatus.stopped:
 88 |             raise InvalidData("Invalid server state", server_status=server.status)
 89 |         await self._send_task(server_id=server_id, task_type=TaskType.start)
 90 | 
 91 |     @staticmethod
 92 |     async def _send_task(server_id: int, task_type: TaskType) -> None:
 93 |         producer = await get_producer()
 94 |         await producer.start()
 95 |         await producer.send_and_wait(
 96 |             topic=settings.worker_topic,
 97 |             value=Task(task_type=task_type, server_id=server_id),
 98 |         )
 99 |         await producer.stop()
100 | 
101 |     async def _assure_server_found(self, server_id: int) -> None:
102 |         if not await self.repo.find_servers(id=server_id):
103 |             raise ServersNotFoundError([server_id])
104 | 
105 |     @staticmethod
106 |     async def _assure_server_creator(server: Server, user_id: str) -> None:
107 |         if server.creator_id != user_id:
108 |             raise NotAllowedError(
109 |                 "Must be server creator to use this function",
110 |                 creator_id=server.creator_id,
111 |                 current_user_id=user_id,
112 |             )
113 | 
114 |     async def _assure_server_not_exists(
115 |         self,
116 |         name: str | None = None,
117 |         repo_url: str | None = None,
118 |         ignore_id: int | None = None,
119 |     ) -> None:
120 |         if servers := await self.repo.find_servers(
121 |             name=name, repo_url=repo_url, ignore_id=ignore_id
122 |         ):
123 |             dict_servers = [
124 |                 ServerRead.model_validate(server).model_dump() for server in servers
125 |             ]
126 |             raise ServerAlreadyExistsError(dict_servers)
127 | 
128 |     @staticmethod
129 |     def _assure_server_name_is_valid_id(name: str) -> None:
130 |         if not re.match(ID_REGEX_PATTERN, name):
131 |             raise InvalidServerNameError(name=name)
132 | 
133 |     @classmethod
134 |     def get_new_instance(
135 |         cls,
136 |         repo: ServerRepository = Depends(ServerRepository.get_new_instance),
137 |         logs_repo: LogsRepository = Depends(LogsRepository.get_new_instance),
138 |     ) -> Self:
139 |         return cls(repo=repo, logs_repo=logs_repo)
140 | 
```
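
`_send_task` above starts the Kafka producer, publishes a single `Task`, and stops the producer again on every call. The equivalent flow with a bare `aiokafka` producer (a sketch: the broker address and `tasks` topic are assumptions, and the real wiring lives in `registry/src/producer.py`):

```python
import asyncio
import json

from aiokafka import AIOKafkaProducer


async def send_task(server_id: int, task_type: str) -> None:
    producer = AIOKafkaProducer(
        bootstrap_servers="localhost:9092",  # assumed broker address
        value_serializer=lambda value: json.dumps(value).encode(),
    )
    await producer.start()
    try:
        # send_and_wait blocks until the broker acknowledges the record
        await producer.send_and_wait(
            topic="tasks", value={"task_type": task_type, "server_id": server_id}
        )
    finally:
        await producer.stop()


asyncio.run(send_task(server_id=1, task_type="deploy"))
```

Starting and stopping the producer per message keeps the code simple at the cost of reconnect overhead on every task; a long-lived producer shared across requests is the usual alternative.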

--------------------------------------------------------------------------------
/registry/src/servers/repository.py:
--------------------------------------------------------------------------------

```python
  1 | from fastapi import Depends
  2 | from sqlalchemy import delete
  3 | from sqlalchemy import func
  4 | from sqlalchemy import or_
  5 | from sqlalchemy import Select
  6 | from sqlalchemy import select
  7 | from sqlalchemy.ext.asyncio import AsyncSession
  8 | from sqlalchemy.orm import selectinload
  9 | 
 10 | from .schemas import ServerCreateData
 11 | from .schemas import ServerUpdateData
 12 | from .schemas import Tool
 13 | from registry.src.database import get_session
 14 | from registry.src.database import Server
 15 | from registry.src.database import Tool as DBTool
 16 | from registry.src.deployments.schemas import DeploymentCreateData
 17 | from registry.src.errors import ServersNotFoundError
 18 | from registry.src.types import ServerStatus
 19 | from registry.src.types import ServerTransportProtocol
 20 | from worker.src.constants import BaseImage
 21 | 
 22 | 
 23 | class ServerRepository:
 24 | 
 25 |     def __init__(self, session: AsyncSession) -> None:
 26 |         self.session = session
 27 | 
 28 |     async def find_servers(
 29 |         self,
 30 |         id: int | None = None,
 31 |         name: str | None = None,
 32 |         url: str | None = None,
 33 |         ignore_id: int | None = None,
 34 |         repo_url: str | None = None,
 35 |     ) -> list[Server]:
 36 |         if id is None and name is None and url is None and repo_url is None:
 37 |             raise ValueError("At least one of 'id', 'name', 'url', or 'repo_url' must be provided.")
 38 |         if id == ignore_id and id is not None:
 39 |             raise ValueError("id and ignore_id cannot be the same.")
 40 | 
 41 |         query = select(Server).options(selectinload(Server.tools))
 42 |         conditions = [
 43 |             (Server.id == id),
 44 |             (func.lower(Server.name) == func.lower(name)),
 45 |         ]
 46 |         if url:
 47 |             conditions.append(Server.url == url)
 48 |         if repo_url:
 49 |             conditions.append(Server.repo_url == repo_url)
 50 |         query = query.filter(or_(*conditions))
 51 |         query = query.filter(Server.id != ignore_id)
 52 | 
 53 |         result = await self.session.execute(query)
 54 |         return list(result.scalars().all())
 55 | 
 56 |     async def get_server(self, id: int) -> Server:
 57 |         query = select(Server).filter(Server.id == id)
 58 |         query = query.options(selectinload(Server.tools))
 59 |         result = await self.session.execute(query)
 60 |         server = result.scalars().first()
 61 |         if server is None:
 62 |             raise ServersNotFoundError([id])
 63 |         return server
 64 | 
 65 |     async def create_server(
 66 |         self,
 67 |         data: ServerCreateData | DeploymentCreateData,
 68 |         tools: list[Tool],
 69 |         creator_id: str | None,
 70 |         transport_protocol: ServerTransportProtocol | None = None,
 71 |     ) -> Server:
 72 |         db_tools = []
 73 |         if isinstance(data, ServerCreateData):
 74 |             db_tools = self._convert_tools(tools, data.url)
 75 |         server = Server(
 76 |             **data.model_dump(exclude_none=True),
 77 |             tools=db_tools,
 78 |             creator_id=creator_id,
 79 |             transport_protocol=transport_protocol,
 80 |         )
 81 |         self.session.add(server)
 82 |         await self.session.flush()
 83 |         await self.session.refresh(server)
 84 |         return await self.get_server(server.id)
 85 | 
 86 |     async def get_all_servers(
 87 |         self,
 88 |         search_query: str | None = None,
 89 |         status: ServerStatus | None = None,
 90 |         creator_id: str | None = None,
 91 |     ) -> Select:
 92 |         query = select(Server).options(selectinload(Server.tools))
 93 | 
 94 |         if status is not None:
 95 |             query = query.where(Server.status == status)
 96 | 
 97 |         if creator_id:
 98 |             query = query.where(Server.creator_id == creator_id)
 99 | 
100 |         if search_query:
101 |             query_string = f"%{search_query}%"
102 |             query = query.where(
103 |                 or_(
104 |                     Server.name.ilike(query_string),
105 |                     Server.description.ilike(query_string),
106 |                     Server.title.ilike(query_string),
107 |                 )
108 |             )
109 |         return query
110 | 
111 |     async def get_servers_by_urls(self, urls: list[str]) -> list[Server]:
112 |         query = select(Server).where(Server.url.in_(urls)).options(selectinload(Server.tools))
113 |         result = await self.session.execute(query)
114 |         return list(result.scalars().all())
115 | 
116 |     async def get_servers_by_ids(self, ids: list[int]) -> list[Server]:
117 |         query = (
118 |             select(Server).where(Server.id.in_(ids)).options(selectinload(Server.tools))
119 |         )
120 |         result = await self.session.execute(query)
121 |         return list(result.scalars().all())
122 | 
123 |     async def delete_server(self, id: int) -> None:
124 |         query = delete(Server).where(Server.id == id)
125 |         await self.session.execute(query)
126 | 
127 |     async def update_server(
128 |         self,
129 |         id: int,
130 |         data: ServerUpdateData | None = None,
131 |         tools: list[Tool] | None = None,
132 |         status: ServerStatus | None = None,
133 |         repo_url: str | None = None,
134 |         command: str | None = None,
135 |         base_image: BaseImage | None = None,
136 |         build_instructions: str | None = None,
137 |         transport_protocol: ServerTransportProtocol | None = None,
138 |     ) -> Server:
139 |         server = await self.get_server(id)
140 |         update_values = dict(
141 |             status=status,
142 |             repo_url=repo_url,
143 |             command=command,
144 |             transport_protocol=transport_protocol,
145 |             base_image=base_image,
146 |             build_instructions=build_instructions,
147 |         )
148 |         update_values = {
149 |             key: value for key, value in update_values.items() if value is not None
150 |         }
151 |         if data:
152 |             update_values.update(data.model_dump(exclude_none=True))
153 |         for key, value in update_values.items():
154 |             setattr(server, key, value)
155 | 
156 |         if tools is not None:
157 |             query = delete(DBTool).where(DBTool.server_url == server.url)
158 |             await self.session.execute(query)
159 |             await self.session.flush()
160 |             await self.session.refresh(server)
161 |             server.tools.extend(self._convert_tools(tools, server.url))
162 | 
163 |         await self.session.flush()
164 |         await self.session.refresh(server)
165 |         return server
166 | 
167 |     def _convert_tools(self, tools: list[Tool], url: str) -> list[DBTool]:
168 |         return [
169 |             DBTool(**tool.model_dump(exclude_none=True), server_url=url)
170 |             for tool in tools
171 |         ]
172 | 
173 |     @classmethod
174 |     def get_new_instance(
175 |         cls, session: AsyncSession = Depends(get_session)
176 |     ) -> "ServerRepository":
177 |         return cls(session)
178 | 
```
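
A minimal usage sketch for `ServerRepository` outside FastAPI's dependency injection, assuming a SQLAlchemy 2.0 async engine (the connection URL here is a placeholder):

```python
import asyncio

from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine

from registry.src.servers.repository import ServerRepository

engine = create_async_engine("postgresql+asyncpg://user:pass@localhost/registry")
Session = async_sessionmaker(engine, expire_on_commit=False)


async def main() -> None:
    async with Session() as session:
        repo = ServerRepository(session)
        # Duplicate check: matches on id OR case-insensitive name OR url/repo_url
        duplicates = await repo.find_servers(name="my-server")
        print([server.id for server in duplicates])


asyncio.run(main())
```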