# Directory Structure
```
├── .gitignore
├── .python-version
├── LICENSE
├── pyproject.toml
├── README.md
├── src
│   └── image_gen
│       ├── __init__.py
│       └── server.py
└── uv.lock
```
# Files
--------------------------------------------------------------------------------
/.python-version:
--------------------------------------------------------------------------------
```
3.12
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
# Python-generated files
__pycache__/
*.py[oc]
build/
dist/
wheels/
*.egg-info

# Virtual environments
.venv
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
# Image Generation MCP Server

A Model Context Protocol (MCP) server that enables seamless generation of high-quality images via Together AI. This server provides a standardized interface to specify image generation parameters.

<a href="https://glama.ai/mcp/servers/o0137xiz62">
  <img width="380" height="200" src="https://glama.ai/mcp/servers/o0137xiz62/badge" alt="Image Generation Server MCP server" />
</a>

## Features

- High-quality image generation powered by the Flux.1 Schnell model
- Support for customizable dimensions (width and height)
- Clear error handling for prompt validation and API issues
- Easy integration with MCP-compatible clients

## Installation

### Claude Desktop

Add the server to your `claude_desktop_config.json`:

- On macOS: `~/Library/Application Support/Claude/claude_desktop_config.json`
- On Windows: `%APPDATA%/Claude/claude_desktop_config.json`

<details>
<summary>Development/Unpublished Servers Configuration</summary>

```json
{
  "mcpServers": {
    "image-gen": {
      "command": "uv",
      "args": ["--directory", "/ABSOLUTE/PATH/TO/image-gen/", "run", "image-gen"],
      "env": {
        "TOGETHER_AI_API_KEY": "<API KEY>"
      }
    }
  }
}
```

</details>

## Available Tools

The server implements one tool:

### generate_image

Generates an image from a text prompt and model name, with optional dimensions.

**Input Schema:**

```json
{
  "prompt": {
    "type": "string",
    "description": "A descriptive prompt for generating the image (e.g., 'a futuristic cityscape at sunset'). Required."
  },
  "model": {
    "type": "string",
    "description": "The exact model name as it appears in Together AI. If incorrect, it falls back to the default model (black-forest-labs/FLUX.1-schnell). Required."
  },
  "width": {
    "type": "integer",
    "description": "Width of the generated image in pixels (optional)"
  },
  "height": {
    "type": "integer",
    "description": "Height of the generated image in pixels (optional)"
  }
}
```
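
For example, here is a minimal sketch of invoking the tool from a Python MCP client. It assumes the `mcp` package's client API (`ClientSession`, `StdioServerParameters`, `stdio_client`); the directory path and API key are placeholders to adapt to your setup:

```python
# Hypothetical client-side test: spawn the server over stdio and call
# generate_image. The path and API key below are placeholders.
import asyncio

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client


async def run() -> None:
    params = StdioServerParameters(
        command="uv",
        args=["--directory", "/ABSOLUTE/PATH/TO/image-gen/", "run", "image-gen"],
        env={"TOGETHER_AI_API_KEY": "<API KEY>"},
    )
    async with stdio_client(params) as (read, write):
        async with ClientSession(read, write) as session:
            await session.initialize()
            result = await session.call_tool(
                "generate_image",
                {
                    "prompt": "a futuristic cityscape at sunset",
                    "model": "black-forest-labs/FLUX.1-schnell",
                    "width": 1024,
                    "height": 768,
                },
            )
            print(result)


asyncio.run(run())
```

On success the result contains an `image` content item with base64-encoded data; on failure it contains a `text` item describing the error.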

## Prerequisites

- Python 3.12 or higher
- httpx
- mcp

## Contributing

Contributions are welcome! Please follow these steps to contribute:

1. Fork the repository
2. Create a new branch (e.g., `feature/my-new-feature`)
3. Commit your changes
4. Push the branch to your fork
5. Open a Pull Request

For significant changes, please open an issue first to discuss your proposed changes.

## License

This project is licensed under the MIT License. See the LICENSE file for details.
```
--------------------------------------------------------------------------------
/src/image_gen/__init__.py:
--------------------------------------------------------------------------------
```python
from . import server
import asyncio


def main():
    """Main entry point for the server."""
    asyncio.run(server.main())


__all__ = ["main", "server"]
```
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
```toml
[project]
name = "image-gen"
version = "0.1.1"
description = "MCP server enabling Image Generation for LLMs, built in Python and integrated with Together AI."
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
    "httpx>=0.28.1",
    "mcp>=1.1.2",
]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project.scripts]
image-gen = "image_gen:main"
```
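
The `[project.scripts]` entry wires the `image-gen` console script to the `main()` function exported by `src/image_gen/__init__.py`; conceptually, the generated launcher is equivalent to this sketch:

```python
# Rough equivalent of the console script that `image-gen` installs:
# import the package's entry point and call it.
from image_gen import main

if __name__ == "__main__":
    main()
```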
--------------------------------------------------------------------------------
/src/image_gen/server.py:
--------------------------------------------------------------------------------
```python
from typing import Any, Optional
import asyncio
import os

import httpx
import mcp.server.stdio
import mcp.types as types
from mcp.server import NotificationOptions, Server
from mcp.server.models import InitializationOptions

TOGETHER_AI_BASE = "https://api.together.xyz/v1/images/generations"
API_KEY = os.getenv("TOGETHER_AI_API_KEY")  # must be set in the environment
DEFAULT_MODEL = "black-forest-labs/FLUX.1-schnell"

server = Server("image-gen")


@server.list_tools()
async def handle_list_tools() -> list[types.Tool]:
    """
    List available tools.
    Each tool specifies its arguments using JSON Schema validation.
    """
    return [
        types.Tool(
            name="generate_image",
            description="Generate an image based on the text prompt, model, and optional dimensions",
            inputSchema={
                "type": "object",
                "properties": {
                    "prompt": {
                        "type": "string",
                        "description": "The text prompt for image generation",
                    },
                    "model": {
                        "type": "string",
                        "description": "The exact model name as it appears in Together AI. If incorrect, it falls back to the default model (black-forest-labs/FLUX.1-schnell).",
                    },
                    "width": {
                        "type": "integer",
                        "description": "Optional width for the image in pixels",
                    },
                    "height": {
                        "type": "integer",
                        "description": "Optional height for the image in pixels",
                    },
                },
                "required": ["prompt", "model"],
            },
        )
    ]


async def make_together_request(
    client: httpx.AsyncClient,
    prompt: str,
    model: str,
    width: Optional[int] = None,
    height: Optional[int] = None,
) -> dict[str, Any]:
    """Make a request to the Together API, with error handling and a fallback to the default model."""
    request_body = {"model": model, "prompt": prompt, "response_format": "b64_json"}
    headers = {"Authorization": f"Bearer {API_KEY}"}

    if width is not None:
        request_body["width"] = width
    if height is not None:
        request_body["height"] = height

    async def send_request(body: dict) -> tuple[int, dict]:
        response = await client.post(TOGETHER_AI_BASE, headers=headers, json=body)
        try:
            data = response.json()
        except Exception:
            data = {}
        return response.status_code, data

    # First request with the user-provided model
    status, data = await send_request(request_body)

    # Check whether the request failed because the model is invalid or unavailable
    if status != 200 and "error" in data:
        error_info = data["error"]
        error_msg = str(error_info.get("message", "")).lower()
        error_code = str(error_info.get("code", "")).lower()
        if (
            "model" in error_msg and "not available" in error_msg
        ) or error_code == "model_not_available":
            # Retry once with the default model
            request_body["model"] = DEFAULT_MODEL
            status, data = await send_request(request_body)
            if status != 200 or "error" in data:
                return {
                    "error": f"Fallback API error: {data.get('error', 'Unknown error')} (HTTP {status})"
                }
            return data
        return {"error": f"Together API error: {data.get('error')}"}
    elif status != 200:
        return {"error": f"HTTP error {status}"}

    return data


@server.call_tool()
async def handle_call_tool(
    name: str, arguments: dict | None
) -> list[types.TextContent | types.ImageContent | types.EmbeddedResource]:
    """
    Handle tool execution requests.
    Tools can generate images and notify clients of changes.
    """
    if name != "generate_image":
        raise ValueError(f"Unknown tool: {name}")

    if not arguments:
        return [
            types.TextContent(type="text", text="Missing arguments for the request")
        ]

    prompt = arguments.get("prompt")
    model = arguments.get("model")
    width = arguments.get("width")
    height = arguments.get("height")

    if not prompt or not model:
        return [
            types.TextContent(type="text", text="Missing prompt or model parameter")
        ]

    async with httpx.AsyncClient() as client:
        response_data = await make_together_request(
            client=client,
            prompt=prompt,
            model=model,  # user-provided model; the default is used as a fallback
            width=width,
            height=height,
        )

    if "error" in response_data:
        return [types.TextContent(type="text", text=response_data["error"])]

    try:
        b64_image = response_data["data"][0]["b64_json"]
        return [
            types.ImageContent(type="image", data=b64_image, mimeType="image/jpeg")
        ]
    except (KeyError, IndexError) as e:
        return [
            types.TextContent(type="text", text=f"Failed to parse API response: {e}")
        ]


async def main():
    # Run the server over stdio, as MCP clients expect
    async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
        await server.run(
            read_stream,
            write_stream,
            InitializationOptions(
                server_name="image-gen",
                server_version="0.1.1",
                capabilities=server.get_capabilities(
                    notification_options=NotificationOptions(),
                    experimental_capabilities={},
                ),
            ),
        )


if __name__ == "__main__":
    asyncio.run(main())
```
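
As a sanity check, `make_together_request` can also be exercised outside of MCP. The sketch below assumes the package is importable (e.g. via `uv run python`), that `TOGETHER_AI_API_KEY` is set, and that Together reports an unavailable model with a `model_not_available` code so the fallback path is taken:

```python
# Standalone sketch: call make_together_request directly to verify the
# API key and the default-model fallback. The model name is deliberately
# invalid, so the request should retry with DEFAULT_MODEL.
import asyncio
import base64

import httpx

from image_gen.server import DEFAULT_MODEL, make_together_request


async def check() -> None:
    async with httpx.AsyncClient(timeout=60) as client:
        data = await make_together_request(
            client, prompt="a red bicycle", model="not-a-real/model"
        )
    if "error" in data:
        print(data["error"])
        return
    image_bytes = base64.b64decode(data["data"][0]["b64_json"])
    print(f"decoded {len(image_bytes)} bytes (fallback model: {DEFAULT_MODEL})")


asyncio.run(check())
```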