# Directory Structure
```
├── Dockerfile
├── images
│   ├── cursor_config.png
│   ├── example.png
│   ├── logo_0.png
│   ├── logo_1.png
│   ├── logo_2.png
│   └── logo_3.png
├── LICENSE
├── proxy
│   ├── example.py
│   ├── jimeng
│   │   ├── __init__.py
│   │   ├── chat.py
│   │   ├── core.py
│   │   ├── exceptions.py
│   │   ├── images.py
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── README.md
│   └── setup.py
├── README.md
├── requirements.txt
├── server.py
└── smithery.yaml
```
# Files
--------------------------------------------------------------------------------
/proxy/jimeng/requirements.txt:
--------------------------------------------------------------------------------
```
requests>=2.31.0
```
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
```
aiohttp>=3.8.1
aiofiles>=0.8.0
requests>=2.26.0
psutil>=5.8.0
fastmcp==0.4.1
mcp==1.2.1
brotlipy>=0.7.0
brotli==1.1.0
```
--------------------------------------------------------------------------------
/smithery.yaml:
--------------------------------------------------------------------------------
```yaml
# Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml

startCommand:
  type: stdio
  configSchema:
    # JSON Schema defining the configuration options for the MCP.
    {}
  commandFunction:
    # A JS function that produces the CLI command based on the given config to start the MCP on stdio.
    |-
    (config) => ({ command: 'uv', args: ['run', '--with', 'fastmcp', 'fastmcp', 'run', 'server.py'] })
  exampleConfig: {}
```
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
```dockerfile
# Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile
FROM python:3.10-slim

WORKDIR /app

# Copy dependency list and install packages
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt \
    && pip install uv

# Copy the rest of the application code
COPY . .

# Expose any needed ports (if applicable)
EXPOSE 8000

# Start the MCP server using uv and fastmcp
CMD ["uv", "run", "--with", "fastmcp", "fastmcp", "run", "server.py"]
```
--------------------------------------------------------------------------------
/proxy/setup.py:
--------------------------------------------------------------------------------
```python
# Author: 凌封 (WeChat: fengin)
# GITHUB: https://github.com/fengin/image-gen-server.git
# Related reading: AI全书 (AI Book), https://aibook.ren

from setuptools import setup, find_packages

setup(
    name="jimeng",
    version="0.0.1",
    packages=find_packages(),
    install_requires=[
        "requests>=2.31.0"
    ],
    author="Your Name",
    author_email="[email protected]",
    description="即梦AI Python模块",
    long_description=open("README.md", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    url="https://github.com/yourusername/jimeng",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.7",
)
```
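
Once installed from the `proxy/` directory (for example with an editable `pip install`), the package is imported under the `jimeng` namespace. A minimal import sketch, assuming only the modules listed in the directory tree above and no extra re-exports from `__init__.py`:

```python
# Import sketch for the installed package; module paths mirror proxy/jimeng/.
# Assumes no additional re-exports are defined in jimeng/__init__.py.
from jimeng.chat import create_completion, create_completion_stream
from jimeng.core import request, generate_cookie
from jimeng.images import generate_images
```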
--------------------------------------------------------------------------------
/proxy/jimeng/core.py:
--------------------------------------------------------------------------------
```python
# Author: 凌封 (WeChat: fengin)
# GITHUB: https://github.com/fengin/image-gen-server.git
# Related reading: AI全书 (AI Book), https://aibook.ren


"""Core functionality."""

import json
from typing import Any, Dict, Optional, Union
import requests
import logging

from . import utils
from .exceptions import API_REQUEST_FAILED, API_IMAGE_GENERATION_INSUFFICIENT_POINTS

# Compression support for gzip/brotli encoded responses
import gzip
import brotli
from io import BytesIO

# Constants
MODEL_NAME = "jimeng"
DEFAULT_ASSISTANT_ID = "513695"
VERSION_CODE = "5.8.0"
PLATFORM_CODE = "7"
DEVICE_ID = utils.generate_device_id()
WEB_ID = utils.generate_web_id()
USER_ID = utils.generate_uuid(False)
MAX_RETRY_COUNT = 3
RETRY_DELAY = 5000
FILE_MAX_SIZE = 100 * 1024 * 1024

# Request headers
FAKE_HEADERS = {
    "Accept": "application/json, text/plain, */*",
    "Accept-Encoding": "gzip, deflate, br, zstd",
    "Accept-language": "zh-CN,zh;q=0.9",
    "Cache-control": "no-cache",
    "Last-event-id": "undefined",
    "Appid": DEFAULT_ASSISTANT_ID,
    "Appvr": VERSION_CODE,
    "Origin": "https://jimeng.jianying.com",
    "Pragma": "no-cache",
    "Priority": "u=1, i",
    "Referer": "https://jimeng.jianying.com",
    "Pf": PLATFORM_CODE,
    "Sec-Ch-Ua": '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
    "Sec-Ch-Ua-Mobile": "?0",
    "Sec-Ch-Ua-Platform": '"Windows"',
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-origin",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
}


def acquire_token(refresh_token: str) -> str:
    """Acquire an access token.

    Jimeng's access_token is currently static, so there is no refresh step yet.

    Args:
        refresh_token: refresh_token used to obtain the access_token

    Returns:
        str: access_token
    """
    return refresh_token


def generate_cookie(token: str) -> str:
    """Generate the Cookie header value.

    Args:
        token: access token

    Returns:
        str: cookie string
    """
    return f"sessionid={token}; sessionid_ss={token}; sid_tt={token}; uid_tt={token}; uid_tt_ss={token}"


def check_result(response: requests.Response) -> Dict[str, Any]:
    """Check an API response.

    Args:
        response: HTTP response

    Returns:
        Dict: response data

    Raises:
        API_IMAGE_GENERATION_INSUFFICIENT_POINTS: insufficient credits
        API_REQUEST_FAILED: request failed
    """
    result = response.json()
    ret, errmsg, data = result.get('ret'), result.get('errmsg'), result.get('data')

    if not utils.is_finite(ret):
        return result

    if str(ret) == '0':
        return data

    if str(ret) == '5000':
        raise API_IMAGE_GENERATION_INSUFFICIENT_POINTS(f"即梦积分可能不足,{errmsg}")

    raise API_REQUEST_FAILED(f"请求jimeng失败: {errmsg}")


def request(
    method: str,
    uri: str,
    refresh_token: str,
    params: Optional[Dict] = None,
    data: Optional[Dict] = None,
    headers: Optional[Dict] = None,
    **kwargs
) -> Dict[str, Any]:
    """Send a request to the Jimeng API.

    Args:
        method: HTTP method
        uri: request path
        refresh_token: refresh token
        params: URL query parameters
        data: request body
        headers: extra request headers
        **kwargs: additional arguments passed to requests

    Returns:
        Dict: response data
    """
    token = acquire_token(refresh_token)
    device_time = utils.get_timestamp()
    sign = utils.md5(f"9e2c|{uri[-7:]}|{PLATFORM_CODE}|{VERSION_CODE}|{device_time}||11ac")

    _headers = {
        **FAKE_HEADERS,
        "Cookie": generate_cookie(token),
        "Device-Time": str(device_time),
        "Sign": sign,
        "Sign-Ver": "1"
    }
    if headers:
        _headers.update(headers)

    _params = {
        "aid": DEFAULT_ASSISTANT_ID,
        "device_platform": "web",
        "region": "CN",
        "web_id": WEB_ID
    }
    if params:
        _params.update(params)

    response = requests.request(
        method=method.lower(),
        url=f"https://jimeng.jianying.com{uri}",
        params=_params,
        json=data,
        headers=_headers,
        timeout=15,
        verify=True,
        **kwargs
    )

    # Check the response
    try:
        logging.debug(f'请求uri:{uri},响应状态:{response.status_code}')
        # Decompress the body according to its Content-Encoding,
        # falling back to the raw response text if decompression fails
        try:
            content = decompress_response(response)
            logging.debug(f'响应结果:{content}')
        except Exception as e:
            logging.debug(f'解压失败,使用原始响应: {str(e)}')
            content = response.text
            logging.debug(f'响应结果:{content}')
        # Parse the (possibly decompressed) body as JSON
        result = json.loads(content)
    except Exception:
        raise API_REQUEST_FAILED("响应格式错误")

    ret = result.get('ret')
    if ret is None:
        return result

    if str(ret) == '0':
        return result.get('data', {})

    if str(ret) == '5000':
        raise API_IMAGE_GENERATION_INSUFFICIENT_POINTS(f"[无法生成图像]: 即梦积分可能不足,{result.get('errmsg')}")

    raise API_REQUEST_FAILED(f"[请求jimeng失败]: {result.get('errmsg')}")


def decompress_response(response: requests.Response) -> str:
    """Decompress the response body.

    Args:
        response: HTTP response

    Returns:
        str: decompressed content
    """
    content = response.content
    encoding = response.headers.get('Content-Encoding', '').lower()

    if encoding == 'gzip':
        buffer = BytesIO(content)
        with gzip.GzipFile(fileobj=buffer) as f:
            content = f.read()
    elif encoding == 'br':
        content = brotli.decompress(content)
    # Support for other encodings (e.g. zstd) can be added here if needed

    return content.decode('utf-8')
```
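
The `Sign` header built in `request()` is an MD5 digest over a pipe-delimited string made from fixed markers, the last seven characters of the request path, the platform code, the version code, and the device time. A standalone sketch of that computation, assuming `utils.md5` is a plain lowercase-hex MD5 digest and `utils.get_timestamp()` returns Unix seconds (utils.py is not shown here); the path below is only an illustrative placeholder:

```python
# Standalone sketch of the Sign computation from request().
# Assumptions: utils.md5 is a plain MD5 hex digest, utils.get_timestamp()
# returns Unix seconds, and "/example/path" is a hypothetical URI.
import hashlib
import time

PLATFORM_CODE = "7"
VERSION_CODE = "5.8.0"

def make_sign(uri: str, device_time: int) -> str:
    # Same pipe-delimited layout as in request(): fixed "9e2c"/"11ac" markers
    # around the last 7 characters of the path, platform, version and timestamp.
    raw = f"9e2c|{uri[-7:]}|{PLATFORM_CODE}|{VERSION_CODE}|{device_time}||11ac"
    return hashlib.md5(raw.encode("utf-8")).hexdigest()

device_time = int(time.time())
print(make_sign("/example/path", device_time))
```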
--------------------------------------------------------------------------------
/proxy/jimeng/chat.py:
--------------------------------------------------------------------------------
```python
# Author: 凌封 (WeChat: fengin)
# GITHUB: https://github.com/fengin/image-gen-server.git
# Related reading: AI全书 (AI Book), https://aibook.ren


"""Chat completion helpers."""

import re
import time
from typing import AsyncGenerator, Dict, List, Optional, Union
import random

from . import utils
from .images import generate_images, DEFAULT_MODEL
from .exceptions import API_REQUEST_PARAMS_INVALID

MAX_RETRY_COUNT = 3
RETRY_DELAY = 5000

def parse_model(model: str) -> Dict[str, Union[str, int]]:
    """Parse the model string.

    Args:
        model: model name, optionally with a size suffix such as "name:1024x1024"

    Returns:
        Dict: model info (name, width, height)
    """
    model_name, size = model.split(':') if ':' in model else (model, None)
    if not size:
        return {
            'model': model_name,
            'width': 1024,
            'height': 1024
        }

    match = re.search(r'(\d+)[\W\w](\d+)', size)
    if not match:
        return {
            'model': model_name,
            'width': 1024,
            'height': 1024
        }

    width, height = match.groups()
    return {
        'model': model_name,
        'width': int((int(width) + 1) // 2 * 2),   # round odd values up to an even number
        'height': int((int(height) + 1) // 2 * 2)  # round odd values up to an even number
    }

async def create_completion(
    messages: List[Dict[str, str]],
    refresh_token: str,
    model: str = DEFAULT_MODEL,
    retry_count: int = 0
) -> Dict:
    """Non-streaming chat completion.

    Args:
        messages: message list
        refresh_token: refresh token
        model: model name
        retry_count: current retry count

    Returns:
        Dict: completion result

    Raises:
        API_REQUEST_PARAMS_INVALID: invalid parameters
    """
    try:
        if not messages:
            raise API_REQUEST_PARAMS_INVALID("消息不能为空")

        # Parse model parameters
        model_info = parse_model(model)

        # Generate images
        image_urls = generate_images(
            model=model_info['model'],
            prompt=messages[-1]['content'],
            width=model_info['width'],
            height=model_info['height'],
            refresh_token=refresh_token
        )

        # Build the response payload (image URLs as markdown image links)
        return {
            'id': utils.generate_uuid(),
            'model': model or model_info['model'],
            'object': 'chat.completion',
            'choices': [{
                'index': 0,
                'message': {
                    'role': 'assistant',
                    'content': ''.join(f'![image_{i}]({url})\n' for i, url in enumerate(image_urls))
                },
                'finish_reason': 'stop'
            }],
            'usage': {
                'prompt_tokens': 1,
                'completion_tokens': 1,
                'total_tokens': 2
            },
            'created': utils.get_timestamp()
        }
    except Exception as e:
        if retry_count < MAX_RETRY_COUNT:
            print(f"Response error: {str(e)}")
            print(f"Try again after {RETRY_DELAY / 1000}s...")
            time.sleep(RETRY_DELAY / 1000)
            return await create_completion(messages, refresh_token, model, retry_count + 1)
        raise e

async def create_completion_stream(
    messages: List[Dict[str, str]],
    refresh_token: str,
    model: str = DEFAULT_MODEL,
    retry_count: int = 0
) -> AsyncGenerator[Dict, None]:
    """Streaming chat completion.

    Args:
        messages: message list
        refresh_token: refresh token
        model: model name
        retry_count: current retry count

    Yields:
        Dict: completion chunks
    """
    try:
        if not messages:
            yield {
                'id': utils.generate_uuid(),
                'model': model,
                'object': 'chat.completion.chunk',
                'choices': [{
                    'index': 0,
                    'delta': {'role': 'assistant', 'content': '消息为空'},
                    'finish_reason': 'stop'
                }]
            }
            return

        # Parse model parameters
        model_info = parse_model(model)

        # Send an initial "generating" message
        yield {
            'id': utils.generate_uuid(),
            'model': model or model_info['model'],
            'object': 'chat.completion.chunk',
            'choices': [{
                'index': 0,
                'delta': {'role': 'assistant', 'content': '🎨 图像生成中,请稍候...'},
                'finish_reason': None
            }]
        }

        try:
            # Generate images
            image_urls = generate_images(
                model=model_info['model'],
                prompt=messages[-1]['content'],
                width=model_info['width'],
                height=model_info['height'],
                refresh_token=refresh_token
            )

            # Send the image URLs as markdown image links
            for i, url in enumerate(image_urls):
                yield {
                    'id': utils.generate_uuid(),
                    'model': model or model_info['model'],
                    'object': 'chat.completion.chunk',
                    'choices': [{
                        'index': i + 1,
                        'delta': {
                            'role': 'assistant',
                            'content': f'![image_{i}]({url})\n'
                        },
                        'finish_reason': None if i < len(image_urls) - 1 else 'stop'
                    }]
                }

            # Send the completion message
            yield {
                'id': utils.generate_uuid(),
                'model': model or model_info['model'],
                'object': 'chat.completion.chunk',
                'choices': [{
                    'index': len(image_urls) + 1,
                    'delta': {
                        'role': 'assistant',
                        'content': '图像生成完成!'
                    },
                    'finish_reason': 'stop'
                }]
            }

        except Exception as e:
            # Send an error message
            yield {
                'id': utils.generate_uuid(),
                'model': model or model_info['model'],
                'object': 'chat.completion.chunk',
                'choices': [{
                    'index': 1,
                    'delta': {
                        'role': 'assistant',
                        'content': f'生成图片失败: {str(e)}'
                    },
                    'finish_reason': 'stop'
                }]
            }
    except Exception as e:
        if retry_count < MAX_RETRY_COUNT:
            print(f"Response error: {str(e)}")
            print(f"Try again after {RETRY_DELAY / 1000}s...")
            time.sleep(RETRY_DELAY / 1000)
            async for chunk in create_completion_stream(messages, refresh_token, model, retry_count + 1):
                yield chunk
            return
        raise e
```
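
Both entry points are coroutines, so they need an event loop to run. A minimal usage sketch, assuming the package is importable as `jimeng` and that a valid Jimeng session id is supplied as the refresh token (the value below is a placeholder):

```python
# Usage sketch for create_completion / create_completion_stream.
# SESSION_ID is a hypothetical placeholder; supply a real Jimeng sessionid.
import asyncio

from jimeng.chat import create_completion, create_completion_stream

SESSION_ID = "your-jimeng-sessionid"

async def main() -> None:
    messages = [{"role": "user", "content": "a cat reading a book on the moon"}]

    # Non-streaming: a single chat.completion-style dict.
    result = await create_completion(messages, SESSION_ID)
    print(result["choices"][0]["message"]["content"])

    # Streaming: an async generator of chat.completion.chunk dicts.
    async for chunk in create_completion_stream(messages, SESSION_ID):
        print(chunk["choices"][0]["delta"]["content"], end="")

if __name__ == "__main__":
    asyncio.run(main())
```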