# Directory Structure
```
├── Dockerfile
├── LICENSE
├── README.md
└── server.py
```
# Files
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
# 🧠 Ask ChatGPT - MCP Server (Stdio)

This is a Model Context Protocol (MCP) stdio server that forwards prompts to OpenAI's ChatGPT (GPT-4o). It is designed to run inside LangGraph-based assistants and enables advanced summarization, analysis, and reasoning by accessing an external LLM.

## 📌 What It Does

This server exposes a single tool:

```json
{
  "name": "ask_chatgpt",
  "description": "Sends the provided text ('content') to an external ChatGPT (gpt-4o) model for advanced reasoning or summarization.",
  "parameters": {
    "type": "object",
    "properties": {
      "content": {
        "type": "string",
        "description": "The text to analyze, summarize, compare, or reason about."
      }
    },
    "required": ["content"]
  }
}
```

Use this when your assistant needs to:

- Summarize long documents
- Analyze configuration files
- Compare options
- Perform advanced natural language reasoning

An example exchange is shown below.
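The request is a single JSON line on the server's stdin, and the reply is a single JSON line on stdout (the `output` value here is a placeholder; failures come back as `{"result": {"error": "..."}}` instead):

```json
{"method": "tools/call", "params": {"name": "ask_chatgpt", "arguments": {"content": "Summarize this config..."}}}
```

```json
{"result": {"output": "<ChatGPT's reply as a single string>"}}
```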
## 🐳 Docker Usage

Build and run the container:

```bash
docker build -t ask-chatgpt-mcp .

docker run -e OPENAI_API_KEY=your-openai-key -i ask-chatgpt-mcp
```
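If you keep the key in a `.env` file (see Environment Setup below), Docker's `--env-file` flag keeps it off the command line:

```bash
docker run --env-file .env -i ask-chatgpt-mcp
```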
## 🧪 Manual Test

Test the server locally using a one-shot request:

```bash
echo '{"method":"tools/call","params":{"name":"ask_chatgpt","arguments":{"content":"Summarize this config..."}}}' | \
  OPENAI_API_KEY=your-openai-key python3 server.py --oneshot
```
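A successful call prints a single JSON line such as `{"result": {"output": "..."}}` to stdout (logs go to stderr). You can list the available tools the same way:

```bash
echo '{"method":"tools/discover"}' | \
  OPENAI_API_KEY=your-openai-key python3 server.py --oneshot
```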
## 🧩 LangGraph Integration

To connect this MCP server to your LangGraph pipeline, configure it like this:

```python
("chatgpt-mcp", ["python3", "server.py", "--oneshot"], "tools/discover", "tools/call")
```
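The tuple above is only configuration; the wire protocol itself is newline-delimited JSON over stdin/stdout, so any client that can spawn a subprocess can talk to the server. Here is a minimal stand-alone sketch (not LangGraph-specific; `call_oneshot` is a helper name used purely for illustration):

```python
import json
import subprocess


def call_oneshot(request: dict) -> dict:
    """Spawn server.py in --oneshot mode, send one JSON request, read one JSON reply.

    OPENAI_API_KEY must already be set in the environment the subprocess inherits.
    """
    proc = subprocess.run(
        ["python3", "server.py", "--oneshot"],
        input=json.dumps(request) + "\n",
        capture_output=True,
        text=True,
    )
    # server.py writes exactly one JSON object to stdout; its logs go to stderr.
    return json.loads(proc.stdout.strip())


if __name__ == "__main__":
    tools = call_oneshot({"method": "tools/discover"})
    print([tool["name"] for tool in tools["result"]])

    answer = call_oneshot({
        "method": "tools/call",
        "params": {"name": "ask_chatgpt", "arguments": {"content": "Summarize this config..."}},
    })
    print(answer["result"])
```

Each `--oneshot` invocation starts a fresh process; for a long-running pipeline you would keep one server-mode process open and write one request per line instead.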
## ⚙️ MCP Server Config Example

Here's how to configure the server using an `mcpServers` JSON config:

```json
{
  "mcpServers": {
    "chatgpt": {
      "command": "python3",
      "args": [
        "server.py",
        "--oneshot"
      ],
      "env": {
        "OPENAI_API_KEY": "<YOUR_OPENAI_API_KEY>"
      }
    }
  }
}
```

## 🔍 Explanation

- `"command"`: runs the script with Python
- `"args"`: enables one-shot stdin/stdout mode
- `"env"`: injects your OpenAI key securely

## 🌍 Environment Setup

Create a `.env` file (auto-loaded with python-dotenv) or export the key manually:

```env
OPENAI_API_KEY=your-openai-key
```

Or:

```bash
export OPENAI_API_KEY=your-openai-key
```

## 📦 Dependencies

Installed during the Docker build:

- openai
- requests
- python-dotenv
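To run `server.py` outside Docker (e.g., for the manual test above), install the same packages locally:

```bash
pip install openai requests python-dotenv
```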
## 📁 Project Structure

```bash
.
├── Dockerfile   # Docker build for the MCP server
├── LICENSE      # Project license
├── README.md    # You're reading it!
└── server.py    # Main stdio server implementation
```

## 🔐 Security Notes

- Never commit `.env` files or API keys.
- Store secrets in secure environment variables or secret managers.
```
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
```dockerfile
FROM python:3.11-slim

WORKDIR /app

# Install runtime dependencies in a single layer
RUN pip install --no-cache-dir requests python-dotenv openai

COPY . .

CMD ["python", "-u", "server.py"]
```
--------------------------------------------------------------------------------
/server.py:
--------------------------------------------------------------------------------
```python
import os
import json
import time
import logging
import asyncio
import sys
import threading
from typing import Dict, Any

from dotenv import load_dotenv
from openai import OpenAI
from openai.types.chat import ChatCompletion

# Set up logging (stderr by default, so stdout stays reserved for JSON responses)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
logger = logging.getLogger("chatgpt_fastmcp")

# Load environment variables (a local .env file is picked up automatically)
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    raise ValueError("Missing OPENAI_API_KEY environment variable")

# Initialize OpenAI client
client = OpenAI(api_key=OPENAI_API_KEY)
logger.info("OpenAI client initialized")

# ChatGPT wrapper class
class ChatGPTClient:
    def __init__(self, model: str = "gpt-4o"):
        self.model = model

    async def ask(self, content: str) -> Dict[str, Any]:
        logger.info(f"Sending to ChatGPT: '{content}'")
        try:
            response: ChatCompletion = client.chat.completions.create(
                model=self.model,
                messages=[{"role": "user", "content": content}],
            )
            output = response.choices[0].message.content
            logger.info("Received response from ChatGPT")
            return {"output": output}
        except Exception as e:
            logger.error(f"OpenAI API error: {e}")
            return {"error": str(e)}

# Initialize the client
chatgpt = ChatGPTClient()

# Output helper
def send_response(response_data: Dict[str, Any]):
    response = json.dumps(response_data) + "\n"
    sys.stdout.write(response)
    sys.stdout.flush()

# stdin monitor loop for server mode
def monitor_stdin():
    while True:
        try:
            line = sys.stdin.readline()
            if not line:
                # EOF: the client closed stdin, so stop reading
                break
            line = line.strip()
            if not line:
                continue

            try:
                data = json.loads(line)
                handle_request(data)
            except json.JSONDecodeError as e:
                logger.error(f"JSON decode error: {e}")

        except Exception as e:
            logger.error(f"Exception in monitor_stdin: {e}")
            time.sleep(0.1)

# Request router
def handle_request(data: Dict[str, Any]):
    if not isinstance(data, dict):
        send_response({"error": "Invalid request format"})
        return

    method = data.get("method")
    if method == "tools/call":
        tool_name = data.get("params", {}).get("name")
        arguments = data.get("params", {}).get("arguments", {})
        if tool_name == "ask_chatgpt":
            content = arguments.get("content", "")
            result = asyncio.run(chatgpt.ask(content))
            send_response({"result": result})
        else:
            send_response({"error": "tool not found"})

    elif method == "tools/discover":
        send_response({
            "result": [
                {
                    "name": "ask_chatgpt",
                    "description": (
                        "Sends the provided text ('content') to an external ChatGPT (gpt-4o) model "
                        "for analysis, summarization, comparison, or generation tasks. Use this "
                        "ONLY when specifically asked to get ChatGPT's perspective or to perform complex "
                        "analysis beyond the primary assistant's capabilities (e.g., a detailed security "
                        "review of a config, summarizing a large document). Use this tool to augment your "
                        "own AI capabilities with access to another external Large Language Model."
                    ),
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "content": {
                                "type": "string",
                                "description": "The text content (e.g., configuration, question, document snippet) to send to ChatGPT for processing."
                            }
                        },
                        "required": ["content"]
                    }
                }
            ]
        })

    else:
        send_response({"error": "unknown method"})

# Entry point
if __name__ == "__main__":
    logger.info("Starting Ask ChatGPT MCP Server")

    if "--oneshot" in sys.argv:
        # One-shot mode: read a single request from stdin, answer it, and exit
        try:
            line = sys.stdin.readline().strip()
            data = json.loads(line)
            handle_request(data)
        except Exception as e:
            logger.error(f"Oneshot error: {e}")
            send_response({"error": str(e)})

    else:
        # Server mode: keep handling newline-delimited requests until stdin closes
        stdin_thread = threading.Thread(target=monitor_stdin, daemon=True)
        stdin_thread.start()
        try:
            while stdin_thread.is_alive():
                time.sleep(1)
        except KeyboardInterrupt:
            logger.info("Shutting down")
```