# Directory Structure
```
├── LICENSE
├── preview.png
├── pyproject.toml
├── README.md
└── src
└── mcp_server_openai
├── __init__.py
├── llm.py
├── server.py
└── test_openai.py
```
# Files
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # OpenAI MCP Server
2 |
3 | Query OpenAI models directly from Claude using the MCP protocol.
4 |
5 | 
6 |
7 | ## Setup
8 |
9 | Add to `claude_desktop_config.json`:
10 |
11 | ```json
12 | {
13 |   "mcpServers": {
14 |     "openai-server": {
15 |       "command": "python",
16 |       "args": ["-m", "src.mcp_server_openai.server"],
17 |       "env": {
18 |         "PYTHONPATH": "C:/path/to/your/mcp-server-openai",
19 |         "OPENAI_API_KEY": "your-key-here"
20 |       }
21 |     }
22 |   }
23 | }
24 | ```
25 |
26 | ## Development
27 | ```bash
28 | git clone https://github.com/pierrebrunelle/mcp-server-openai
29 | cd mcp-server-openai
30 | pip install -e ".[dev]"
31 | ```
32 |
33 | ## Testing
34 | ```bash
35 | # Requires OPENAI_API_KEY in the environment; run from the project root
36 | pytest -v src/mcp_server_openai/test_openai.py -s
37 |
38 | # Sample test output:
39 | Testing OpenAI API call...
40 | OpenAI Response: Hello! I'm doing well, thank you for asking...
41 | PASSED
42 | ```
43 |
44 | ## License
45 | MIT License
46 |
```
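
Because `pyproject.toml` also registers a `mcp-server-openai` console script, an installed copy can be pointed to directly, without the `PYTHONPATH` shim. A sketch of that alternative config, assuming `pip install -e .` has been run in an environment whose scripts are on a PATH visible to Claude Desktop:

```json
{
  "mcpServers": {
    "openai-server": {
      "command": "mcp-server-openai",
      "env": { "OPENAI_API_KEY": "your-key-here" }
    }
  }
}
```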
--------------------------------------------------------------------------------
/src/mcp_server_openai/__init__.py:
--------------------------------------------------------------------------------
```python
1 | from .server import main, serve
2 | from .llm import LLMConnector
3 |
4 | __version__ = "0.1.0"
```
--------------------------------------------------------------------------------
/src/mcp_server_openai/test_openai.py:
--------------------------------------------------------------------------------
```python
1 | import os
2 |
3 | import pytest
4 |
5 | from .llm import LLMConnector
6 |
7 | @pytest.mark.asyncio
8 | async def test_ask_openai():
9 |     # Live-API test: read the key from the environment rather than hardcoding it.
10 |     api_key = os.getenv("OPENAI_API_KEY")
11 |     if not api_key:
12 |         pytest.skip("OPENAI_API_KEY is not set")
13 |     print("\nTesting OpenAI API call...")
14 |     connector = LLMConnector(api_key)
15 |     response = await connector.ask_openai("Hello, how are you?")
16 |     print(f"OpenAI Response: {response}")
17 |     assert isinstance(response, str)
18 |     assert len(response) > 0
```
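
The test above exercises the live API. For CI or offline runs, the client can be swapped for a mock; a minimal sketch (the test name and stub are illustrative, mirroring only the `choices[0].message.content` path that `ask_openai` reads):

```python
import pytest
from unittest.mock import AsyncMock, MagicMock

from mcp_server_openai.llm import LLMConnector

@pytest.mark.asyncio
async def test_ask_openai_offline():
    connector = LLMConnector("dummy-key")
    # Replace the real AsyncOpenAI client with a stub whose response mimics
    # the shape ask_openai consumes: response.choices[0].message.content.
    fake_choice = MagicMock()
    fake_choice.message.content = "Hi there!"
    connector.client = MagicMock()
    connector.client.chat.completions.create = AsyncMock(
        return_value=MagicMock(choices=[fake_choice])
    )

    response = await connector.ask_openai("Hello")
    assert response == "Hi there!"
```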
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
```toml
1 | [project]
2 | name = "mcp-server-openai"
3 | version = "0.1.0"
4 | description = "MCP server for OpenAI API integration"
5 | requires-python = ">=3.10"
6 | dependencies = [
7 |     "mcp>=0.9.1",
8 |     "openai>=1.0.0",
9 |     "click>=8.0.0"
10 | ]
11 |
12 | [project.optional-dependencies]
13 | dev = ["pytest", "pytest-asyncio"]
14 |
15 | [build-system]
16 | requires = ["hatchling"]
17 | build-backend = "hatchling.build"
18 |
19 | [project.scripts]
20 | mcp-server-openai = "mcp_server_openai.server:main"
```
--------------------------------------------------------------------------------
/src/mcp_server_openai/llm.py:
--------------------------------------------------------------------------------
```python
1 | import logging
2 | from openai import AsyncOpenAI
3 |
4 | logger = logging.getLogger(__name__)
5 |
6 | class LLMConnector:
7 |     def __init__(self, openai_api_key: str):
8 |         self.client = AsyncOpenAI(api_key=openai_api_key)
9 |
10 |     async def ask_openai(self, query: str, model: str = "gpt-4", temperature: float = 0.7, max_tokens: int = 500) -> str:
11 |         """Send one chat-completion request and return the assistant's text reply."""
12 |         try:
13 |             response = await self.client.chat.completions.create(
14 |                 messages=[
15 |                     {"role": "system", "content": "You are a helpful assistant."},
16 |                     {"role": "user", "content": query}
17 |                 ],
18 |                 model=model,
19 |                 temperature=temperature,
20 |                 max_tokens=max_tokens
21 |             )
22 |             return response.choices[0].message.content
23 |         except Exception as e:
24 |             logger.error(f"Failed to query OpenAI: {str(e)}")
25 |             raise
```
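
For a quick manual check outside MCP, `LLMConnector` can be driven directly with asyncio. A minimal sketch, assuming the package has been installed (`pip install -e .`) and `OPENAI_API_KEY` is exported:

```python
import asyncio
import os

from mcp_server_openai.llm import LLMConnector

async def main() -> None:
    # The key comes from the environment, matching how the server receives it.
    connector = LLMConnector(os.environ["OPENAI_API_KEY"])
    reply = await connector.ask_openai("Say hello in one sentence.", max_tokens=50)
    print(reply)

if __name__ == "__main__":
    asyncio.run(main())
```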
--------------------------------------------------------------------------------
/src/mcp_server_openai/server.py:
--------------------------------------------------------------------------------
```python
1 | import asyncio
2 | import logging
3 | import sys
4 |
5 | import click
6 | import mcp.server.stdio
7 | import mcp.types as types
8 | from mcp.server import Server, NotificationOptions
9 | from mcp.server.models import InitializationOptions
10 |
11 | from .llm import LLMConnector
12 |
13 | logging.basicConfig(level=logging.DEBUG)
14 | logger = logging.getLogger(__name__)
15 |
16 | def serve(openai_api_key: str) -> Server:
17 |     server = Server("openai-server")
18 |     connector = LLMConnector(openai_api_key)
19 |
20 |     @server.list_tools()
21 |     async def handle_list_tools() -> list[types.Tool]:
22 |         return [
23 |             types.Tool(
24 |                 name="ask-openai",
25 |                 description="Ask an OpenAI model a direct question",
26 |                 inputSchema={
27 |                     "type": "object",
28 |                     "properties": {
29 |                         "query": {"type": "string", "description": "Ask assistant"},
30 |                         "model": {"type": "string", "default": "gpt-4", "enum": ["gpt-4", "gpt-3.5-turbo"]},
31 |                         "temperature": {"type": "number", "default": 0.7, "minimum": 0, "maximum": 2},
32 |                         "max_tokens": {"type": "integer", "default": 500, "minimum": 1, "maximum": 4000}
33 |                     },
34 |                     "required": ["query"]
35 |                 }
36 |             )
37 |         ]
38 |
39 |     @server.call_tool()
40 |     async def handle_tool_call(name: str, arguments: dict | None) -> list[types.TextContent]:
41 |         try:
42 |             if not arguments:
43 |                 raise ValueError("No arguments provided")
44 |
45 |             if name == "ask-openai":
46 |                 response = await connector.ask_openai(
47 |                     query=arguments["query"],
48 |                     model=arguments.get("model", "gpt-4"),
49 |                     temperature=arguments.get("temperature", 0.7),
50 |                     max_tokens=arguments.get("max_tokens", 500)
51 |                 )
52 |                 return [types.TextContent(type="text", text=f"OpenAI Response:\n{response}")]
53 |
54 |             raise ValueError(f"Unknown tool: {name}")
55 |         except Exception as e:
56 |             logger.error(f"Tool call failed: {str(e)}")
57 |             return [types.TextContent(type="text", text=f"Error: {str(e)}")]
58 |
59 |     return server
60 |
61 | @click.command()
62 | @click.option("--openai-api-key", envvar="OPENAI_API_KEY", required=True)
63 | def main(openai_api_key: str):
64 |     try:
65 |         async def _run():
66 |             async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
67 |                 server = serve(openai_api_key)
68 |                 await server.run(
69 |                     read_stream, write_stream,
70 |                     InitializationOptions(
71 |                         server_name="openai-server",
72 |                         server_version="0.1.0",
73 |                         capabilities=server.get_capabilities(
74 |                             notification_options=NotificationOptions(),
75 |                             experimental_capabilities={}
76 |                         )
77 |                     )
78 |                 )
79 |         asyncio.run(_run())
80 |     except KeyboardInterrupt:
81 |         logger.info("Server stopped by user")
82 |     except Exception:
83 |         logger.exception("Server failed")
84 |         sys.exit(1)
85 |
86 | if __name__ == "__main__":
87 |     main()
```
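
To exercise the full stdio round trip without Claude Desktop, the MCP Python SDK's client helpers can spawn the server as a subprocess and call the tool. A sketch, assuming the package is installed so `mcp_server_openai.server` is importable and `OPENAI_API_KEY` is set:

```python
import asyncio
import os

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

async def main() -> None:
    # Launch the server over stdio, the same way Claude Desktop would.
    params = StdioServerParameters(
        command="python",
        args=["-m", "mcp_server_openai.server"],
        env={"OPENAI_API_KEY": os.environ["OPENAI_API_KEY"]},
    )
    async with stdio_client(params) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            tools = await session.list_tools()
            print([tool.name for tool in tools.tools])  # expect ['ask-openai']
            result = await session.call_tool("ask-openai", {"query": "Hello!"})
            print(result.content[0].text)

if __name__ == "__main__":
    asyncio.run(main())
```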