# Directory Structure

```
├── LICENSE
├── preview.png
├── pyproject.toml
├── README.md
└── src
    └── mcp_server_openai
        ├── __init__.py
        ├── llm.py
        ├── server.py
        └── test_openai.py
```

# Files

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
# OpenAI MCP Server

Query OpenAI models directly from Claude using MCP protocol.

![preview](preview.png)

## Setup

Add to `claude_desktop_config.json`:

```json
{
  "mcpServers": {
    "openai-server": {
      "command": "python",
      "args": ["-m", "src.mcp_server_openai.server"],
      "env": {
        "PYTHONPATH": "C:/path/to/your/mcp-server-openai",
        "OPENAI_API_KEY": "your-key-here"
      }
    }
  }
}
```
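
Once the package is installed (see Development below), the `mcp-server-openai` console script declared in `pyproject.toml` can replace the `python -m` invocation, avoiding the `PYTHONPATH` entry (a sketch; adjust paths for your machine):

```json
{
  "mcpServers": {
    "openai-server": {
      "command": "mcp-server-openai",
      "env": {
        "OPENAI_API_KEY": "your-key-here"
      }
    }
  }
}
```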

## Development
```bash
git clone https://github.com/pierrebrunelle/mcp-server-openai
cd mcp-server-openai
pip install -e .
```

## Testing
```bash
# Run the test suite from the project root (requires OPENAI_API_KEY to be set)
pytest -v src/mcp_server_openai/test_openai.py -s
```

Sample output:

```
Testing OpenAI API call...
OpenAI Response: Hello! I'm doing well, thank you for asking...
PASSED
```

## License
MIT License

```

--------------------------------------------------------------------------------
/src/mcp_server_openai/__init__.py:
--------------------------------------------------------------------------------

```python
from .server import main, serve
from .llm import LLMConnector

__version__ = "0.1.0"
```

--------------------------------------------------------------------------------
/src/mcp_server_openai/test_openai.py:
--------------------------------------------------------------------------------

```python
import os

import pytest

from .llm import LLMConnector

@pytest.mark.asyncio
async def test_ask_openai():
    print("\nTesting OpenAI API call...")
    # Read the key from the environment instead of hardcoding a placeholder;
    # skip cleanly when it is not set.
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        pytest.skip("OPENAI_API_KEY not set")
    connector = LLMConnector(api_key)
    response = await connector.ask_openai("Hello, how are you?")
    print(f"OpenAI Response: {response}")
    assert isinstance(response, str)
    assert len(response) > 0
```
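
The test above makes a live API call. To keep a variant runnable without a key (e.g. in CI), one option is to stub the network call with pytest's `monkeypatch`; a sketch, not part of the original suite:

```python
import pytest

from .llm import LLMConnector

@pytest.mark.asyncio
async def test_ask_openai_offline(monkeypatch):
    # Replace the real API call with a stub so no key or network is needed.
    async def fake_ask(self, query, **kwargs):
        return f"stub reply to: {query}"

    monkeypatch.setattr(LLMConnector, "ask_openai", fake_ask)
    connector = LLMConnector("dummy-key")
    response = await connector.ask_openai("Hello")
    assert response == "stub reply to: Hello"
```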

--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------

```toml
[project]
name = "mcp-server-openai"
version = "0.1.0"
description = "MCP server for OpenAI API integration"
requires-python = ">=3.10"
dependencies = [
    "mcp>=0.9.1",
    "openai>=1.0.0",
    "click>=8.0.0",
    "pytest-asyncio"
]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[project.scripts]
mcp-server-openai = "mcp_server_openai.server:main"
```
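
Note: `pytest-asyncio` (which pulls in `pytest`) is listed here as a runtime dependency, so a plain `pip install -e .` is enough to run the tests. A common alternative, sketched below rather than taken from the original file, is an extras group for test-only packages:

```toml
[project.optional-dependencies]
test = [
    "pytest",
    "pytest-asyncio"
]
```

With that layout, test dependencies install via `pip install -e ".[test]"`.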

--------------------------------------------------------------------------------
/src/mcp_server_openai/llm.py:
--------------------------------------------------------------------------------

```python
import logging
from openai import AsyncOpenAI

logger = logging.getLogger(__name__)

class LLMConnector:
    """Thin async wrapper around the OpenAI chat completions API."""

    def __init__(self, openai_api_key: str):
        self.client = AsyncOpenAI(api_key=openai_api_key)

    async def ask_openai(self, query: str, model: str = "gpt-4", temperature: float = 0.7, max_tokens: int = 500) -> str:
        try:
            response = await self.client.chat.completions.create(
                messages=[
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": query}
                ],
                model=model,
                temperature=temperature,
                max_tokens=max_tokens
            )
            # message.content is typed Optional; coerce None to "" so the
            # declared str return type holds.
            return response.choices[0].message.content or ""
        except Exception as e:
            logger.error(f"Failed to query OpenAI: {str(e)}")
            raise
```
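
`LLMConnector` can also be exercised on its own, outside the MCP server. A minimal sketch (assuming the package is importable and `OPENAI_API_KEY` is set):

```python
import asyncio
import os

from mcp_server_openai.llm import LLMConnector

async def main():
    connector = LLMConnector(os.environ["OPENAI_API_KEY"])
    # Uses the same defaults the MCP tool exposes (gpt-4, temperature 0.7).
    answer = await connector.ask_openai("Say hello in one sentence.")
    print(answer)

asyncio.run(main())
```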

--------------------------------------------------------------------------------
/src/mcp_server_openai/server.py:
--------------------------------------------------------------------------------

```python
import asyncio
import logging
import sys

import click
import mcp.server.stdio
import mcp.types as types
from mcp.server import Server, NotificationOptions
from mcp.server.models import InitializationOptions

from .llm import LLMConnector

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

def serve(openai_api_key: str) -> Server:
    server = Server("openai-server")
    connector = LLMConnector(openai_api_key)

    @server.list_tools()
    async def handle_list_tools() -> list[types.Tool]:
        return [
            types.Tool(
                name="ask-openai",
                description="Ask my assistant models a direct question",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "query": {"type": "string", "description": "Ask assistant"},
                        "model": {"type": "string", "default": "gpt-4", "enum": ["gpt-4", "gpt-3.5-turbo"]},
                        "temperature": {"type": "number", "default": 0.7, "minimum": 0, "maximum": 2},
                        "max_tokens": {"type": "integer", "default": 500, "minimum": 1, "maximum": 4000}
                    },
                    "required": ["query"]
                }
            )
        ]

    @server.call_tool()
    async def handle_tool_call(name: str, arguments: dict | None) -> list[types.TextContent]:
        try:
            if not arguments:
                raise ValueError("No arguments provided")

            if name == "ask-openai":
                response = await connector.ask_openai(
                    query=arguments["query"],
                    model=arguments.get("model", "gpt-4"),
                    temperature=arguments.get("temperature", 0.7),
                    max_tokens=arguments.get("max_tokens", 500)
                )
                return [types.TextContent(type="text", text=f"OpenAI Response:\n{response}")]

            raise ValueError(f"Unknown tool: {name}")
        except Exception as e:
            logger.error(f"Tool call failed: {str(e)}")
            return [types.TextContent(type="text", text=f"Error: {str(e)}")]

    return server

@click.command()
@click.option("--openai-api-key", envvar="OPENAI_API_KEY", required=True)
def main(openai_api_key: str):
    try:
        async def _run():
            async with mcp.server.stdio.stdio_server() as (read_stream, write_stream):
                server = serve(openai_api_key)
                await server.run(
                    read_stream, write_stream,
                    InitializationOptions(
                        server_name="openai-server",
                        server_version="0.1.0",
                        capabilities=server.get_capabilities(
                            notification_options=NotificationOptions(),
                            experimental_capabilities={}
                        )
                    )
                )
        asyncio.run(_run())
    except KeyboardInterrupt:
        logger.info("Server stopped by user")
    except Exception:
        logger.exception("Server failed")
        sys.exit(1)

if __name__ == "__main__":
    main()
```
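
For a quick end-to-end check outside Claude, the MCP Python SDK's stdio client can drive this server directly. A minimal sketch (run from the project root with `OPENAI_API_KEY` set; `StdioServerParameters`, `stdio_client`, and `ClientSession` come from the `mcp` package):

```python
import asyncio
import os

from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client

async def main():
    # Launch the server as a subprocess, the same way Claude Desktop would.
    params = StdioServerParameters(
        command="python",
        args=["-m", "src.mcp_server_openai.server"],
        env={
            "OPENAI_API_KEY": os.environ["OPENAI_API_KEY"],
            "PATH": os.environ["PATH"],  # keep PATH so "python" resolves
        },
    )
    async with stdio_client(params) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            result = await session.call_tool("ask-openai", {"query": "Hello!"})
            for content in result.content:
                print(content.text)

asyncio.run(main())
```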