# Directory Structure
```
├── .gitignore
├── .idea
│   ├── .gitignore
│   ├── git_toolbox_blame.xml
│   ├── modules.xml
│   └── server-vercel-ai.iml
├── package.json
├── README.md
├── src
│   ├── index.ts
│   └── types.ts
└── tsconfig.json
```
# Files
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
```
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
/node_modules
/.pnp
.pnp.js
# testing
/coverage
# cache
.turbo
# misc
.DS_Store
*.pem
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
.pnpm-debug.log*
# local env files
.env*
out/
build/
dist/
# plasmo - https://www.plasmo.com
.plasmo
# bpp - http://bpp.browser.market/
keys.json
# typescript
*.tsbuildinfo
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
````markdown
# AI MCP Server
An MCP server implementation supporting OpenAI and Mistral using the Vercel AI SDK.
## Supported Providers
### OpenAI
- Models: gpt-4o, gpt-4, gpt-4-turbo
- Features: Structured output, system prompts
### Mistral
- Models: mistral-large-latest, mistral-small-latest, pixtral-large-latest, pixtral-12b-2409
- Features: Safe prompts, system prompts
## Prerequisites
- Node.js 18 or higher
- OpenAI API key (for OpenAI features)
- Mistral API key (for Mistral features)
## Installation
1. Clone the repository:
```bash
git clone https://github.com/yourusername/openai-mcp-server.git
cd openai-mcp-server
```
````
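The README breaks off after the clone step. The remaining setup is not in the repo, but it follows directly from package.json and the dotenv call in src/index.ts: run `npm install`, put `OPENAI_API_KEY` and/or `MISTRAL_API_KEY` in a `.env` file (these are the default environment variable names the AI SDK's OpenAI and Mistral providers read), then `npm run build` and `npm start` (or `npm run dev` for watch mode).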
--------------------------------------------------------------------------------
/.idea/git_toolbox_blame.xml:
--------------------------------------------------------------------------------
```
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="GitToolBoxBlameSettings">
<option name="version" value="2" />
</component>
</project>
```
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
```
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectModuleManager">
<modules>
<module fileurl="file://$PROJECT_DIR$/.idea/server-vercel-ai.iml" filepath="$PROJECT_DIR$/.idea/server-vercel-ai.iml" />
</modules>
</component>
</project>
```
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
```json
{
"compilerOptions": {
"target": "ES2020",
"module": "ES2020",
"moduleResolution": "node",
"esModuleInterop": true,
"outDir": "./dist",
"rootDir": "./src",
"strict": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}
```
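Note that package.json sets `"type": "module"`, so the ES2020 output that tsc writes to dist/ runs as native ESM under Node. That is why the relative imports in src/index.ts carry explicit `.js` extensions (e.g. `'./types.js'`): Node's ESM resolver does not infer extensions for the compiled files.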
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
```json
{
"name": "openai-mcp-server",
"version": "1.0.0",
"description": "MCP server for OpenAI integration using Vercel AI SDK",
"type": "module",
"main": "dist/index.js",
"scripts": {
"build": "tsc",
"start": "node dist/index.js",
"dev": "tsc-watch --onSuccess \"node dist/index.js\"",
"lint": "eslint src/**/*.ts",
"format": "prettier --write \"src/**/*.ts\""
},
"dependencies": {
"@ai-sdk/openai": "^1.0.6",
"@ai-sdk/mistral": "^1.0.4",
"@modelcontextprotocol/sdk": "^1.0.3",
"ai": "^4.0.11",
"dotenv": "^16.4.7",
"zod": "^3.23.8"
},
"devDependencies": {
"@types/node": "^22.10.1",
"@typescript-eslint/eslint-plugin": "^8.17.0",
"@typescript-eslint/parser": "^8.17.0",
"eslint": "^9.16.0",
"prettier": "^3.4.2",
"tsc-watch": "^6.0.4",
"typescript": "^5.7.2"
},
"engines": {
"node": ">=18.0.0"
}
}
```
--------------------------------------------------------------------------------
/src/types.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from 'zod';
// Model enums
export const OpenAIModels = z.enum(['gpt-4o', 'gpt-4', 'gpt-4-turbo']);
export const MistralModels = z.enum([
'pixtral-large-latest',
'mistral-large-latest',
'mistral-small-latest',
'pixtral-12b-2409'
]);
// Input types for tools
export const GenerateTextInput = z.object({
  provider: z.enum(['openai', 'mistral']),
  model: z.string().min(1, 'model is required'),
  agentId: z.string().optional(), // reserved for a possible future mistral-agent provider
  prompt: z.string(),
  system: z.string().optional(),
  safePrompt: z.boolean().optional() // Mistral-specific option
});
export const GenerateStructuredInput = z.object({
provider: z.enum(['openai', 'mistral']),
model: z.union([OpenAIModels, MistralModels]),
prompt: z.string(),
schema: z.object({
type: z.literal('object'),
properties: z.record(z.any()),
required: z.array(z.string()).optional()
})
});
// Response types
export interface OpenAIResponse {
text: string;
usage?: {
promptTokens: number;
completionTokens: number;
totalTokens: number;
};
}
export interface StructuredResponse<T = any> {
object: T;
usage?: {
promptTokens: number;
completionTokens: number;
totalTokens: number;
};
}
// Error types
export interface OpenAIError {
message: string;
type: string;
code?: string;
param?: string;
}
export type GenerateTextInputType = z.infer<typeof GenerateTextInput>;
```
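These schemas are the validation boundary for tool arguments in src/index.ts. A quick sketch of how they behave at that boundary (the example inputs are illustrative, not from the repo):
```typescript
import { GenerateTextInput, GenerateStructuredInput } from './types.js';

// Valid: provider, model, and prompt are required; system/safePrompt are optional.
const ok = GenerateTextInput.parse({
  provider: 'mistral',
  model: 'mistral-small-latest',
  prompt: 'Summarize MCP in one sentence.',
  safePrompt: true
});

// Invalid: safeParse reports failures instead of throwing,
// e.g. an unknown provider or an empty model string.
const bad = GenerateTextInput.safeParse({
  provider: 'anthropic',
  model: '',
  prompt: 'hi'
});
if (!bad.success) {
  console.error(bad.error.issues);
}

// GenerateStructuredInput additionally constrains model to the known
// enums and expects a JSON-Schema-shaped object for `schema`.
const structured = GenerateStructuredInput.parse({
  provider: 'openai',
  model: 'gpt-4o',
  prompt: 'Extract a name and age.',
  schema: {
    type: 'object',
    properties: { name: { type: 'string' }, age: { type: 'number' } },
    required: ['name']
  }
});

console.log(ok.provider, structured.model);
```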
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
```typescript
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
CallToolRequestSchema,
ListToolsRequestSchema,
} from "@modelcontextprotocol/sdk/types.js";
import { openai } from '@ai-sdk/openai';
import { mistral } from '@ai-sdk/mistral';
import { generateText, generateObject, jsonSchema } from 'ai';
import dotenv from 'dotenv';
import { GenerateTextInput, GenerateStructuredInput } from './types.js';
dotenv.config();
const server = new Server(
{
name: "ai-mcp-server",
version: "1.0.0",
},
{
capabilities: {
tools: {},
},
}
);
server.setRequestHandler(ListToolsRequestSchema, async () => {
return {
tools: [
{
name: "generate_text",
description: "Generate text using OpenAI or Mistral models",
inputSchema: {
type: "object",
properties: {
provider: {
type: "string",
enum: ["openai", "mistral"],
description: "The AI provider to use"
},
model: {
type: "string",
description: "The model to use. For OpenAI: gpt-4-turbo, gpt-4, gpt-3.5-turbo. For Mistral: mistral-large-latest, mistral-small-latest, etc."
},
prompt: {
type: "string",
description: "The prompt to send to the model"
},
system: {
  type: "string",
  description: "Optional system prompt (supported by both providers)"
},
safePrompt: {
  type: "boolean",
  description: "Mistral-specific: Whether to inject a safety prompt"
}
},
required: ["provider", "model", "prompt"]
}
},
{
name: "generate_structured",
description: "Generate structured data using AI models",
inputSchema: {
type: "object",
properties: {
provider: {
type: "string",
enum: ["openai", "mistral"],
description: "The AI provider to use"
},
model: {
type: "string",
description: "The model to use"
},
prompt: {
type: "string",
description: "Description of what to generate"
},
schema: {
  type: "object",
  description: "A JSON Schema object describing the desired structured output"
}
},
required: ["provider", "model", "prompt", "schema"]
}
}
]
};
});
// Map provider names to their AI SDK model factories
const providers = {
'openai': openai,
// 'anthropic': anthropic,
'mistral': mistral,
};
server.setRequestHandler(CallToolRequestSchema, async (request) => {
try {
switch (request.params.name) {
case 'generate_text': {
  const input = GenerateTextInput.parse(request.params.arguments);
  // Look up the provider factory, failing fast with the list of supported providers
  const providerFunction = providers[input.provider];
  if (!providerFunction) {
    throw new Error(`Provider ${input.provider} is not supported. Supported providers: ${Object.keys(providers).join(', ')}`);
  }
  // safePrompt is a Mistral-specific setting, so only forward it to the Mistral provider
  const model = input.provider === 'mistral'
    ? mistral(input.model, input.safePrompt != null ? { safePrompt: input.safePrompt } : {})
    : openai(input.model);
  const result = await generateText({
    model,
    prompt: input.prompt,
    ...(input.system != null ? { system: input.system } : {})
  });
return {
content: [{
type: "text",
text: result.text
}]
};
}
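case 'generate_structured': {
  // generate_structured is advertised in ListTools but was previously unhandled.
  // A minimal sketch using the AI SDK's generateObject with the caller-supplied
  // JSON schema (wrapped via the jsonSchema helper); treat the exact option shape
  // as an assumption to verify against the pinned `ai` version.
  const input = GenerateStructuredInput.parse(request.params.arguments);
  const model = input.provider === 'mistral'
    ? mistral(input.model)
    : openai(input.model);
  const result = await generateObject({
    model,
    prompt: input.prompt,
    schema: jsonSchema(input.schema)
  });
  return {
    content: [{
      type: "text",
      text: JSON.stringify(result.object, null, 2)
    }]
  };
}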
default:
  throw new Error(`Unknown tool: ${request.params.name}`);
}
} catch (error) {
console.error('Detailed error:', error); // log the full error server-side for debugging
return {
content: [{
type: "text",
text: `Error: ${error instanceof Error ? error.message : String(error)}`
}],
isError: true
};
}
});
// Start the server
async function main() {
const transport = new StdioServerTransport();
await server.connect(transport);
console.error("OpenAI MCP Server running on stdio");
}
main().catch((error) => {
console.error("Server error:", error);
process.exit(1);
});
```
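To exercise the server end to end, an MCP client can spawn the built server over stdio and call its tools. A minimal sketch using the same @modelcontextprotocol/sdk pinned in package.json (the client class names are the SDK's own; verify the exact option shapes against the installed version):
```typescript
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

async function main() {
  // Spawn the compiled server as a child process speaking MCP over stdio.
  const transport = new StdioClientTransport({
    command: "node",
    args: ["dist/index.js"],
  });
  const client = new Client(
    { name: "ai-mcp-client", version: "1.0.0" },
    { capabilities: {} }
  );
  await client.connect(transport);

  // List the advertised tools, then call generate_text.
  const { tools } = await client.listTools();
  console.log(tools.map((t) => t.name));

  const result = await client.callTool({
    name: "generate_text",
    arguments: {
      provider: "mistral",
      model: "mistral-small-latest",
      prompt: "Say hello in five words.",
    },
  });
  console.log(result.content);

  await client.close();
}

main().catch(console.error);
```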