# Directory Structure
```
├── .gitignore
├── .idea
│   ├── .gitignore
│   ├── git_toolbox_blame.xml
│   ├── modules.xml
│   └── server-vercel-ai.iml
├── package.json
├── README.md
├── src
│   ├── index.ts
│   └── types.ts
└── tsconfig.json
```
# Files
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
```
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 | 
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
 1 | 
 2 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
 3 | 
 4 | # dependencies
 5 | /node_modules
 6 | /.pnp
 7 | .pnp.js
 8 | 
 9 | # testing
10 | /coverage
11 | 
12 | # cache
13 | .turbo
14 | 
15 | # misc
16 | .DS_Store
17 | *.pem
18 | 
19 | # debug
20 | npm-debug.log*
21 | yarn-debug.log*
22 | yarn-error.log*
23 | .pnpm-debug.log*
24 | 
25 | # local env files
26 | .env*
27 | 
28 | out/
29 | build/
30 | dist/
31 | 
32 | # plasmo - https://www.plasmo.com
33 | .plasmo
34 | 
35 | # bpp - http://bpp.browser.market/
36 | keys.json
37 | 
38 | # typescript
39 | .tsbuildinfo
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
 1 | # AI MCP Server
 2 | 
 3 | An MCP server implementation supporting OpenAI and Mistral using the Vercel AI SDK.
 4 | 
 5 | ## Supported Providers
 6 | 
 7 | ### OpenAI
 8 | - Models: gpt-4o, gpt-4, gpt-4-turbo
 9 | - Features: Structured output, system prompts
10 | 
11 | ### Mistral
12 | - Models: mistral-large-latest, mistral-small-latest, pixtral-large-latest, pixtral-12b-2409
13 | - Features: Safe prompts, system prompts
14 | 
15 | ## Prerequisites
16 | 
17 | - Node.js 18 or higher
18 | - OpenAI API key (for OpenAI features)
19 | - Mistral API key (for Mistral features)
20 | 
21 | ## Installation
22 | 
23 | 1. Clone the repository:
24 | ```bash
25 | git clone https://github.com/yourusername/openai-mcp-server.git
26 | cd openai-mcp-server
```
--------------------------------------------------------------------------------
/.idea/git_toolbox_blame.xml:
--------------------------------------------------------------------------------
```
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project version="4">
3 |   <component name="GitToolBoxBlameSettings">
4 |     <option name="version" value="2" />
5 |   </component>
6 | </project>
```
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
```
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project version="4">
3 |   <component name="ProjectModuleManager">
4 |     <modules>
5 |       <module fileurl="file://$PROJECT_DIR$/.idea/server-vercel-ai.iml" filepath="$PROJECT_DIR$/.idea/server-vercel-ai.iml" />
6 |     </modules>
7 |   </component>
8 | </project>
```
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
```json
 1 | {
 2 |   "compilerOptions": {
 3 | 	"target": "ES2020",
 4 | 	"module": "ES2020",
 5 | 	"moduleResolution": "node",
 6 | 	"esModuleInterop": true,
 7 | 	"outDir": "./dist",
 8 | 	"rootDir": "./src",
 9 | 	"strict": true,
10 | 	"skipLibCheck": true,
11 | 	"forceConsistentCasingInFileNames": true,
12 | 	"resolveJsonModule": true
13 |   },
14 |   "include": ["src/**/*"],
15 |   "exclude": ["node_modules", "dist"]
16 | }
```
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
```json
 1 | {
 2 |   "name": "openai-mcp-server",
 3 |   "version": "1.0.0",
 4 |   "description": "MCP server for OpenAI and Mistral integration using the Vercel AI SDK",
 5 |   "type": "module",
 6 |   "main": "dist/index.js",
 7 |   "scripts": {
 8 | 	"build": "tsc",
 9 | 	"start": "node dist/index.js",
10 | 	"dev": "tsc-watch --onSuccess \"node dist/index.js\"",
11 | 	"lint": "eslint src/**/*.ts",
12 | 	"format": "prettier --write \"src/**/*.ts\""
13 |   },
14 |   "dependencies": {
15 | 	"@ai-sdk/openai": "^1.0.6",
16 | 	"@ai-sdk/mistral": "^1.0.4",
17 | 	"@modelcontextprotocol/sdk": "^1.0.3",
18 | 	"ai": "^4.0.11",
19 | 	"dotenv": "^16.4.7",
20 | 	"zod": "^3.23.8"
21 |   },
22 |   "devDependencies": {
23 | 	"@types/node": "^22.10.1",
24 | 	"@typescript-eslint/eslint-plugin": "^8.17.0",
25 | 	"@typescript-eslint/parser": "^8.17.0",
26 | 	"eslint": "^9.16.0",
27 | 	"prettier": "^3.4.2",
28 | 	"tsc-watch": "^6.0.4",
29 | 	"typescript": "^5.7.2"
30 |   },
31 |   "engines": {
32 | 	"node": ">=18.0.0"
33 |   }
34 | }
```
--------------------------------------------------------------------------------
/src/types.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from 'zod';

// Model enums — the closed sets of model ids accepted by GenerateStructuredInput.
// (generate_text validates `model` as a plain string, so these do not constrain it.)
export const OpenAIModels = z.enum(['gpt-4o', 'gpt-4', 'gpt-4-turbo']);
export const MistralModels = z.enum([
    'pixtral-large-latest',
    'mistral-large-latest',
    'mistral-small-latest',
    'pixtral-12b-2409'
]);
11 | 
12 | // Input types for tools
13 | export const GenerateTextInput = z.object({
14 |     provider: z.enum(["openai", "mistral"]),
15 |     // provider: z.enum(["openai", "mistral", "mistral-agent"]),
16 |     model: z.string(),
17 |     agentId: z.string().optional(),
18 |     prompt: z.string(),
19 |     system: z.string().optional(),
20 |     safePrompt: z.boolean().optional() // Mistral-specific option
21 | }).refine((data): boolean => {
22 |     // If it's a mistral-agent, require agentId
23 |     // if (data.provider === 'mistral-agent') {
24 |     //     return !!data.agentId;
25 |     // }
26 |     // For other providers (openai, mistral), require model
27 |     return !!data.model;
28 | }, {
29 |     message: "agentId is required for mistral-agent, model is required for other providers"
30 | });
31 | 
32 | export const GenerateStructuredInput = z.object({
33 |     provider: z.enum(['openai', 'mistral']),
34 |     model: z.union([OpenAIModels, MistralModels]),
35 |     prompt: z.string(),
36 |     schema: z.object({
37 |         type: z.literal('object'),
38 |         properties: z.record(z.any()),
39 |         required: z.array(z.string()).optional()
40 |     })
41 | });
42 | 
// Response types

// Plain-text generation result: the generated text plus optional token usage.
export interface OpenAIResponse {
    text: string;
    usage?: {
        promptTokens: number;
        completionTokens: number;
        totalTokens: number;
    };
}

// Structured generation result; T is the shape of the generated object.
export interface StructuredResponse<T = any> {
    object: T;
    usage?: {
        promptTokens: number;
        completionTokens: number;
        totalTokens: number;
    };
}

// Error types

// OpenAI-style error payload fields surfaced by the provider.
export interface OpenAIError {
    message: string;
    type: string;
    code?: string;
    param?: string;
}


// Inferred TypeScript type of a validated generate_text input.
export type GenerateTextInputType = z.infer<typeof GenerateTextInput>;
```
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
```typescript
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
    CallToolRequestSchema,
    ListToolsRequestSchema,
} from "@modelcontextprotocol/sdk/types.js";
import { openai } from '@ai-sdk/openai';
import { mistral } from '@ai-sdk/mistral';
import { generateObject, generateText, jsonSchema } from 'ai';
import dotenv from 'dotenv';
import {GenerateTextInput, GenerateStructuredInput, OpenAIModels} from './types.js';
 12 | 
 13 | dotenv.config();
 14 | 
// MCP server instance; advertises only the `tools` capability.
const server = new Server(
    {
        name: "ai-mcp-server",
        version: "1.0.0",
    },
    {
        capabilities: {
            tools: {},
        },
    }
);
 26 | 
 27 | server.setRequestHandler(ListToolsRequestSchema, async () => {
 28 |     return {
 29 |         tools: [
 30 |             {
 31 |                 name: "generate_text",
 32 |                 description: "Generate text using OpenAI or Mistral models",
 33 |                 inputSchema: {
 34 |                     type: "object",
 35 |                     properties: {
 36 |                         provider: {
 37 |                             type: "string",
 38 |                             enum: ["openai", "mistral"],
 39 |                             description: "The AI provider to use"
 40 |                         },
 41 |                         model: {
 42 |                             type: "string",
 43 |                             description: "The model to use. For OpenAI: gpt-4-turbo, gpt-4, gpt-3.5-turbo. For Mistral: mistral-large-latest, mistral-small-latest, etc."
 44 |                         },
 45 |                         prompt: {
 46 |                             type: "string",
 47 |                             description: "The prompt to send to the model"
 48 |                         },
 49 |                         system: {
 50 |                             type: "string",
 51 |                             description: "Optional system prompt (supported by both providers)",
 52 |                             optional: true
 53 |                         },
 54 |                         safePrompt: {
 55 |                             type: "boolean",
 56 |                             description: "Mistral-specific: Whether to inject a safety prompt",
 57 |                             optional: true
 58 |                         }
 59 |                     },
 60 |                     required: ["provider", "model", "prompt"]
 61 |                 }
 62 |             },
 63 |             {
 64 |                 name: "generate_structured",
 65 |                 description: "Generate structured data using AI models",
 66 |                 inputSchema: {
 67 |                     type: "object",
 68 |                     properties: {
 69 |                         provider: {
 70 |                             type: "string",
 71 |                             enum: ["openai", "mistral"],
 72 |                             description: "The AI provider to use"
 73 |                         },
 74 |                         model: {
 75 |                             type: "string",
 76 |                             description: "The model to use"
 77 |                         },
 78 |                         prompt: {
 79 |                             type: "string",
 80 |                             description: "Description of what to generate"
 81 |                         },
 82 |                         schema: {
 83 |                             type: "object",
 84 |                             description: "The schema for the structured output"
 85 |                         }
 86 |                     },
 87 |                     required: ["provider", "model", "prompt", "schema"]
 88 |                 }
 89 |             }
 90 |         ]
 91 |     };
 92 | });
 93 | // First define the provider handlers
 94 | const providers = {
 95 |     'openai': openai,
 96 |     // 'anthropic': anthropic,
 97 |     'mistral': mistral,
 98 | };
 99 | 
100 | server.setRequestHandler(CallToolRequestSchema, async (request) => {
101 |     try {
102 |         switch (request.params.name) {
103 |             case 'generate_text': {
104 |                 const input = GenerateTextInput.parse(request.params.arguments);
105 | 
106 |                 // Get the provider function
107 |                 const providerFunction = providers[input.provider];
108 |                 if (!providerFunction) {
109 |                     throw new Error(`Provider ${input.provider} is not supported. Supported providers: ${Object.keys(providers).join(', ')}`);
110 |                 }
111 | 
112 |                 if (!providerFunction) {
113 |                     throw new Error(`Provider ${input.provider} is not supported`);
114 |                 }
115 | 
116 |                 const model = providerFunction(input.model, {});
117 | 
118 |                 const result = await generateText({
119 |                     model,
120 |                     prompt: input.prompt,
121 |                     ...(input.system != null ? {system: input.system} : {})
122 |                 });
123 | 
124 |                 return {
125 |                     content: [{
126 |                         type: "text",
127 |                         text: result.text
128 |                     }]
129 |                 };
130 |             }
131 | 
132 |             default:
133 |                 throw new Error("Unknown tool");
134 |         }
135 |     } catch (error) {
136 |         console.error('Detailed error:', error); // Add detailed error logging
137 |         return {
138 |             content: [{
139 |                 type: "text",
140 |                 text: `Error: ${error instanceof Error ? error.message : String(error)}`
141 |             }],
142 |             isError: true
143 |         };
144 |     }
145 | });
146 | 
147 | // Start the server
148 | async function main() {
149 |     const transport = new StdioServerTransport();
150 |     await server.connect(transport);
151 |     console.error("OpenAI MCP Server running on stdio");
152 | }
153 | 
154 | main().catch((error) => {
155 |     console.error("Server error:", error);
156 |     process.exit(1);
157 | });
```