# Directory Structure
```
├── .env.example
├── .gitattributes
├── .gitignore
├── config
│   ├── models.yaml
│   └── system_instructions.yaml
├── config.example.ts
├── LICENSE
├── package-lock.json
├── package.json
├── README.md
├── src
│   ├── index.ts
│   ├── providers
│   │   └── openrouter.ts
│   ├── stores
│   │   ├── FileSystemStore.ts
│   │   └── Store.ts
│   └── types
│       ├── conversation.ts
│       ├── errors.ts
│       └── server.ts
└── tsconfig.json
```
# Files
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
```
# Auto detect text files and perform LF normalization
* text=auto
```
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
```
# OpenAI Configuration
OPENAI_API_KEY=your_openai_key_here

# DeepSeek Configuration
DEEPSEEK_API_KEY=your_api_key_here

# OpenRouter Configuration
OPENROUTER_API_KEY=your-openrouter-api-key

# Server Configuration
DATA_DIR=./data/conversations
LOG_LEVEL=info # debug, info, warn, error

# Conversation storage path
CONVERSATIONS_PATH=d:\\Projects\\Conversations
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
.pnpm-debug.log*

# Diagnostic reports (https://nodejs.org/api/report.html)
report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage
*.lcov

# nyc test coverage
.nyc_output

# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (https://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# Snowpack dependency directory (https://snowpack.dev/)
web_modules/

# TypeScript cache
*.tsbuildinfo

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional stylelint cache
.stylelintcache

# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variable files
.env
.env.development.local
.env.test.local
.env.production.local
.env.local

# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache

# Next.js build output
.next
out

# Nuxt.js build / generate output
.nuxt
dist

# Gatsby files
.cache/
# Uncomment the public line if your project uses Gatsby and not Next.js
# https://nextjs.org/blog/next-9-1#public-directory-support
# public

# vuepress build output
.vuepress/dist

# vuepress v2.x temp and cache directory
.temp
.cache

# Docusaurus cache and generated files
.docusaurus

# Serverless directories
.serverless/

# FuseBox cache
.fusebox/

# DynamoDB Local files
.dynamodb/

# TernJS port file
.tern-port

# Stores VSCode versions used for testing VSCode extensions
.vscode-test

# yarn v2
.yarn/cache
.yarn/unplugged
.yarn/build-state.yml
.yarn/install-state.gz
.pnp.*
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
````markdown
# MCP Conversation Server

A Model Context Protocol (MCP) server implementation for managing conversations with OpenRouter's language models. This server provides a standardized interface for applications to interact with various language models through a unified conversation management system.

## Features

- **MCP Protocol Support**
  - Full MCP protocol compliance
  - Resource management and discovery
  - Tool-based interaction model
  - Streaming response support
  - Error handling and recovery

- **OpenRouter Integration**
  - Support for all OpenRouter models
  - Real-time streaming responses
  - Automatic token counting
  - Model context window management
  - Available models include:
    - Claude 3 Opus
    - Claude 3 Sonnet
    - Llama 2 70B
    - And many more from OpenRouter's catalog

- **Conversation Management**
  - Create and manage multiple conversations
  - Support for system messages
  - Message history tracking
  - Token usage monitoring
  - Conversation filtering and search

- **Streaming Support**
  - Real-time message streaming
  - Chunked response handling
  - Token counting

- **File System Persistence**
  - Conversation state persistence
  - Configurable storage location
  - Automatic state management

## Installation

```bash
npm install mcp-conversation-server
```

## Configuration

### YAML Configuration

All configuration for the MCP Conversation Server is provided via YAML. Update the `config/models.yaml` file with your settings. For example:

```yaml
# MCP Server Configuration
openRouter:
  apiKey: "YOUR_OPENROUTER_API_KEY" # Replace with your actual OpenRouter API key.

persistence:
  path: "./conversations" # Directory for storing conversation data.

models:
  # Define your models here
  'provider/model-name':
    id: 'provider/model-name'
    contextWindow: 123456
    streaming: true
    temperature: 0.7
    description: 'Model description'

# Default model to use if none specified
defaultModel: 'provider/model-name'
```

### Server Configuration

The MCP Conversation Server loads all of its configuration from the YAML file. In your application, you can load the configuration as follows:

```typescript
const config = await loadModelsConfig(); // Loads openRouter, persistence, models, and defaultModel settings from 'config/models.yaml'
```

*Note: Environment variables are no longer required, as all configuration is provided via the YAML file.*

## Usage

### Basic Server Setup

```typescript
import { ConversationServer } from 'mcp-conversation-server';

// config loaded from config/models.yaml (see above)
const server = new ConversationServer(config);
server.run().catch(console.error);
```

### Available Tools

The server exposes several MCP tools:

1. **create-conversation**

   ```typescript
   {
     provider: 'openrouter';  // Provider is always 'openrouter'
     model: string;           // OpenRouter model ID (e.g., 'anthropic/claude-3-opus-20240229')
     title?: string;          // Optional conversation title
   }
   ```

2. **send-message**

   ```typescript
   {
     conversationId: string;  // Conversation ID
     content: string;         // Message content
     stream?: boolean;        // Enable streaming responses
   }
   ```

3. **list-conversations**

   ```typescript
   {
     filter?: {
       model?: string;      // Filter by model
       startDate?: string;  // Filter by start date
       endDate?: string;    // Filter by end date
     }
   }
   ```

### Resources

The server provides access to several resources:

1. **conversation://{id}**
   - Access specific conversation details
   - View message history
   - Check conversation metadata

2. **conversation://list**
   - List all active conversations
   - Filter conversations by criteria
   - Sort by recent activity

## Development

### Building

```bash
npm run build
```

### Running Tests

```bash
npm test
```

### Debugging

The server provides several debugging features:

1. **Error Logging**
   - All errors are logged with stack traces
   - Token usage tracking
   - Rate limit monitoring

2. **MCP Inspector**

   ```bash
   npm run inspector
   ```

   Use the MCP Inspector to:
   - Test tool execution
   - View resource contents
   - Monitor message flow
   - Validate protocol compliance

3. **Provider Validation**

   ```typescript
   await server.providerManager.validateProviders();
   ```

   Validates:
   - API key validity
   - Model availability
   - Rate limit status

### Troubleshooting

Common issues and solutions:

1. **OpenRouter Connection Issues**
   - Verify your API key is valid
   - Check rate limits on [OpenRouter's dashboard](https://openrouter.ai/dashboard)
   - Ensure the model ID is correct
   - Monitor credit usage

2. **Message Streaming Errors**
   - Verify model streaming support
   - Check connection stability
   - Monitor token limits
   - Handle timeout settings

3. **File System Errors**
   - Check directory permissions
   - Verify path configuration
   - Monitor disk space
   - Handle concurrent access

## Contributing

1. Fork the repository
2. Create a feature branch
3. Commit your changes
4. Push to the branch
5. Create a Pull Request

## License

ISC License
````
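
For orientation, here is a minimal sketch of driving the server from an MCP client over stdio, using the same `@modelcontextprotocol/sdk` package the server depends on. The tool names and argument shapes come from the README above; the exact client-side API can differ slightly between SDK versions, so treat this as illustrative rather than definitive.

```typescript
import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

// Spawn the built server as a child process and talk to it over stdio.
const transport = new StdioClientTransport({
    command: 'node',
    args: ['build/index.js']
});

const client = new Client(
    { name: 'example-client', version: '1.0.0' },
    { capabilities: {} }
);
await client.connect(transport);

// Create a conversation, then send one message to it.
const created = await client.callTool({
    name: 'create-conversation',
    arguments: { model: 'google/gemini-2.0-flash-001', title: 'Demo' }
});
const conversation = JSON.parse((created.content as any)[0].text);

const reply = await client.callTool({
    name: 'send-message',
    arguments: { conversationId: conversation.id, content: 'Hello!' }
});
console.log(reply.content);
```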
--------------------------------------------------------------------------------
/src/stores/Store.ts:
--------------------------------------------------------------------------------
```typescript
import { Conversation } from '../types/conversation.js';

export interface Store {
    initialize(): Promise<void>;
    saveConversation(conversation: Conversation): Promise<void>;
    getConversation(id: string): Promise<Conversation | null>;
    listConversations(): Promise<Conversation[]>;
    // Included so Store consumers can delete; FileSystemStore already implements it.
    deleteConversation(id: string): Promise<void>;
}
```
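
The `PersistenceConfig` type in `src/types/server.ts` allows a `'memory'` backend, though no in-memory store ships in this repository. A minimal sketch of one, useful for tests, might look like this (the class name and its existence are hypothetical):

```typescript
import { Conversation } from '../types/conversation.js';
import { Store } from './Store.js';

// Hypothetical in-memory Store; not part of this repository.
export class InMemoryStore implements Store {
    private conversations = new Map<string, Conversation>();

    async initialize(): Promise<void> {
        // Nothing to set up for an in-memory map.
    }

    async saveConversation(conversation: Conversation): Promise<void> {
        this.conversations.set(conversation.id, conversation);
    }

    async getConversation(id: string): Promise<Conversation | null> {
        return this.conversations.get(id) ?? null;
    }

    async listConversations(): Promise<Conversation[]> {
        return [...this.conversations.values()];
    }

    async deleteConversation(id: string): Promise<void> {
        this.conversations.delete(id);
    }
}
```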
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
```json
{
  "compilerOptions": {
    "target": "ES2022",
    "module": "Node16",
    "moduleResolution": "Node16",
    "outDir": "./build",
    "rootDir": "./src",
    "strict": true,
    "esModuleInterop": true,
    "skipLibCheck": true,
    "forceConsistentCasingInFileNames": true
  },
  "include": ["src/**/*"],
  "exclude": ["node_modules"]
}
```
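
One consequence of `module: "Node16"` here together with `"type": "module"` in package.json: relative imports must name the *emitted* `.js` file, which is why the TypeScript sources in this repository import paths like `./types/conversation.js` rather than `./types/conversation`:

```typescript
// Node16 ESM resolution requires the emitted extension in relative specifiers:
import { Conversation } from './types/conversation.js'; // resolves to build/types/conversation.js
```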
--------------------------------------------------------------------------------
/src/types/conversation.ts:
--------------------------------------------------------------------------------
```typescript
export interface Message {
    role: 'system' | 'user' | 'assistant';
    content: string;
    timestamp: string;
    name?: string;
}

export interface Conversation {
    id: string;
    model: string;
    title: string;
    messages: Message[];
    created: string;
    updated: string;
}

export interface ConversationFilter {
    model?: string;
    startDate?: string;
    endDate?: string;
}
```
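
The repository defines `ConversationFilter` but does not show the filtering logic itself. A sketch of how the filter is presumably meant to be interpreted (the helper name is hypothetical):

```typescript
import { Conversation, ConversationFilter } from './conversation.js';

// Hypothetical helper: dates are ISO-8601 strings, so lexicographic
// comparison matches chronological order.
function matchesFilter(conversation: Conversation, filter: ConversationFilter): boolean {
    if (filter.model && conversation.model !== filter.model) return false;
    if (filter.startDate && conversation.created < filter.startDate) return false;
    if (filter.endDate && conversation.created > filter.endDate) return false;
    return true;
}
```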
--------------------------------------------------------------------------------
/src/types/errors.ts:
--------------------------------------------------------------------------------
```typescript
export class McpError extends Error {
    constructor(public code: string, message: string) {
        super(message);
        this.name = 'McpError';
    }
}

export class ValidationError extends Error {
    constructor(message: string) {
        super(message);
        this.name = 'ValidationError';
    }
}

export class OpenRouterError extends Error {
    constructor(message: string) {
        super(message);
        this.name = 'OpenRouterError';
    }
}

export class FileSystemError extends Error {
    constructor(message: string) {
        super(message);
        this.name = 'FileSystemError';
    }
}
```
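
Callers can discriminate between these classes with `instanceof`, which is how the send-message tool in `src/index.ts` rethrows `OpenRouterError` untouched while wrapping everything else. A small sketch (the function is illustrative, not part of the repo):

```typescript
import { OpenRouterError, FileSystemError } from './errors.js';

// Sketch of caller-side handling: instanceof narrows to the concrete class.
function describeFailure(error: unknown): string {
    if (error instanceof OpenRouterError) return `Provider failure: ${error.message}`;
    if (error instanceof FileSystemError) return `Storage failure: ${error.message}`;
    return error instanceof Error ? error.message : 'Unknown error';
}
```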
--------------------------------------------------------------------------------
/config/models.yaml:
--------------------------------------------------------------------------------
```yaml
# OpenRouter Models Configuration
# Visit https://openrouter.ai/docs#models for the complete list of available models

# MCP Server Configuration
openRouter:
  apiKey: "<<OPEN ROUTER>>" # Replace with your actual OpenRouter API key.

persistence:
  path: "d:/projects/conversations" # Optional: Directory for storing conversation data.

models:
  'google/gemini-2.0-pro-exp-02-05:free':
    id: 'google/gemini-2.0-pro-exp-02-05:free'
    contextWindow: 2000000
    streaming: true
    temperature: 0.2
    description: 'Google Gemini 2.0 Pro is a powerful and versatile language model that can handle a wide range of tasks.'

  'google/gemini-2.0-flash-001':
    id: 'google/gemini-2.0-flash-001'
    contextWindow: 1000000
    streaming: true
    temperature: 0.2
    description: 'Google Gemini 2.0 Flash is a powerful and versatile language model that can handle a wide range of tasks.'

  # Add more models as needed following the same format
  # Example:
  # 'provider/model-name':
  #   id: 'provider/model-name'
  #   contextWindow: <window_size>
  #   streaming: true/false
  #   description: 'Model description'

# Default model to use if none specified
defaultModel: 'google/gemini-2.0-pro-exp-02-05:free'
```
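
This file is parsed at startup with the `yaml` package. A condensed sketch of the validation that `loadModelsConfig` in `src/index.ts` performs on it:

```typescript
import * as fs from 'fs/promises';
import { parse } from 'yaml';

// Condensed version of the checks in loadModelsConfig (src/index.ts).
const raw = parse(await fs.readFile('config/models.yaml', 'utf8'));
if (!raw.openRouter?.apiKey) throw new Error('Missing openRouter.apiKey');
if (!raw.models || Object.keys(raw.models).length === 0) throw new Error('No models configured');
if (!raw.defaultModel) throw new Error('Missing defaultModel');
```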
--------------------------------------------------------------------------------
/src/types/server.ts:
--------------------------------------------------------------------------------
```typescript
export interface ResourceConfig {
    maxSizeBytes: number;
    allowedTypes: string[];
    chunkSize: number;
}

export interface ServerConfig {
    openRouter: {
        apiKey: string;
    };
    models: {
        [key: string]: {
            id: string;
            contextWindow: number;
            streaming: boolean;
            description?: string;
        };
    };
    defaultModel: string;
    persistence: {
        type: 'filesystem';
        path: string;
    };
    resources: ResourceConfig;
}

export interface ModelConfig {
    contextWindow: number;
    streaming: boolean;
}

export interface PersistenceConfig {
    type: 'filesystem' | 'memory';
    path?: string;
}

export interface CreateConversationParams {
    provider?: string;
    model?: string;
    title?: string;
}

// NOTE: The Conversation and Message shapes below differ from those in
// ./conversation.ts (numeric timestamps, no system role); the running server
// persists the ./conversation.ts shapes.
export interface Conversation {
    id: string;
    provider: string;
    model: string;
    title: string;
    messages: Message[];
    createdAt: number;
    updatedAt: number;
}

export interface Message {
    role: 'user' | 'assistant';
    content: string;
    timestamp: number;
    context?: {
        documents?: string[];
        code?: string[];
    };
}
```
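
To make the config shape concrete, here is a minimal literal satisfying `ServerConfig`, mirroring the defaults assembled in `src/index.ts`; the model entry and values are illustrative only:

```typescript
import { ServerConfig } from './server.js';

// Minimal config satisfying ServerConfig; values are illustrative.
const exampleConfig: ServerConfig = {
    openRouter: { apiKey: process.env.OPENROUTER_API_KEY ?? '' },
    models: {
        'google/gemini-2.0-flash-001': {
            id: 'google/gemini-2.0-flash-001',
            contextWindow: 1000000,
            streaming: true
        }
    },
    defaultModel: 'google/gemini-2.0-flash-001',
    persistence: { type: 'filesystem', path: './conversations' },
    resources: {
        maxSizeBytes: 10 * 1024 * 1024, // 10MB
        allowedTypes: ['.txt', '.md', '.json'],
        chunkSize: 1024 // 1KB chunks
    }
};
```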
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
```json
{
  "name": "mcp-conversation-server",
  "version": "0.1.0",
  "description": "A Model Context Protocol server for managing conversations with OpenRouter's language models.",
  "private": true,
  "type": "module",
  "bin": {
    "mcp-conversation-server": "./build/index.js"
  },
  "files": [
    "build"
  ],
  "scripts": {
    "prebuild": "rimraf build",
    "build": "tsc && npm run copy-config",
    "copy-config": "copyfiles config/**/* build/",
    "start": "node build/index.js",
    "dev": "ts-node-esm src/index.ts",
    "inspector": "npx @modelcontextprotocol/inspector node build/index.js",
    "test": "jest"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "dependencies": {
    "@modelcontextprotocol/sdk": "^1.0.0",
    "@types/dotenv": "^8.2.3",
    "@types/express": "^4.17.21",
    "@types/uuid": "^9.0.7",
    "dotenv": "^16.4.7",
    "express": "^4.18.2",
    "openai": "^4.83.0",
    "uuid": "^9.0.1",
    "yaml": "^2.7.0"
  },
  "devDependencies": {
    "@types/jest": "^29.5.14",
    "@types/node": "^20.11.24",
    "@typescript-eslint/eslint-plugin": "^7.0.0",
    "@typescript-eslint/parser": "^7.0.0",
    "copyfiles": "^2.4.1",
    "eslint": "^8.56.0",
    "jest": "^29.7.0",
    "prettier": "^3.4.2",
    "rimraf": "^5.0.10",
    "ts-jest": "^29.2.5",
    "ts-node": "^10.9.2",
    "ts-node-dev": "^2.0.0",
    "typescript": "^5.3.3"
  }
}
```
--------------------------------------------------------------------------------
/config/system_instructions.yaml:
--------------------------------------------------------------------------------
```yaml
# System Instructions Configuration
# Define default and model-specific system instructions

default: |
  You are a helpful AI assistant focused on providing accurate and concise responses.
  Please follow these guidelines:
  - Be direct and to the point
  - Show code examples when relevant
  - Explain complex concepts clearly
  - Ask for clarification when needed

models:
  # DeepSeek Models
  'deepseek/deepseek-chat': |
    You are DeepSeek Chat, a helpful AI assistant with strong coding and technical capabilities.
    Guidelines:
    - Focus on practical, implementable solutions
    - Provide code examples with explanations
    - Use clear technical explanations
    - Follow best practices in software development
    - Ask for clarification on ambiguous requirements

  'deepseek/deepseek-r1': |
    You are DeepSeek Reasoner, an AI focused on step-by-step problem solving and logical reasoning.
    Guidelines:
    - Break down complex problems into steps
    - Show your reasoning process clearly
    - Validate assumptions
    - Consider edge cases
    - Provide concrete examples

  # Claude Models
  'anthropic/claude-3-opus-20240229': |
    You are Claude 3 Opus, a highly capable AI assistant with strong analytical and creative abilities.
    Guidelines:
    - Provide comprehensive, well-reasoned responses
    - Balance depth with clarity
    - Use examples to illustrate complex points
    - Consider multiple perspectives
    - Maintain high standards of accuracy

  'anthropic/claude-3-sonnet-20240229': |
    You are Claude 3 Sonnet, focused on efficient and practical problem-solving.
    Guidelines:
    - Provide concise, actionable responses
    - Focus on practical solutions
    - Use clear examples
    - Be direct and efficient
    - Ask for clarification when needed

  # Llama Models
  'meta-llama/llama-2-70b-chat': |
    You are Llama 2, an open-source AI assistant focused on helpful and accurate responses.
    Guidelines:
    - Provide clear, straightforward answers
    - Use examples when helpful
    - Stay within known capabilities
    - Be direct about limitations
    - Focus on practical solutions
```
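
Note that nothing in `src/` currently reads this file. Since the `Message` type supports a `'system'` role, one plausible wiring is to prepend the matching instruction as a system message when a conversation is created; a hedged sketch (function, file location, and import path are assumptions):

```typescript
import * as fs from 'fs/promises';
import { parse } from 'yaml';
import { Message } from './src/types/conversation.js';

// Hypothetical wiring: pick the model-specific instruction, else the default.
async function systemMessageFor(modelId: string): Promise<Message> {
    const instructions = parse(await fs.readFile('config/system_instructions.yaml', 'utf8'));
    const content: string = instructions.models?.[modelId] ?? instructions.default;
    return { role: 'system', content, timestamp: new Date().toISOString() };
}
```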
--------------------------------------------------------------------------------
/config.example.ts:
--------------------------------------------------------------------------------
```typescript
import { ServerConfig } from './src/types/server.js';
import * as path from 'path';

/**
 * Example configuration for the MCP Conversation Server
 *
 * All requests are routed through OpenRouter, so models from providers such
 * as OpenAI and DeepSeek are addressed with 'provider/model-name' IDs.
 * Request timeouts are set when the OpenRouterProvider is constructed
 * (see src/index.ts).
 *
 * Storage paths can be configured in several ways:
 * 1. Use environment variable: CONVERSATIONS_PATH
 * 2. Set absolute path in config
 * 3. Set relative path (relative to project root)
 * 4. Let it default to OS-specific app data directory
 */

const config: ServerConfig = {
    openRouter: {
        apiKey: process.env.OPENROUTER_API_KEY || ''
    },
    models: {
        'deepseek/deepseek-chat': {
            id: 'deepseek/deepseek-chat',
            contextWindow: 32768,
            streaming: true
        },
        'deepseek/deepseek-r1': {
            id: 'deepseek/deepseek-r1',
            contextWindow: 64000,
            streaming: true
        },
        'openai/gpt-4': {
            id: 'openai/gpt-4',
            contextWindow: 8192,
            streaming: true
        },
        'openai/gpt-3.5-turbo': {
            id: 'openai/gpt-3.5-turbo',
            contextWindow: 4096,
            streaming: true
        }
    },
    defaultModel: 'deepseek/deepseek-chat',
    persistence: {
        type: 'filesystem' as const,
        // Use environment variable or default to d:\Projects\Conversations
        path: process.env.CONVERSATIONS_PATH || path.normalize('d:\\Projects\\Conversations')
    },
    resources: {
        maxSizeBytes: 10 * 1024 * 1024, // 10MB
        allowedTypes: ['.txt', '.md', '.json', '.csv'],
        chunkSize: 1024 // 1KB chunks
    }
};

export default config;

/**
 * Example usage:
 *
 * ```typescript
 * import { ConversationServer } from './src/index.js';
 * import config from './config.js';
 *
 * // Override storage path if needed
 * config.persistence.path = '/custom/path/to/conversations';
 *
 * const server = new ConversationServer(config);
 * server.initialize().then(() => {
 *     console.log('Server initialized, connecting...');
 *     server.connect().catch(err => console.error('Failed to connect:', err));
 * }).catch(err => console.error('Failed to initialize:', err));
 * ```
 */
```
--------------------------------------------------------------------------------
/src/stores/FileSystemStore.ts:
--------------------------------------------------------------------------------
```typescript
import * as fs from 'fs/promises';
import * as path from 'path';
import { Conversation } from '../types/conversation.js';
import { Store } from './Store.js';

interface FSError extends Error {
    code?: string;
    message: string;
}

function isFSError(error: unknown): error is FSError {
    return error instanceof Error && ('code' in error || 'message' in error);
}

export class FileSystemStore implements Store {
    private dataPath: string;
    private initialized: boolean = false;

    constructor(dataPath: string) {
        this.dataPath = dataPath;
    }

    async initialize(): Promise<void> {
        if (this.initialized) {
            return;
        }

        try {
            await fs.mkdir(this.dataPath, { recursive: true });
            this.initialized = true;
        } catch (error) {
            throw new Error(`Failed to initialize store: ${error instanceof Error ? error.message : String(error)}`);
        }
    }

    private getConversationPath(id: string): string {
        return path.join(this.dataPath, `${id}.json`);
    }

    async saveConversation(conversation: Conversation): Promise<void> {
        const filePath = this.getConversationPath(conversation.id);
        try {
            await fs.writeFile(filePath, JSON.stringify(conversation, null, 2));
        } catch (error) {
            throw new Error(`Failed to save conversation: ${error instanceof Error ? error.message : String(error)}`);
        }
    }

    async getConversation(id: string): Promise<Conversation | null> {
        const filePath = this.getConversationPath(id);
        try {
            const data = await fs.readFile(filePath, 'utf-8');
            return JSON.parse(data) as Conversation;
        } catch (error) {
            if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
                return null;
            }
            throw new Error(`Failed to read conversation: ${error instanceof Error ? error.message : String(error)}`);
        }
    }

    async listConversations(): Promise<Conversation[]> {
        try {
            const files = await fs.readdir(this.dataPath);
            const conversations: Conversation[] = [];

            for (const file of files) {
                if (path.extname(file) === '.json') {
                    try {
                        const data = await fs.readFile(path.join(this.dataPath, file), 'utf-8');
                        conversations.push(JSON.parse(data) as Conversation);
                    } catch (error) {
                        console.error(`Failed to read conversation file ${file}:`, error);
                    }
                }
            }

            return conversations;
        } catch (error) {
            if ((error as NodeJS.ErrnoException).code === 'ENOENT') {
                return [];
            }
            throw new Error(`Failed to list conversations: ${error instanceof Error ? error.message : String(error)}`);
        }
    }

    async deleteConversation(id: string): Promise<void> {
        const filePath = this.getConversationPath(id);
        try {
            await fs.unlink(filePath);
        } catch (error) {
            if (isFSError(error)) {
                if (error.code !== 'ENOENT') {
                    throw new Error(`Failed to delete conversation ${id}: ${error.message}`);
                }
            } else {
                throw new Error(`Failed to delete conversation ${id}: Unknown error`);
            }
        }
    }
}
```
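
The README's troubleshooting section mentions concurrent access, and `fs.writeFile` is not atomic: a crash mid-write can leave a truncated JSON file. A common mitigation, sketched here but not part of this repository, is to write to a temporary file and then rename it, since rename is atomic on POSIX filesystems:

```typescript
import * as fs from 'fs/promises';

// Sketch of an atomic-write variant (not part of this repository): readers
// never observe a half-written file because rename replaces it in one step.
async function writeJsonAtomically(filePath: string, value: unknown): Promise<void> {
    const tempPath = `${filePath}.tmp`;
    await fs.writeFile(tempPath, JSON.stringify(value, null, 2));
    await fs.rename(tempPath, filePath);
}
```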
--------------------------------------------------------------------------------
/src/providers/openrouter.ts:
--------------------------------------------------------------------------------
```typescript
import OpenAI from 'openai';
import { Message } from '../types/conversation.js';

interface Model {
    id: string;
    contextWindow: number;
    streaming: boolean;
    supportsFunctions?: boolean;
    temperature?: number;
    description?: string;
}

interface ProviderConfig {
    apiKey: string;
    models: {
        [key: string]: {
            id: string;
            contextWindow: number;
            streaming: boolean;
            temperature?: number;
            description?: string;
        };
    };
    defaultModel: string;
    timeouts?: {
        completion?: number;
        stream?: number;
    };
}

interface ProviderResponse {
    content: string;
    model: string;
    tokenCount?: number;
    metadata?: Record<string, unknown>;
}

interface CompletionParams {
    messages: Message[];
    model: string;
    stream: boolean;
    timeout?: number;
    temperature?: number;
    maxTokens?: number;
}

interface ModelInfo extends Model {
    isDefault: boolean;
    provider: string;
    cost?: {
        prompt: number;
        completion: number;
    };
}

export class OpenRouterProvider {
    private client: OpenAI;
    private _models: Model[];
    private defaultModel: string;
    private timeouts: Required<NonNullable<ProviderConfig['timeouts']>>;
    readonly name = 'openrouter';

    constructor(config: ProviderConfig) {
        if (!config.apiKey) {
            throw new Error('Missing openRouter.apiKey in YAML configuration');
        }

        if (!config.defaultModel) {
            throw new Error('Missing defaultModel in YAML configuration');
        }

        // Initialize OpenAI client with OpenRouter configuration
        this.client = new OpenAI({
            apiKey: config.apiKey,
            baseURL: 'https://openrouter.ai/api/v1',
            defaultQuery: { use_cache: 'true' },
            defaultHeaders: {
                'HTTP-Referer': 'https://github.com/cursor-ai/mcp-conversation-server',
                'X-Title': 'MCP Conversation Server',
                'Content-Type': 'application/json',
                'OR-SITE-LOCATION': 'https://github.com/cursor-ai/mcp-conversation-server',
                'OR-ALLOW-FINE-TUNING': 'false'
            }
        });

        this.timeouts = {
            completion: config.timeouts?.completion ?? 30000,
            stream: config.timeouts?.stream ?? 60000
        };

        this.defaultModel = config.defaultModel;

        // Convert configured models to internal format
        this._models = Object.entries(config.models).map(([id, modelConfig]) => ({
            id,
            contextWindow: modelConfig.contextWindow,
            streaming: modelConfig.streaming,
            temperature: modelConfig.temperature,
            description: modelConfig.description,
            supportsFunctions: false
        }));
    }

    private getModelConfig(modelId: string): Model {
        const model = this._models.find(m => m.id === modelId);
        if (!model) {
            console.warn(`Model ${modelId} not found in configuration, falling back to default model ${this.defaultModel}`);
            const defaultModel = this._models.find(m => m.id === this.defaultModel);
            if (!defaultModel) {
                throw new Error('Default model not found in configuration');
            }
            return defaultModel;
        }
        return model;
    }

    get models(): Model[] {
        return this._models;
    }

    async validateConfig(): Promise<void> {
        if (this._models.length === 0) {
            throw new Error('No models configured for OpenRouter provider');
        }

        try {
            // Simple validation - just verify API connection works
            await this.client.chat.completions.create({
                model: this._models[0].id,
                messages: [{ role: 'user', content: 'test' }],
                max_tokens: 1 // Minimum response size for validation
            });
        } catch (error: unknown) {
            const message = error instanceof Error ? error.message : 'Unknown error';
            throw new Error(`Failed to validate OpenRouter configuration: ${message}`);
        }
    }

    async createCompletion(params: CompletionParams): Promise<ProviderResponse> {
        try {
            // Get model configuration or fall back to default
            const modelConfig = this.getModelConfig(params.model);

            const response = await this.client.chat.completions.create({
                model: modelConfig.id,
                messages: params.messages.map((msg: Message) => ({
                    role: msg.role,
                    content: msg.content,
                    name: msg.name
                })),
                temperature: params.temperature ?? modelConfig.temperature ?? 0.7,
                max_tokens: params.maxTokens,
                stream: false
            });

            // Validate response structure
            if (!response || !response.choices || !Array.isArray(response.choices) || response.choices.length === 0) {
                throw new Error('Invalid or empty response from OpenRouter');
            }

            const choice = response.choices[0];
            if (!choice || !choice.message || typeof choice.message.content !== 'string') {
                throw new Error('Invalid message structure in OpenRouter response');
            }

            return {
                content: choice.message.content,
                model: modelConfig.id,
                tokenCount: response.usage?.total_tokens,
                metadata: {
                    provider: 'openrouter',
                    modelName: modelConfig.id,
                    ...response.usage && { usage: response.usage }
                }
            };
        } catch (error: unknown) {
            if (error instanceof Error) {
                if (error.message.includes('timeout')) {
                    throw new Error('OpenRouter request timed out. Please try again.');
                }
                if (error.message.includes('rate_limit')) {
                    throw new Error('OpenRouter rate limit exceeded. Please try again later.');
                }
                if (error.message.includes('insufficient_quota')) {
                    throw new Error('OpenRouter quota exceeded. Please check your credits.');
                }
                throw new Error(`OpenRouter completion failed: ${error.message}`);
            }
            throw new Error('Unknown error occurred during OpenRouter completion');
        }
    }

    async *streamCompletion(params: CompletionParams): AsyncIterableIterator<ProviderResponse> {
        try {
            // Get model configuration or fall back to default
            const modelConfig = this.getModelConfig(params.model);

            const stream = await this.client.chat.completions.create({
                model: modelConfig.id,
                messages: params.messages.map((msg: Message) => ({
                    role: msg.role,
                    content: msg.content,
                    name: msg.name
                })),
                temperature: params.temperature ?? modelConfig.temperature ?? 0.7,
                max_tokens: params.maxTokens,
                stream: true
            });

            for await (const chunk of stream) {
                // Validate chunk structure
                if (!chunk || !chunk.choices || !Array.isArray(chunk.choices) || chunk.choices.length === 0) {
                    continue;
                }

                const delta = chunk.choices[0]?.delta;
                if (!delta || typeof delta.content !== 'string') {
                    continue;
                }

                yield {
                    content: delta.content,
                    model: modelConfig.id,
                    metadata: {
                        provider: 'openrouter',
                        modelName: modelConfig.id,
                        isPartial: true
                    }
                };
            }
        } catch (error: unknown) {
            if (error instanceof Error) {
                if (error.message.includes('timeout')) {
                    throw new Error('OpenRouter streaming request timed out. Please try again.');
                }
                if (error.message.includes('rate_limit')) {
                    throw new Error('OpenRouter rate limit exceeded. Please try again later.');
                }
                if (error.message.includes('insufficient_quota')) {
                    throw new Error('OpenRouter quota exceeded. Please check your credits.');
                }
                throw new Error(`OpenRouter streaming completion failed: ${error.message}`);
            }
            throw new Error('Unknown error occurred during OpenRouter streaming');
        }
    }

    /**
     * Get detailed information about all available models
     * @returns Array of model information including default status and pricing
     */
    async listAvailableModels(): Promise<ModelInfo[]> {
        try {
            return this._models.map(model => {
                const [provider] = model.id.split('/');
                return {
                    ...model,
                    provider: provider || 'unknown',
                    isDefault: model.id === this.defaultModel,
                    cost: undefined // Could be fetched from OpenRouter API if needed
                };
            }).sort((a, b) => {
                // Sort with default model first, then by provider/name
                if (a.isDefault) return -1;
                if (b.isDefault) return 1;
                return a.id.localeCompare(b.id);
            });
        } catch (error) {
            const message = error instanceof Error ? error.message : 'Unknown error';
            throw new Error(`Failed to list available models: ${message}`);
        }
    }

    /**
     * Get the current default model configuration
     * @returns The default model configuration
     */
    getDefaultModel(): ModelInfo {
        const defaultModel = this._models.find(m => m.id === this.defaultModel);
        if (!defaultModel) {
            throw new Error('Default model not found in configuration');
        }
        const [provider] = defaultModel.id.split('/');
        return {
            ...defaultModel,
            isDefault: true,
            provider: provider || 'unknown',
            cost: undefined
        };
    }
}
```
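
The provider can also be used on its own, outside the MCP server. A minimal sketch of streaming a completion with `for await`, using the types above (the import path assumes a script at the repository root):

```typescript
import { OpenRouterProvider } from './src/providers/openrouter.js';

const provider = new OpenRouterProvider({
    apiKey: process.env.OPENROUTER_API_KEY ?? '',
    models: {
        'google/gemini-2.0-flash-001': {
            id: 'google/gemini-2.0-flash-001',
            contextWindow: 1000000,
            streaming: true
        }
    },
    defaultModel: 'google/gemini-2.0-flash-001'
});

// Stream a completion and print chunks as they arrive.
for await (const chunk of provider.streamCompletion({
    model: 'google/gemini-2.0-flash-001',
    messages: [{ role: 'user', content: 'Say hello.', timestamp: new Date().toISOString() }],
    stream: true
})) {
    process.stdout.write(chunk.content);
}
```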
--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------
```typescript
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { z } from 'zod';
import * as dotenv from 'dotenv';
import * as path from 'path';
import * as os from 'os';
import * as fs from 'fs/promises';
import { randomUUID } from 'crypto';
import { parse } from 'yaml';
import { Message, Conversation } from './types/conversation.js';
import { ServerConfig } from './types/server.js';
import { OpenRouterError, FileSystemError } from './types/errors.js';
import { OpenRouterProvider } from './providers/openrouter.js';

// Load environment variables from .env file
dotenv.config();

// Determine the appropriate app data directory based on OS
function getAppDataPath(): string {
    switch (process.platform) {
        case 'win32':
            return process.env.APPDATA || path.join(os.homedir(), 'AppData', 'Roaming');
        case 'darwin':
            return path.join(os.homedir(), 'Library', 'Application Support');
        default:
            return process.env.XDG_DATA_HOME || path.join(os.homedir(), '.local', 'share');
    }
}

// Create the app-specific data directory path
const APP_NAME = 'mcp-conversation-server';
const defaultDataPath = path.join(getAppDataPath(), APP_NAME, 'conversations');

/**
 * MCP Conversation Server
 *
 * Workflow:
 * 1. Create a conversation:
 *    - Use create-conversation tool
 *    - Specify a model ID (e.g., 'google/gemini-2.0-flash-001')
 *    - Optionally provide a title
 *
 * 2. Send messages:
 *    - Use send-message tool
 *    - Provide conversationId from step 1
 *    - Set stream: true for real-time responses
 *    - Messages maintain chat context automatically
 *
 * 3. Access conversation history:
 *    - Use resources/read with conversation://{id}/history
 *    - Full chat history with context is preserved
 *
 * Error Handling:
 * - All errors include detailed messages and proper error codes
 * - Automatic retries for transient failures
 * - Timeouts are configurable per operation
 */

// Schema definitions (not currently registered with handlers; kept for reference)
const ListResourcesSchema = z.object({
    method: z.literal('resources/list')
});

const ReadResourceSchema = z.object({
    method: z.literal('resources/read'),
    params: z.object({
        uri: z.string()
    })
});

const ListToolsSchema = z.object({
    method: z.literal('tools/list')
});

const CallToolSchema = z.object({
    method: z.literal('tools/call'),
    params: z.object({
        name: z.string(),
        arguments: z.record(z.unknown())
    })
});

const ListPromptsSchema = z.object({
    method: z.literal('prompts/list')
});

const GetPromptSchema = z.object({
    method: z.literal('prompts/get'),
    params: z.object({
        name: z.string(),
        arguments: z.record(z.unknown()).optional()
    })
});

// Modify logging to use stderr for ALL non-JSON-RPC messages
function logDebug(...args: any[]): void {
    console.error('[DEBUG]', ...args);
}

function logError(...args: any[]): void {
    console.error('[ERROR]', ...args);
}

// Create the MCP server instance
const server = new McpServer({
    name: 'conversation-server',
    version: '1.0.0'
});

// Server configuration. Declared with `let` and reassigned in startServer so
// the tools below and the startup code share the same YAML-derived settings.
let config: ServerConfig = {
    openRouter: {
        apiKey: process.env.OPENROUTER_API_KEY || ''
    },
    models: {}, // Will be populated from YAML config
    defaultModel: '', // Will be populated from YAML config
    persistence: {
        type: 'filesystem',
        path: process.env.CONVERSATIONS_PATH || defaultDataPath
    },
    resources: {
        maxSizeBytes: 10 * 1024 * 1024, // 10MB
        allowedTypes: ['.txt', '.md', '.json', '.csv', '.cs', '.ts', '.js', '.jsx', '.tsx', '.pdf'],
        chunkSize: 1024 // 1KB chunks
    }
};

let openRouterProvider: OpenRouterProvider;

// Load models configuration
async function loadModelsConfig(): Promise<ServerConfig> {
    try {
        // Try to load from build directory first (for production)
        const buildConfigPath = path.join(path.dirname(process.argv[1]), 'config', 'models.yaml');
        let fileContents: string;

        try {
            fileContents = await fs.readFile(buildConfigPath, 'utf8');
        } catch (error) {
            // If not found in build directory, try source directory (for development)
            const sourceConfigPath = path.join(process.cwd(), 'config', 'models.yaml');
            fileContents = await fs.readFile(sourceConfigPath, 'utf8');
        }

        const parsed = parse(fileContents);

        // Validate required configuration
        if (!parsed.openRouter?.apiKey) {
            throw new Error('Missing openRouter.apiKey in models.yaml configuration');
        }

        if (!parsed.models || Object.keys(parsed.models).length === 0) {
            throw new Error('No models configured in models.yaml configuration');
        }

        if (!parsed.defaultModel) {
            throw new Error('Missing defaultModel in models.yaml configuration');
        }

        // Set default persistence path if not specified
        if (!parsed.persistence?.path) {
            parsed.persistence = {
                path: defaultDataPath
            };
        }

        return {
            openRouter: {
                apiKey: parsed.openRouter.apiKey
            },
            models: parsed.models,
            defaultModel: parsed.defaultModel,
            persistence: {
                type: 'filesystem',
                path: parsed.persistence.path
            },
            resources: {
                maxSizeBytes: 10 * 1024 * 1024, // 10MB
                allowedTypes: ['.txt', '.md', '.json', '.csv', '.cs', '.ts', '.js', '.jsx', '.tsx', '.pdf'],
                chunkSize: 1024 // 1KB chunks
            }
        };
    } catch (error) {
        if (error instanceof Error) {
            throw new Error(`Failed to load models configuration: ${error.message}`);
        }
        throw new Error('Failed to load models configuration. Make sure models.yaml exists in the config directory.');
    }
}

// Initialize and start the server
async function startServer() {
    try {
        console.error('Starting MCP Conversation Server...');

        // Load and validate the complete configuration from YAML.
        // Assign to the module-level config so the tools read the same settings.
        config = await loadModelsConfig();

        console.error('Using data directory:', config.persistence.path);

        // Initialize OpenRouter provider with loaded config
        openRouterProvider = new OpenRouterProvider({
            apiKey: config.openRouter.apiKey,
            models: config.models,
            defaultModel: config.defaultModel,
            timeouts: {
                completion: 30000,
                stream: 60000
            }
        });

        // Create data directory if it doesn't exist
        await fs.mkdir(config.persistence.path, { recursive: true });

        // Validate OpenRouter connection using the provider
        await openRouterProvider.validateConfig();

        // Set up tools after provider is initialized
        setupTools();

        console.error('Successfully connected to OpenRouter');
        console.error('Available models:', Object.keys(config.models).join(', '));
        console.error('Default model:', config.defaultModel);

        // Set up server transport
        const transport = new StdioServerTransport();
        await server.connect(transport);

        console.error('Server connected and ready');
    } catch (error) {
        console.error('Failed to start server:', error);
        process.exit(1);
    }
}

// Setup server tools
function setupTools() {
    // Add create-conversation tool
    server.tool(
        'create-conversation',
        `Creates a new conversation with a specified model.`,
        {
            model: z.string().describe('The model ID to use for the conversation'),
            title: z.string().optional().describe('Optional title for the conversation')
        },
        async (args: { model: string; title?: string }, _extra: any) => {
            const { model, title } = args;
            const now = new Date().toISOString();
            const conversation: Conversation = {
                id: randomUUID(),
                model,
                title: title || `Conversation ${now}`,
                messages: [],
                created: now,
                updated: now
            };

            try {
                const conversationPath = path.join(config.persistence.path, `${conversation.id}.json`);
                await fs.writeFile(conversationPath, JSON.stringify(conversation, null, 2));
                return {
                    content: [{
                        type: 'text',
                        text: JSON.stringify(conversation, null, 2)
                    }]
                };
            } catch (error) {
                const message = error instanceof Error ? error.message : 'Unknown error';
                throw new FileSystemError(`Failed to save conversation: ${message}`);
            }
        }
    );

    // Add send-message tool
    server.tool(
        'send-message',
        `Sends a message to an existing conversation and receives a response.`,
        {
            conversationId: z.string(),
            content: z.string(),
            stream: z.boolean().optional()
        },
        async (args: { conversationId: string; content: string; stream?: boolean }, _extra: any) => {
            const { conversationId, content, stream = false } = args;

            try {
                const conversationPath = path.join(config.persistence.path, `${conversationId}.json`);
                const conversation: Conversation = JSON.parse(await fs.readFile(conversationPath, 'utf8'));

                const userMessage: Message = {
                    role: 'user',
                    content,
                    timestamp: new Date().toISOString()
                };
                conversation.messages.push(userMessage);
                conversation.updated = new Date().toISOString();

                try {
                    let assistantContent: string;

                    if (stream) {
                        // Accumulate streamed chunks; the stdio transport delivers a
                        // single tool result, so the full reply is assembled here and
                        // persisted like a non-streaming response.
                        assistantContent = '';
                        for await (const chunk of openRouterProvider.streamCompletion({
                            model: conversation.model,
                            messages: conversation.messages,
                            stream: true
                        })) {
                            assistantContent += chunk.content;
                        }
                    } else {
                        const response = await openRouterProvider.createCompletion({
                            model: conversation.model,
                            messages: conversation.messages,
                            stream: false
                        });
                        assistantContent = response.content;
                    }

                    const assistantMessage: Message = {
                        role: 'assistant',
                        content: assistantContent,
                        timestamp: new Date().toISOString()
                    };
                    conversation.messages.push(assistantMessage);
                    conversation.updated = new Date().toISOString();

                    await fs.writeFile(conversationPath, JSON.stringify(conversation, null, 2));

                    return {
                        content: [{
                            type: 'text',
                            text: JSON.stringify(assistantMessage, null, 2)
                        }]
                    };
                } catch (error) {
                    const message = error instanceof Error ? error.message : 'Unknown error';
                    throw new OpenRouterError(`OpenRouter request failed: ${message}`);
                }
            } catch (error) {
                if (error instanceof OpenRouterError) throw error;
                const message = error instanceof Error ? error.message : 'Unknown error';
                throw new FileSystemError(`Failed to handle message: ${message}`);
            }
        }
    );

    // Add list-models tool
    server.tool(
        'list-models',
        `Lists all available models with their configurations and capabilities.`,
        {},
        async (_args: {}, _extra: any) => {
            try {
                const models = await openRouterProvider.listAvailableModels();
                return {
                    content: [{
                        type: 'text',
                        text: JSON.stringify({
                            models,
                            defaultModel: openRouterProvider.getDefaultModel(),
                            totalModels: models.length
                        }, null, 2)
                    }]
                };
            } catch (error) {
                const message = error instanceof Error ? error.message : 'Unknown error';
                throw new Error(`Failed to list models: ${message}`);
            }
        }
    );
}

// Start the server
startServer();
```
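
The server above registers tools only, while the README also documents `conversation://{id}` resources. A hedged sketch of how that resource could be wired up with the SDK's `ResourceTemplate` (this wiring is hypothetical; `dataDir` stands in for `config.persistence.path`):

```typescript
import { McpServer, ResourceTemplate } from '@modelcontextprotocol/sdk/server/mcp.js';
import * as fs from 'fs/promises';
import * as path from 'path';

// Hypothetical wiring, not present in src/index.ts.
declare const server: McpServer;
const dataDir = './conversations';

server.resource(
    'conversation',
    new ResourceTemplate('conversation://{id}', { list: undefined }),
    async (uri, { id }) => ({
        contents: [{
            uri: uri.href,
            mimeType: 'application/json',
            // Serve the persisted conversation file as the resource body.
            text: await fs.readFile(path.join(dataDir, `${id}.json`), 'utf8')
        }]
    })
);
```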