# gemini-mcp-server — repository snapshot
# Directory Structure

```
├── .cursorrules
├── .gitignore
├── Dockerfile
├── LICENSE.txt
├── package-lock.json
├── package.json
├── README.md
├── smithery.yaml
├── src
│   ├── gemini_mcp_agent.ts
│   ├── gemini_mcp_server.ts
│   └── mcp_client.ts
└── tsconfig.json
```

# Files

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
 1 | # Dependencies
 2 | node_modules/
 3 | .pnp
 4 | .pnp.js
 5 | 
 6 | # Build output
 7 | dist/
 8 | build/
 9 | 
10 | # Environment variables
11 | .env
12 | .env.local
13 | .env.development.local
14 | .env.test.local
15 | .env.production.local
16 | 
17 | # IDE and editor files
18 | .idea/
19 | .vscode/
20 | *.swp
21 | *.swo
22 | .DS_Store
23 | 
24 | # Logs
25 | npm-debug.log*
26 | yarn-debug.log*
27 | yarn-error.log*
28 | logs/
29 | *.log
30 | 
31 | # Testing
32 | coverage/
33 | 
34 | # Misc
35 | .DS_Store
36 | .env.local
37 | .env.development.local
38 | .env.test.local
39 | .env.production.local
40 | 
41 | # Claude Desktop config (contains API keys)
42 | **/claude_desktop_config.json 
```

--------------------------------------------------------------------------------
/.cursorrules:
--------------------------------------------------------------------------------

```
 1 | 
 2 | <cursor-tools Integration>
 3 | Use the following commands to get AI assistance:
 4 | 
 5 | cursor-tools web "your question"  - Get answers from the web using Perplexity AI
 6 | cursor-tools repo "your question" - Get context-aware answers about this repository using Google Gemini
 7 | 
 8 | cursor-tools web is good for getting up-to-date information from the web that is not repository-specific. For example, you can ask it to get the names and details of the latest OpenAI models or details about an external API.
 9 | cursor-tools repo has the entire repository context available to it so it is good for repository search and tasks that require holistic understanding such as planning, debugging and answering questions about the architecture.
10 | 
11 | if cursor-tools is not found in your PATH, you can run it with `npm exec cursor-tools "your question"` or `yarn cursor-tools "your question"` or `pnpm cursor-tools "your question"` depending on your package manager if cursor-tools is installed as a dependency. If cursor-tools is not installed as a dependency you should fall back to using `npx cursor-tools "your question"`.
12 | 
13 | Note: configuration is in cursor-tools.config.json (falling back to ~/.cursor-tools/config.json)
14 | Note: api keys are loaded from .cursor-tools.env (falling back to ~/.cursor-tools/.env)
15 | </cursor-tools Integration>
16 | 
```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
 1 | # gemini-mcp-server
 2 | 
 3 | [![smithery badge](https://smithery.ai/badge/@georgejeffers/gemini-mcp-server)](https://smithery.ai/server/@georgejeffers/gemini-mcp-server)
 4 | 
 5 | A TypeScript implementation of a Model Context Protocol (MCP) server that integrates with Google's Gemini Pro model.
 6 | 
 7 | <a href="https://glama.ai/mcp/servers/ejwvacw7s0">
 8 |   <img width="380" height="200" src="https://glama.ai/mcp/servers/ejwvacw7s0/badge" alt="Gemini Server MCP server" />
 9 | </a>
10 | 
11 | ## MCP Tools
12 | 
13 | ### generate_text
14 | *From server: gemini*
15 | 
16 | ## Prerequisites
17 | 
18 | - Node.js 18 or higher
19 | - Google Gemini API key
20 | - TypeScript
21 | - Claude Desktop app
22 | 
23 | ## Installation
24 | 
25 | ### Installing via Smithery
26 | 
27 | To install Gemini MCP Server for Claude Desktop automatically via [Smithery](https://smithery.ai/server/@georgejeffers/gemini-mcp-server):
28 | 
29 | ```bash
30 | npx -y @smithery/cli install @georgejeffers/gemini-mcp-server --client claude
31 | ```
32 | 
33 | ### Manual Installation
34 | 1. Clone the repository:
35 | ```bash
36 | git clone https://github.com/GeorgeJeffers/gemini-mcp-server.git
37 | cd gemini-mcp-server
38 | ```
39 | 
40 | 2. Install dependencies:
41 | ```bash
42 | npm install
43 | ```
44 | 
45 | 3. Build:
46 | ```bash
47 | npm run build
48 | ```
49 | 
50 | ## Claude Desktop Integration
51 | 
52 | To use this server with Claude Desktop:
53 | 
54 | 1. Open Claude Desktop
55 | 2. Go to Settings > Developer
56 | 3. Click "Edit Config"
57 | 4. Add the following configuration:
58 | 
59 | ```json
60 | {
61 |   "name": "gemini",
62 |   "command": "node",
63 |   "args": ["dist/gemini_mcp_server.js"],
64 |   "env": {
65 |     "GEMINI_API_KEY": "your_api_key_here"
66 |   },
67 |   "cwd": "/path/to/mcp-gemini-server"
68 | }
69 | ```
70 | 
71 | Replace:
72 | - `/path/to/mcp-gemini-server` with the absolute path to where you cloned this repository
73 | - `your_api_key_here` with your actual Google Gemini API key
74 | 
75 | The server will now be available in Claude Desktop's MCP server list.
76 | 
77 | ## License
78 | 
79 | MIT
80 | 
81 | ## Author
82 | 
83 | GeorgeJeffers
84 | 
```

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
 1 | # Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile
 2 | FROM node:lts-alpine
 3 | 
 4 | WORKDIR /app
 5 | 
 6 | # Copy package files
 7 | COPY package*.json ./
 8 | 
 9 | # Install dependencies without running lifecycle scripts
10 | RUN npm install --ignore-scripts
11 | 
12 | # Install missing ws module and its type declarations
13 | RUN npm install ws @types/ws
14 | 
15 | # Copy source code to work directory
16 | COPY . .
17 | 
18 | # Build the TypeScript code
19 | RUN npm run build
20 | 
21 | # Start the server
22 | CMD ["npm", "start"]
23 | 
```

--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |   "compilerOptions": {
 3 |     "target": "ES2020",
 4 |     "module": "CommonJS",
 5 |     "moduleResolution": "node",
 6 |     "outDir": "./dist",
 7 |     "rootDir": "./src",
 8 |     "strict": true,
 9 |     "esModuleInterop": true,
10 |     "skipLibCheck": true,
11 |     "forceConsistentCasingInFileNames": true,
12 |     "resolveJsonModule": true,
13 |     "allowJs": true,
14 |     "sourceMap": true
15 |   },
16 |   "ts-node": {
17 |     "transpileOnly": true,
18 |     "files": true
19 |   },
20 |   "include": ["src/**/*"],
21 |   "exclude": ["node_modules", "dist"]
22 | } 
```

--------------------------------------------------------------------------------
/smithery.yaml:
--------------------------------------------------------------------------------

```yaml
 1 | # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml
 2 | 
 3 | startCommand:
 4 |   type: stdio
 5 |   configSchema:
 6 |     # JSON Schema defining the configuration options for the MCP.
 7 |     type: object
 8 |     required:
 9 |       - geminiApiKey
10 |     properties:
11 |       geminiApiKey:
12 |         type: string
13 |         description: Your Google Gemini API Key
14 |   commandFunction:
15 |     # A JS function that produces the CLI command based on the given config to start the MCP on stdio.
16 |     |-
17 |     (config) => ({
18 |       command: 'node',
19 |       args: ['dist/gemini_mcp_server.js'],
20 |       env: {
21 |         GEMINI_API_KEY: config.geminiApiKey
22 |       }
23 |     })
24 |   exampleConfig:
25 |     geminiApiKey: your_api_key_here
26 | 
```

--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------

```json
 1 | {
 2 |   "name": "gemini-mcp-server",
 3 |   "version": "1.0.0",
 4 |   "description": "A TypeScript implementation of a Model Context Protocol (MCP) server that integrates with Google's Gemini Pro model.",
 5 |   "main": "dist/gemini_mcp_server.js",
 6 |   "scripts": {
 7 |     "build": "tsc",
 8 |     "start": "node dist/gemini_mcp_server.js",
 9 |     "dev": "tsc -w",
10 |     "test": "echo \"Error: no test specified\" && exit 1"
11 |   },
12 |   "keywords": ["mcp", "gemini", "ai", "typescript"],
13 |   "author": "GeorgeJeffers",
14 |   "license": "MIT",
15 |   "dependencies": {
16 |     "@google/generative-ai": "^0.2.0",
17 |     "@modelcontextprotocol/sdk": "^1.4.1",
18 |     "dotenv": "^16.4.5",
19 |     "zod": "^3.24.1"
20 |   },
21 |   "devDependencies": {
22 |     "@types/node": "^20.11.24",
23 |     "typescript": "^5.3.3"
24 |   }
25 | }
26 | 
```

--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------

```
 1 | MIT License
 2 | 
 3 | Copyright (c) 2024 GeorgeJeffers
 4 | 
 5 | Permission is hereby granted, free of charge, to any person obtaining a copy
 6 | of this software and associated documentation files (the "Software"), to deal
 7 | in the Software without restriction, including without limitation the rights
 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE. 
```

--------------------------------------------------------------------------------
/src/gemini_mcp_server.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
  2 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
  3 | import { GoogleGenerativeAI, GenerativeModel } from '@google/generative-ai';
  4 | import { config } from 'dotenv';
  5 | import { z } from "zod";
  6 | 
  7 | // Immediately send the startup message before anything else can write to stdout
  8 | process.stdout.write(JSON.stringify({
  9 |   jsonrpc: "2.0",
 10 |   method: "startup",
 11 |   params: {
 12 |     transport: "stdio"
 13 |   }
 14 | }) + '\n');
 15 | 
 16 | // Redirect stdout to stderr for everything else
 17 | const originalStdoutWrite = process.stdout.write.bind(process.stdout);
 18 | process.stdout.write = (chunk: any, ...args: any[]) => {
 19 |   return process.stderr.write(chunk, ...args);
 20 | };
 21 | 
 22 | // Redirect console methods to stderr
 23 | const consoleMethods = ['log', 'info', 'warn', 'error', 'debug'] as const;
 24 | consoleMethods.forEach(method => {
 25 |   (console as any)[method] = (...args: any[]) => process.stderr.write(`[${method}] ` + args.join(' ') + '\n');
 26 | });
 27 | 
 28 | // Suppress npm and Node.js startup messages
 29 | process.env.NODE_ENV = 'production';
 30 | process.env.NO_UPDATE_NOTIFIER = '1';
 31 | process.env.SUPPRESS_NO_CONFIG_WARNING = 'true';
 32 | process.env.npm_config_loglevel = 'silent';
 33 | 
 34 | // Load environment variables
 35 | config();
 36 | 
 37 | const GEMINI_API_KEY = process.env.GEMINI_API_KEY ?? '';
 38 | if (!GEMINI_API_KEY) {
 39 |   console.error('GEMINI_API_KEY environment variable is required');
 40 |   process.exit(1);
 41 | }
 42 | 
 43 | // Define tool schemas
 44 | const generateTextSchema = z.object({
 45 |   prompt: z.string().min(1),
 46 |   temperature: z.number().min(0).max(1).optional(),
 47 |   maxOutputTokens: z.number().min(1).max(8192).optional(),
 48 |   topK: z.number().min(1).max(40).optional(),
 49 |   topP: z.number().min(0).max(1).optional(),
 50 |   stream: z.boolean().optional(),
 51 | });
 52 | 
 53 | type GenerateTextParams = z.infer<typeof generateTextSchema>;
 54 | 
 55 | class GeminiMCPServer {
 56 |   private model: GenerativeModel;
 57 |   private server: McpServer;
 58 |   private transport: StdioServerTransport;
 59 |   private chat: any; // Store chat session
 60 | 
 61 |   constructor() {
 62 |     const genAI = new GoogleGenerativeAI(GEMINI_API_KEY);
 63 |     this.model = genAI.getGenerativeModel({ model: 'gemini-1.5-pro' });
 64 |     this.chat = this.model.startChat();
 65 |     
 66 |     this.server = new McpServer({
 67 |       name: "gemini",
 68 |       version: "1.0.0",
 69 |       capabilities: {
 70 |         tools: {
 71 |           generate_text: {
 72 |             description: "Generate text using Gemini Pro model",
 73 |             streaming: true
 74 |           }
 75 |         }
 76 |       }
 77 |     });
 78 | 
 79 |     this.transport = new StdioServerTransport();
 80 |   }
 81 | 
 82 |   private async generateText(params: GenerateTextParams) {
 83 |     try {
 84 |       const { prompt, temperature = 0.7, maxOutputTokens = 8192, topK, topP, stream = false } = params;
 85 |       const generationConfig = {
 86 |         temperature,
 87 |         maxOutputTokens,
 88 |         topK,
 89 |         topP,
 90 |       };
 91 | 
 92 |       console.log('Sending message to Gemini:', prompt);
 93 | 
 94 |       if (stream) {
 95 |         const result = await this.chat.sendMessageStream(prompt);
 96 |         let fullText = '';
 97 |         
 98 |         for await (const chunk of result.stream) {
 99 |           const chunkText = chunk.text();
100 |           fullText += chunkText;
101 |           
102 |           // Send intermediate chunk as progress event
103 |           // Note: Progress events are not yet supported in MCP SDK
104 |           // this.server.emit("generate_text/progress", {
105 |           //   content: [{
106 |           //     type: "text",
107 |           //     text: chunkText
108 |           //   }]
109 |           // });
110 |         }
111 | 
112 |         console.log('Received streamed response from Gemini:', fullText);
113 | 
114 |         return {
115 |           content: [{
116 |             type: "text" as const,
117 |             text: fullText
118 |           }]
119 |         };
120 |       } else {
121 |         const result = await this.chat.sendMessage(prompt);
122 |         const response = result.response.text();
123 |         
124 |         console.log('Received response from Gemini:', response);
125 | 
126 |         return {
127 |           content: [{
128 |             type: "text" as const,
129 |             text: response
130 |           }]
131 |         };
132 |       }
133 |     } catch (err) {
134 |       console.error('Error generating content:', err);
135 |       return {
136 |         content: [{
137 |           type: "text" as const,
138 |           text: err instanceof Error ? err.message : 'Internal error'
139 |         }],
140 |         isError: true
141 |       };
142 |     }
143 |   }
144 | 
145 |   async start() {
146 |     try {
147 |       console.info('Initializing Gemini MCP server...');
148 | 
149 |       // Register generate_text tool
150 |       this.server.tool(
151 |         "generate_text",
152 |         generateTextSchema.shape,
153 |         async (args: GenerateTextParams) => this.generateText(args)
154 |       );
155 | 
156 |       // Restore stdout for MCP communication
157 |       process.stdout.write = originalStdoutWrite;
158 |       
159 |       // Connect using stdio transport
160 |       await this.server.connect(this.transport);
161 |       console.info('Server started successfully and waiting for messages...');
162 |     } catch (error) {
163 |       console.error('Failed to start server:', error);
164 |       process.exit(1);
165 |     }
166 |   }
167 | 
168 |   async stop() {
169 |     try {
170 |       // Note: Disconnect is not yet supported in MCP SDK
171 |       // await this.server.disconnect();
172 |       console.info('Server stopped successfully');
173 |     } catch (error) {
174 |       console.error('Error stopping server:', error);
175 |       process.exit(1);
176 |     }
177 |   }
178 | }
179 | 
180 | // Handle shutdown gracefully
181 | process.on('SIGINT', () => {
182 |   console.info('Server shutting down');
183 |   process.exit(0);
184 | });
185 | 
186 | // Handle uncaught errors
187 | process.on('uncaughtException', (error) => {
188 |   console.error('Uncaught exception:', error);
189 |   process.exit(1);
190 | });
191 | 
192 | process.on('unhandledRejection', (reason) => {
193 |   console.error('Unhandled rejection:', reason);
194 |   process.exit(1);
195 | });
196 | 
197 | // Start the server
198 | const server = new GeminiMCPServer();
199 | server.start().catch(error => {
200 |   console.error('Unhandled error:', error);
201 |   process.exit(1);
202 | }); 
```

--------------------------------------------------------------------------------
/src/mcp_client.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { spawn, ChildProcess } from 'child_process';
  2 | import WebSocket from 'ws';
  3 | 
  4 | export interface MCPServerParameters {
  5 |   command: string;
  6 |   args: string[];
  7 |   env?: NodeJS.ProcessEnv | null;
  8 | }
  9 | 
 10 | export interface MCPClient {
 11 |   connect(): Promise<void>;
 12 |   disconnect(): Promise<void>;
 13 |   call_tool(toolName: string): (args: any) => Promise<any>;
 14 |   list_tools(): Promise<any[]>;
 15 | }
 16 | 
 17 | export class MCPClientImpl implements MCPClient {
 18 |   private process: ChildProcess | null = null;
 19 |   private socket: WebSocket | null = null;
 20 |   private messageQueue: Buffer[] = [];
 21 |   private currentResolver: ((value: Buffer) => void) | null = null;
 22 |   private rpcInterface: {
 23 |     read: () => Promise<Buffer>;
 24 |     write: (data: Buffer) => Promise<void>;
 25 |   } | null = null;
 26 | 
 27 |   constructor(private serverParams: MCPServerParameters) {
 28 |     console.log('MCPClientImpl initialized with params:', {
 29 |       command: serverParams.command,
 30 |       args: serverParams.args,
 31 |       env: serverParams.env ? Object.keys(serverParams.env) : null
 32 |     });
 33 |   }
 34 | 
 35 |   async connect(): Promise<void> {
 36 |     console.log('Attempting to connect to MCP server...');
 37 |     return new Promise((resolve, reject) => {
 38 |       console.log('Spawning process:', this.serverParams.command, this.serverParams.args);
 39 |       this.process = spawn(this.serverParams.command, this.serverParams.args, {
 40 |         env: {
 41 |           ...process.env,
 42 |           ...this.serverParams.env
 43 |         }
 44 |       });
 45 | 
 46 |       if (!this.process) {
 47 |         const error = new Error('Failed to start MCP server process');
 48 |         console.error('Spawn failed:', error);
 49 |         reject(error);
 50 |         return;
 51 |       }
 52 | 
 53 |       console.log('Process spawned with PID:', this.process.pid);
 54 | 
 55 |       this.process.on('error', (err: Error) => {
 56 |         console.error('MCP server process error:', err);
 57 |         console.error('Error details:', {
 58 |           message: err.message,
 59 |           name: err.name,
 60 |           stack: err.stack
 61 |         });
 62 |         reject(new Error(`Failed to execute MCP server: ${err.message}`));
 63 |         this.process = null;
 64 |       });
 65 | 
 66 |       this.process.on('exit', (code: number, signal: string) => {
 67 |         console.warn(`MCP server process exited with code ${code} and signal ${signal}`);
 68 |         this.process = null;
 69 |       });
 70 | 
 71 |       let wsUrl = '';
 72 |       this.process.stdout?.on('data', (data: Buffer) => {
 73 |         const msg = data.toString('utf-8');
 74 |         console.log('MCP server stdout:', msg);
 75 |         const match = msg.match(/ws:\/\/localhost:\d+/);
 76 |         if (match) {
 77 |           wsUrl = match[0];
 78 |           console.log('WebSocket URL found:', wsUrl);
 79 |           this.createWebSocket(wsUrl).then(resolve).catch(reject);
 80 |         }
 81 |       });
 82 | 
 83 |       this.process.stderr?.on('data', (data: Buffer) => {
 84 |         console.error(`MCP server stderr: ${data.toString('utf-8')}`);
 85 |       });
 86 |     });
 87 |   }
 88 | 
 89 |   private async createWebSocket(wsUrl: string): Promise<void> {
 90 |     console.log('Creating WebSocket connection to:', wsUrl);
 91 |     return new Promise((resolve, reject) => {
 92 |       this.socket = new WebSocket(wsUrl);
 93 | 
 94 |       this.socket.on('open', () => {
 95 |         console.log('WebSocket connection established');
 96 |         this.rpcInterface = {
 97 |           read: async () => {
 98 |             console.log('RPC read called');
 99 |             return new Promise<Buffer>((resolveRead) => {
100 |               if (this.messageQueue.length > 0) {
101 |                 const message = Buffer.concat(this.messageQueue);
102 |                 this.messageQueue = [];
103 |                 console.log('Reading from message queue:', message.toString());
104 |                 resolveRead(message);
105 |               } else {
106 |                 console.log('Waiting for message...');
107 |                 this.currentResolver = resolveRead;
108 |               }
109 |             });
110 |           },
111 |           write: async (data: Buffer) => {
112 |             console.log('RPC write called with data:', data.toString());
113 |             if (!this.socket?.readyState) {
114 |               const error = new Error('WebSocket not connected');
115 |               console.error('Write failed:', error);
116 |               throw error;
117 |             }
118 |             this.socket.send(data);
119 |             console.log('Data sent successfully');
120 |           },
121 |         };
122 |         resolve();
123 |       });
124 | 
125 |       this.socket.on('message', (data: WebSocket.Data) => {
126 |         console.log('WebSocket message received:', data.toString());
127 |         const buffer = Buffer.from(data as Buffer);
128 |         if (this.currentResolver) {
129 |           console.log('Resolving pending read');
130 |           this.currentResolver(buffer);
131 |           this.currentResolver = null;
132 |         } else {
133 |           console.log('Queueing message');
134 |           this.messageQueue.push(buffer);
135 |         }
136 |       });
137 | 
138 |       this.socket.on('error', (err: Error) => {
139 |         console.error('WebSocket error:', {
140 |           message: err.message,
141 |           name: err.name,
142 |           stack: err.stack
143 |         });
144 |         reject(new Error(`WebSocket connection failed: ${err.message}`));
145 |       });
146 | 
147 |       this.socket.on('close', (code: number, reason: Buffer) => {
148 |         console.log(`WebSocket connection closed with code ${code}`, {
149 |           reason: reason.toString(),
150 |           wasClean: code === 1000
151 |         });
152 |       });
153 | 
154 |       // Add connection timeout
155 |       setTimeout(() => {
156 |         if (this.socket?.readyState !== WebSocket.OPEN) {
157 |           const error = new Error('WebSocket connection timeout');
158 |           console.error('Connection timeout:', error);
159 |           reject(error);
160 |         }
161 |       }, 10000); // 10 second timeout
162 |     });
163 |   }
164 | 
165 |   async list_tools(): Promise<any[]> {
166 |     if (!this.rpcInterface) {
167 |       throw new Error('Not connected to MCP server');
168 |     }
169 | 
170 |     const request = {
171 |       jsonrpc: '2.0',
172 |       method: 'list_tools',
173 |       id: Math.floor(Math.random() * 1000000),
174 |     };
175 | 
176 |     await this.rpcInterface.write(Buffer.from(JSON.stringify(request)));
177 |     const response = await this.rpcInterface.read();
178 |     const result = JSON.parse(response.toString());
179 | 
180 |     if (result.error) {
181 |       throw new Error(result.error.message);
182 |     }
183 | 
184 |     return result.result;
185 |   }
186 | 
187 |   call_tool(toolName: string): (args: any) => Promise<any> {
188 |     const rpcInterface = this.rpcInterface;
189 |     if (!rpcInterface) {
190 |       throw new Error('Not connected to MCP server');
191 |     }
192 | 
193 |     return async (args: any) => {
194 |       const request = {
195 |         jsonrpc: '2.0',
196 |         method: toolName,
197 |         params: args,
198 |         id: Math.floor(Math.random() * 1000000),
199 |       };
200 | 
201 |       await rpcInterface.write(Buffer.from(JSON.stringify(request)));
202 |       const response = await rpcInterface.read();
203 |       const result = JSON.parse(response.toString());
204 | 
205 |       if (result.error) {
206 |         throw new Error(result.error.message);
207 |       }
208 | 
209 |       return result.result;
210 |     };
211 |   }
212 | 
213 |   async disconnect(): Promise<void> {
214 |     if (this.socket?.readyState === WebSocket.OPEN) {
215 |       this.socket.close();
216 |       this.socket = null;
217 |     }
218 | 
219 |     if (this.process) {
220 |       this.process.kill();
221 |       this.process = null;
222 |     }
223 |   }
224 | } 
```

--------------------------------------------------------------------------------
/src/gemini_mcp_agent.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import {
  2 |   GenerativeModel,
  3 |   GoogleGenerativeAI,
  4 |   GenerateContentResult,
  5 |   Content,
  6 |   Part,
  7 |   GenerationConfig,
  8 | } from '@google/generative-ai';
  9 | import { config } from 'dotenv';
 10 | import { MCPClientImpl, MCPServerParameters } from './mcp_client';
 11 | import { createInterface } from 'readline';
 12 | import * as fs from 'fs';
 13 | import * as path from 'path';
 14 | 
 15 | config(); // Load environment variables
 16 | 
 17 | // Use Gemini 1.5 Pro for enhanced capabilities
 18 | const MODEL_ID = 'gemini-1.5-pro';
 19 | 
 20 | // System prompt optimized for Gemini
 21 | const SYSTEM_PROMPT = `You are a helpful assistant that specializes in routing requests to Google's Gemini Pro model. You should:
 22 | 
 23 | 1. Recognize and respond to any requests that mention Gemini, including phrases like:
 24 |    - "ask Gemini..."
 25 |    - "get Gemini to..."
 26 |    - "have Gemini..."
 27 |    - "tell Gemini..."
 28 |    Or any similar variations that imply the user wants to interact with Gemini.
 29 | 
 30 | 2. For such requests:
 31 |    - Extract the actual query/task from the request
 32 |    - Remove the "ask Gemini" or similar prefix
 33 |    - Send the cleaned query to Gemini
 34 |    - Return Gemini's response
 35 | 
 36 | 3. For requests that don't explicitly mention Gemini, process them normally using your own capabilities
 37 | 
 38 | 4. Maintain conversation context and generate accurate responses
 39 | 
 40 | Please provide clear responses while acting as a seamless bridge to Gemini when requested.`;
 41 | 
 42 | interface FunctionDeclaration {
 43 |   name: string;
 44 |   description: string;
 45 |   parameters: {
 46 |     type: string;
 47 |     properties: Record<string, any>;
 48 |     required: string[];
 49 |   };
 50 | }
 51 | 
 52 | interface Tool {
 53 |   functionDeclarations: FunctionDeclaration[];
 54 | }
 55 | 
 56 | interface MCPTool {
 57 |   name: string;
 58 |   callable: (...args: any[]) => Promise<any>;
 59 |   schema: {
 60 |     type: string;
 61 |     function: {
 62 |       name: string;
 63 |       description: string;
 64 |       parameters: any;
 65 |     };
 66 |   };
 67 | }
 68 | 
 69 | interface FunctionCall {
 70 |   name: string;
 71 |   args: Record<string, any>;
 72 | }
 73 | 
 74 | type ExtendedPart = Part & {
 75 |   functionCall?: FunctionCall;
 76 |   functionResponse?: {
 77 |     name: string;
 78 |     response: { result: any };
 79 |   };
 80 | };
 81 | 
 82 | interface MCPServerConfig {
 83 |   command: string;
 84 |   args: string[];
 85 |   env?: Record<string, string>;
 86 |   headers?: Record<string, string>;
 87 |   baseUrl?: string;
 88 | }
 89 | 
 90 | interface MCPServersConfig {
 91 |   mcpServers: Record<string, MCPServerConfig>;
 92 | }
 93 | 
 94 | class MCPAgent {
 95 |   private genAI: GoogleGenerativeAI;
 96 |   private model: GenerativeModel;
 97 |   private mcpClient: MCPClientImpl;
 98 |   private tools: { [key: string]: MCPTool } = {};
 99 | 
100 |   constructor(apiKey: string, serverParams: MCPServerParameters) {
101 |     this.genAI = new GoogleGenerativeAI(apiKey);
102 |     this.model = this.genAI.getGenerativeModel({
103 |       model: MODEL_ID,
104 |       generationConfig: {
105 |         temperature: 0,
106 |         maxOutputTokens: 8192, // Increased output length
107 |       },
108 |     });
109 |     this.mcpClient = new MCPClientImpl(serverParams);
110 |   }
111 | 
112 |   async initialize(): Promise<void> {
113 |     await this.mcpClient.connect();
114 |     await this.setupTools();
115 |   }
116 | 
117 |   private async setupTools(): Promise<void> {
118 |     const mcpTools = await this.mcpClient.list_tools();
119 |     this.tools = mcpTools.reduce((acc, tool) => {
120 |       acc[tool.name] = {
121 |         name: tool.name,
122 |         callable: this.mcpClient.call_tool(tool.name),
123 |         schema: {
124 |           type: 'function',
125 |           function: {
126 |             name: tool.name,
127 |             description: tool.description,
128 |             parameters: tool.inputSchema,
129 |           },
130 |         },
131 |       };
132 |       return acc;
133 |     }, {} as { [key: string]: MCPTool });
134 | 
135 |     // Log available tools
136 |     console.log('Available tools:');
137 |     Object.values(this.tools).forEach(tool => {
138 |       console.log(`- ${tool.name}: ${tool.schema.function.description}`);
139 |     });
140 |   }
141 | 
142 |   async processUserInput(
143 |     input: string,
144 |     messages: Content[] = []
145 |   ): Promise<Content[]> {
146 |     // Add system prompt if this is the first message
147 |     if (messages.length === 0) {
148 |       messages.push({
149 |         role: 'system',
150 |         parts: [{ text: SYSTEM_PROMPT }]
151 |       });
152 |     }
153 | 
154 |     // Check if this is a Gemini-specific request
155 |     const geminiPatterns = [
156 |       /^(?:ask|tell|get|have|let)\s+gemini\s+(?:to\s+)?(.+)/i,
157 |       /^gemini[,:]?\s+(.+)/i,
158 |       /^(?:can you )?(?:ask|tell|get|have|let)\s+gemini\s+(?:to\s+)?(.+)/i
159 |     ];
160 | 
161 |     let cleanedInput = input;
162 |     for (const pattern of geminiPatterns) {
163 |       const match = input.match(pattern);
164 |       if (match) {
165 |         cleanedInput = match[1].trim();
166 |         console.log('Detected Gemini request. Cleaned input:', cleanedInput);
167 |         break;
168 |       }
169 |     }
170 | 
171 |     const contents: Content[] = [...messages];
172 |     contents.push({ role: 'user', parts: [{ text: cleanedInput }] });
173 | 
174 |     const response = await this.model.generateContent({
175 |       contents,
176 |       generationConfig: {
177 |         temperature: 0,
178 |         maxOutputTokens: 8192,
179 |       },
180 |     });
181 | 
182 |     const result = await response.response;
183 |     if (!result.candidates?.[0]?.content?.parts) {
184 |       throw new Error('Invalid response from Gemini API');
185 |     }
186 | 
187 |     contents.push({ role: 'model', parts: result.candidates[0].content.parts });
188 | 
189 |     for (const part of result.candidates[0].content.parts as ExtendedPart[]) {
190 |       if (part.functionCall) {
191 |         const { name, args } = part.functionCall;
192 |         const toolResult = await this.tools[name].callable(args);
193 | 
194 |         contents.push({
195 |           role: 'user',
196 |           parts: [
197 |             {
198 |               functionResponse: {
199 |                 name,
200 |                 response: { result: toolResult },
201 |               },
202 |             } as ExtendedPart,
203 |           ],
204 |         });
205 | 
206 |         const followUpResponse = await this.model.generateContent({
207 |           contents,
208 |           generationConfig: {
209 |             temperature: 0,
210 |             maxOutputTokens: 8192,
211 |           },
212 |         });
213 | 
214 |         const followUpResult = await followUpResponse.response;
215 |         if (!followUpResult.candidates?.[0]?.content?.parts) {
216 |           throw new Error('Invalid follow-up response from Gemini API');
217 |         }
218 | 
219 |         contents.push({
220 |           role: 'model',
221 |           parts: followUpResult.candidates[0].content.parts,
222 |         });
223 |       }
224 |     }
225 | 
226 |     return contents;
227 |   }
228 | 
229 |   async disconnect(): Promise<void> {
230 |     await this.mcpClient.disconnect();
231 |   }
232 | }
233 | 
234 | async function main() {
235 |   // Load Claude Desktop config
236 |   const homedir = require('os').homedir();
237 |   const configPath = path.join(homedir, 'Library/Application Support/Claude/claude_desktop_config.json');
238 |   
239 |   let mcpConfig: MCPServersConfig;
240 |   try {
241 |     const configContent = fs.readFileSync(configPath, 'utf-8');
242 |     mcpConfig = JSON.parse(configContent);
243 |   } catch (error) {
244 |     console.error('Failed to load Claude Desktop config:', error);
245 |     process.exit(1);
246 |   }
247 | 
248 |   // Get Gemini server config
249 |   const geminiConfig = mcpConfig.mcpServers['gemini'];
250 |   if (!geminiConfig) {
251 |     throw new Error('Gemini MCP server configuration not found');
252 |   }
253 | 
254 |   const apiKey = geminiConfig.env?.GEMINI_API_KEY || process.env.GEMINI_API_KEY;
255 |   if (!apiKey) {
256 |     throw new Error('GEMINI_API_KEY not found in environment or config');
257 |   }
258 | 
259 |   // Configure MCP server parameters for Gemini
260 |   const serverParams: MCPServerParameters = {
261 |     command: geminiConfig.command,
262 |     args: geminiConfig.args,
263 |     env: {
264 |       ...geminiConfig.env,
265 |       GEMINI_API_KEY: apiKey
266 |     }
267 |   };
268 | 
269 |   const agent = new MCPAgent(apiKey, serverParams);
270 |   await agent.initialize();
271 | 
272 |   const readline = createInterface({
273 |     input: process.stdin,
274 |     output: process.stdout,
275 |   });
276 | 
277 |   let messages: Content[] = [];
278 | 
279 |   try {
280 |     while (true) {
281 |       const input = await new Promise<string>((resolve) => {
282 |         readline.question('Enter your prompt (or "quit" to exit): ', resolve);
283 |       });
284 | 
285 |       if (['quit', 'exit', 'q'].includes(input.toLowerCase())) {
286 |         break;
287 |       }
288 | 
289 |       try {
290 |         messages = await agent.processUserInput(input, messages);
291 |         // Find and display the last model message
292 |         for (let i = messages.length - 1; i >= 0; i--) {
293 |           const message = messages[i];
294 |           if (message.role === 'model') {
295 |             for (const part of message.parts) {
296 |               if (part.text?.trim()) {
297 |                 console.log(`Assistant: ${part.text}`);
298 |                 break;
299 |               }
300 |             }
301 |             break;
302 |           }
303 |         }
304 |       } catch (error) {
305 |         console.error('Error processing input:', error);
306 |       }
307 |     }
308 |   } finally {
309 |     readline.close();
310 |     await agent.disconnect();
311 |   }
312 | }
313 | 
314 | if (require.main === module) {
315 |   main().catch((error) => {
316 |     console.error('Fatal error:', error);
317 |     process.exit(1);
318 |   });
319 | } 
```