# Directory Structure ``` ├── .cursorrules ├── .gitignore ├── Dockerfile ├── LICENSE.txt ├── package-lock.json ├── package.json ├── README.md ├── smithery.yaml ├── src │ ├── gemini_mcp_agent.ts │ ├── gemini_mcp_server.ts │ └── mcp_client.ts └── tsconfig.json ``` # Files -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- ``` # Dependencies node_modules/ .pnp .pnp.js # Build output dist/ build/ # Environment variables .env .env.local .env.development.local .env.test.local .env.production.local # IDE and editor files .idea/ .vscode/ *.swp *.swo .DS_Store # Logs npm-debug.log* yarn-debug.log* yarn-error.log* logs/ *.log # Testing coverage/ # Misc .DS_Store .env.local .env.development.local .env.test.local .env.production.local # Claude Desktop config (contains API keys) **/claude_desktop_config.json ``` -------------------------------------------------------------------------------- /.cursorrules: -------------------------------------------------------------------------------- ``` <cursor-tools Integration> Use the following commands to get AI assistance: cursor-tools web "your question" - Get answers from the web using Perplexity AI cursor-tools repo "your question" - Get context-aware answers about this repository using Google Gemini cursor-tools web is good for getting up-to-date information from the web that are not repository specific. For example, you can ask it to get the names and details of the latest OpenAI models or details about an external API. cursor-tools repo has the entire repository context available to it so it is good for repository search and tasks that require holistic understanding such as planning, debugging and answering questions about the architecture. 
if cursor-tools is not found in your PATH, you can run it with `npm exec cursor-tools "your question"` or `yarn cursor-tools "your question"` or `pnpm cursor-tools "your question"` depending on your package manager if cursor-tools is installed as a dependency. If cursor-tools is not installed as a dependency you should fall back to using `npx cursor-tools "your question"`. Note: configuration is in cursor-tools.config.json (falling back to ~/.cursor-tools/config.json) Note: api keys are loaded from .cursor-tools.env (falling back to ~/.cursor-tools/.env) </cursor-tools Integration> ``` -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- ```markdown # gemini-mcp-server [](https://smithery.ai/server/@georgejeffers/gemini-mcp-server) A TypeScript implementation of a Model Context Protocol (MCP) server that integrates with Google's Gemini Pro model. <a href="https://glama.ai/mcp/servers/ejwvacw7s0"> <img width="380" height="200" src="https://glama.ai/mcp/servers/ejwvacw7s0/badge" alt="Gemini Server MCP server" /> </a> ## MCP Tools ### generate_text *From server: gemini* ## Prerequisites - Node.js 18 or higher - Google Gemini API key - TypeScript - Claude Desktop app ## Installation ### Installing via Smithery To install Gemini MCP Server for Claude Desktop automatically via [Smithery](https://smithery.ai/server/@georgejeffers/gemini-mcp-server): ```bash npx -y @smithery/cli install @georgejeffers/gemini-mcp-server --client claude ``` ### Manual Installation 1. Clone the repository: ```bash git clone https://github.com/GeorgeJeffers/gemini-mcp-server.git cd gemini-mcp-server ``` 2. Install dependencies: ```bash npm install ``` 3. Build: ```bash npm run build ``` ## Claude Desktop Integration To use this server with Claude Desktop: 1. Open Claude Desktop 2. Go to Settings > Developer 3. Click "Edit Config" 4.
Add the following configuration: ```json { "mcpServers": { "gemini": { "command": "node", "args": ["/path/to/mcp-gemini-server/dist/gemini_mcp_server.js"], "env": { "GEMINI_API_KEY": "your_api_key_here" } } } } ``` Replace: - `/path/to/mcp-gemini-server` with the absolute path to where you cloned this repository - `your_api_key_here` with your actual Google Gemini API key The server will now be available in Claude Desktop's MCP server list. ## License MIT ## Author GeorgeJeffers ``` -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- ```dockerfile # Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile FROM node:lts-alpine WORKDIR /app # Copy package files COPY package*.json ./ # Install dependencies without running lifecycle scripts RUN npm install --ignore-scripts # Install missing ws module and its type declarations RUN npm install ws @types/ws # Copy source code to work directory COPY . .
# Build the TypeScript code RUN npm run build # Start the server CMD ["npm", "start"] ``` -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- ```json { "compilerOptions": { "target": "ES2020", "module": "CommonJS", "moduleResolution": "node", "outDir": "./dist", "rootDir": "./src", "strict": true, "esModuleInterop": true, "skipLibCheck": true, "forceConsistentCasingInFileNames": true, "resolveJsonModule": true, "allowJs": true, "sourceMap": true }, "ts-node": { "transpileOnly": true, "files": true }, "include": ["src/**/*"], "exclude": ["node_modules", "dist"] } ``` -------------------------------------------------------------------------------- /smithery.yaml: -------------------------------------------------------------------------------- ```yaml # Smithery configuration file: https://smithery.ai/docs/config#smitheryyaml startCommand: type: stdio configSchema: # JSON Schema defining the configuration options for the MCP. type: object required: - geminiApiKey properties: geminiApiKey: type: string description: Your Google Gemini API Key commandFunction: # A JS function that produces the CLI command based on the given config to start the MCP on stdio. 
|- (config) => ({ command: 'node', args: ['dist/gemini_mcp_server.js'], env: { GEMINI_API_KEY: config.geminiApiKey } }) exampleConfig: geminiApiKey: your_api_key_here ``` -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- ```json { "name": "gemini-mcp-server", "version": "1.0.0", "description": "A TypeScript implementation of a Model Context Protocol (MCP) server that integrates with Google's Gemini Pro model.", "main": "dist/gemini_mcp_server.js", "scripts": { "build": "tsc", "start": "node dist/gemini_mcp_server.js", "dev": "tsc -w", "test": "echo \"Error: no test specified\" && exit 1" }, "keywords": ["mcp", "gemini", "ai", "typescript"], "author": "GeorgeJeffers", "license": "MIT", "dependencies": { "@google/generative-ai": "^0.2.0", "@modelcontextprotocol/sdk": "^1.4.1", "dotenv": "^16.4.5", "zod": "^3.24.1" }, "devDependencies": { "@types/node": "^20.11.24", "typescript": "^5.3.3" } } ``` -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- ``` MIT License Copyright (c) 2024 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ``` -------------------------------------------------------------------------------- /src/gemini_mcp_server.ts: -------------------------------------------------------------------------------- ```typescript import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js"; import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js"; import { GoogleGenerativeAI, GenerativeModel } from '@google/generative-ai'; import { config } from 'dotenv'; import { z } from "zod"; // Immediately send the startup message before anything else can write to stdout process.stdout.write(JSON.stringify({ jsonrpc: "2.0", method: "startup", params: { transport: "stdio" } }) + '\n'); // Redirect stdout to stderr for everything else const originalStdoutWrite = process.stdout.write.bind(process.stdout); process.stdout.write = (chunk: any, ...args: any[]) => { return process.stderr.write(chunk, ...args); }; // Redirect console methods to stderr const consoleMethods = ['log', 'info', 'warn', 'error', 'debug'] as const; consoleMethods.forEach(method => { (console as any)[method] = (...args: any[]) => process.stderr.write(`[${method}] ` + args.join(' ') + '\n'); }); // Suppress npm and Node.js startup messages process.env.NODE_ENV = 'production'; process.env.NO_UPDATE_NOTIFIER = '1'; process.env.SUPPRESS_NO_CONFIG_WARNING = 'true'; process.env.npm_config_loglevel = 'silent'; // Load environment variables config(); const GEMINI_API_KEY = process.env.GEMINI_API_KEY ?? 
''; if (!GEMINI_API_KEY) { console.error('GEMINI_API_KEY environment variable is required'); process.exit(1); } // Define tool schemas const generateTextSchema = z.object({ prompt: z.string().min(1), temperature: z.number().min(0).max(1).optional(), maxOutputTokens: z.number().min(1).max(8192).optional(), topK: z.number().min(1).max(40).optional(), topP: z.number().min(0).max(1).optional(), stream: z.boolean().optional(), }); type GenerateTextParams = z.infer<typeof generateTextSchema>; class GeminiMCPServer { private model: GenerativeModel; private server: McpServer; private transport: StdioServerTransport; private chat: any; // Store chat session constructor() { const genAI = new GoogleGenerativeAI(GEMINI_API_KEY); this.model = genAI.getGenerativeModel({ model: 'gemini-1.5-pro' }); this.chat = this.model.startChat(); this.server = new McpServer({ name: "gemini", version: "1.0.0", capabilities: { tools: { generate_text: { description: "Generate text using Gemini Pro model", streaming: true } } } }); this.transport = new StdioServerTransport(); } private async generateText(params: GenerateTextParams) { try { const { prompt, temperature = 0.7, maxOutputTokens = 8192, topK, topP, stream = false } = params; const generationConfig = { temperature, maxOutputTokens, topK, topP, }; console.log('Sending message to Gemini:', prompt); if (stream) { const result = await this.chat.sendMessageStream(prompt); let fullText = ''; for await (const chunk of result.stream) { const chunkText = chunk.text(); fullText += chunkText; // Send intermediate chunk as progress event // Note: Progress events are not yet supported in MCP SDK // this.server.emit("generate_text/progress", { // content: [{ // type: "text", // text: chunkText // }] // }); } console.log('Received streamed response from Gemini:', fullText); return { content: [{ type: "text" as const, text: fullText }] }; } else { const result = await this.chat.sendMessage(prompt); const response = result.response.text(); 
console.log('Received response from Gemini:', response); return { content: [{ type: "text" as const, text: response }] }; } } catch (err) { console.error('Error generating content:', err); return { content: [{ type: "text" as const, text: err instanceof Error ? err.message : 'Internal error' }], isError: true }; } } async start() { try { console.info('Initializing Gemini MCP server...'); // Register generate_text tool this.server.tool( "generate_text", generateTextSchema.shape, async (args: GenerateTextParams) => this.generateText(args) ); // Restore stdout for MCP communication process.stdout.write = originalStdoutWrite; // Connect using stdio transport await this.server.connect(this.transport); console.info('Server started successfully and waiting for messages...'); } catch (error) { console.error('Failed to start server:', error); process.exit(1); } } async stop() { try { // Note: Disconnect is not yet supported in MCP SDK // await this.server.disconnect(); console.info('Server stopped successfully'); } catch (error) { console.error('Error stopping server:', error); process.exit(1); } } } // Handle shutdown gracefully process.on('SIGINT', () => { console.info('Server shutting down'); process.exit(0); }); // Handle uncaught errors process.on('uncaughtException', (error) => { console.error('Uncaught exception:', error); process.exit(1); }); process.on('unhandledRejection', (reason) => { console.error('Unhandled rejection:', reason); process.exit(1); }); // Start the server const server = new GeminiMCPServer(); server.start().catch(error => { console.error('Unhandled error:', error); process.exit(1); }); ``` -------------------------------------------------------------------------------- /src/mcp_client.ts: -------------------------------------------------------------------------------- ```typescript import { spawn, ChildProcess } from 'child_process'; import WebSocket from 'ws'; export interface MCPServerParameters { command: string; args: string[]; env?: 
NodeJS.ProcessEnv | null; } export interface MCPClient { connect(): Promise<void>; disconnect(): Promise<void>; call_tool(toolName: string): (args: any) => Promise<any>; list_tools(): Promise<any[]>; } export class MCPClientImpl implements MCPClient { private process: ChildProcess | null = null; private socket: WebSocket | null = null; private messageQueue: Buffer[] = []; private currentResolver: ((value: Buffer) => void) | null = null; private rpcInterface: { read: () => Promise<Buffer>; write: (data: Buffer) => Promise<void>; } | null = null; constructor(private serverParams: MCPServerParameters) { console.log('MCPClientImpl initialized with params:', { command: serverParams.command, args: serverParams.args, env: serverParams.env ? Object.keys(serverParams.env) : null }); } async connect(): Promise<void> { console.log('Attempting to connect to MCP server...'); return new Promise((resolve, reject) => { console.log('Spawning process:', this.serverParams.command, this.serverParams.args); this.process = spawn(this.serverParams.command, this.serverParams.args, { env: { ...process.env, ...this.serverParams.env } }); if (!this.process) { const error = new Error('Failed to start MCP server process'); console.error('Spawn failed:', error); reject(error); return; } console.log('Process spawned with PID:', this.process.pid); this.process.on('error', (err: Error) => { console.error('MCP server process error:', err); console.error('Error details:', { message: err.message, name: err.name, stack: err.stack }); reject(new Error(`Failed to execute MCP server: ${err.message}`)); this.process = null; }); this.process.on('exit', (code: number, signal: string) => { console.warn(`MCP server process exited with code ${code} and signal ${signal}`); this.process = null; }); let wsUrl = ''; this.process.stdout?.on('data', (data: Buffer) => { const msg = data.toString('utf-8'); console.log('MCP server stdout:', msg); const match = msg.match(/ws:\/\/localhost:\d+/); if (match) { wsUrl = 
match[0]; console.log('WebSocket URL found:', wsUrl); this.createWebSocket(wsUrl).then(resolve).catch(reject); } }); this.process.stderr?.on('data', (data: Buffer) => { console.error(`MCP server stderr: ${data.toString('utf-8')}`); }); }); } private async createWebSocket(wsUrl: string): Promise<void> { console.log('Creating WebSocket connection to:', wsUrl); return new Promise((resolve, reject) => { this.socket = new WebSocket(wsUrl); this.socket.on('open', () => { console.log('WebSocket connection established'); this.rpcInterface = { read: async () => { console.log('RPC read called'); return new Promise<Buffer>((resolveRead) => { if (this.messageQueue.length > 0) { const message = Buffer.concat(this.messageQueue); this.messageQueue = []; console.log('Reading from message queue:', message.toString()); resolveRead(message); } else { console.log('Waiting for message...'); this.currentResolver = resolveRead; } }); }, write: async (data: Buffer) => { console.log('RPC write called with data:', data.toString()); if (!this.socket?.readyState) { const error = new Error('WebSocket not connected'); console.error('Write failed:', error); throw error; } this.socket.send(data); console.log('Data sent successfully'); }, }; resolve(); }); this.socket.on('message', (data: WebSocket.Data) => { console.log('WebSocket message received:', data.toString()); const buffer = Buffer.from(data as Buffer); if (this.currentResolver) { console.log('Resolving pending read'); this.currentResolver(buffer); this.currentResolver = null; } else { console.log('Queueing message'); this.messageQueue.push(buffer); } }); this.socket.on('error', (err: Error) => { console.error('WebSocket error:', { message: err.message, name: err.name, stack: err.stack }); reject(new Error(`WebSocket connection failed: ${err.message}`)); }); this.socket.on('close', (code: number, reason: Buffer) => { console.log(`WebSocket connection closed with code ${code}`, { reason: reason.toString(), wasClean: code === 1000 }); }); 
// Add connection timeout setTimeout(() => { if (this.socket?.readyState !== WebSocket.OPEN) { const error = new Error('WebSocket connection timeout'); console.error('Connection timeout:', error); reject(error); } }, 10000); // 10 second timeout }); } async list_tools(): Promise<any[]> { if (!this.rpcInterface) { throw new Error('Not connected to MCP server'); } const request = { jsonrpc: '2.0', method: 'list_tools', id: Math.floor(Math.random() * 1000000), }; await this.rpcInterface.write(Buffer.from(JSON.stringify(request))); const response = await this.rpcInterface.read(); const result = JSON.parse(response.toString()); if (result.error) { throw new Error(result.error.message); } return result.result; } call_tool(toolName: string): (args: any) => Promise<any> { const rpcInterface = this.rpcInterface; if (!rpcInterface) { throw new Error('Not connected to MCP server'); } return async (args: any) => { const request = { jsonrpc: '2.0', method: toolName, params: args, id: Math.floor(Math.random() * 1000000), }; await rpcInterface.write(Buffer.from(JSON.stringify(request))); const response = await rpcInterface.read(); const result = JSON.parse(response.toString()); if (result.error) { throw new Error(result.error.message); } return result.result; }; } async disconnect(): Promise<void> { if (this.socket?.readyState === WebSocket.OPEN) { this.socket.close(); this.socket = null; } if (this.process) { this.process.kill(); this.process = null; } } } ``` -------------------------------------------------------------------------------- /src/gemini_mcp_agent.ts: -------------------------------------------------------------------------------- ```typescript import { GenerativeModel, GoogleGenerativeAI, GenerateContentResult, Content, Part, GenerationConfig, } from '@google/generative-ai'; import { config } from 'dotenv'; import { MCPClientImpl, MCPServerParameters } from './mcp_client'; import { createInterface } from 'readline'; import * as fs from 'fs'; import * as path from 
'path'; config(); // Load environment variables // Use Gemini 1.5 Pro for enhanced capabilities const MODEL_ID = 'gemini-1.5-pro'; // System prompt optimized for Gemini const SYSTEM_PROMPT = `You are a helpful assistant that specializes in routing requests to Google's Gemini Pro model. You should: 1. Recognize and respond to any requests that mention Gemini, including phrases like: - "ask Gemini..." - "get Gemini to..." - "have Gemini..." - "tell Gemini..." Or any similar variations that imply the user wants to interact with Gemini. 2. For such requests: - Extract the actual query/task from the request - Remove the "ask Gemini" or similar prefix - Send the cleaned query to Gemini - Return Gemini's response 3. For requests that don't explicitly mention Gemini, process them normally using your own capabilities 4. Maintain conversation context and generate accurate responses Please provide clear responses while acting as a seamless bridge to Gemini when requested.`; interface FunctionDeclaration { name: string; description: string; parameters: { type: string; properties: Record<string, any>; required: string[]; }; } interface Tool { functionDeclarations: FunctionDeclaration[]; } interface MCPTool { name: string; callable: (...args: any[]) => Promise<any>; schema: { type: string; function: { name: string; description: string; parameters: any; }; }; } interface FunctionCall { name: string; args: Record<string, any>; } type ExtendedPart = Part & { functionCall?: FunctionCall; functionResponse?: { name: string; response: { result: any }; }; }; interface MCPServerConfig { command: string; args: string[]; env?: Record<string, string>; headers?: Record<string, string>; baseUrl?: string; } interface MCPServersConfig { mcpServers: Record<string, MCPServerConfig>; } class MCPAgent { private genAI: GoogleGenerativeAI; private model: GenerativeModel; private mcpClient: MCPClientImpl; private tools: { [key: string]: MCPTool } = {}; constructor(apiKey: string, serverParams: 
MCPServerParameters) { this.genAI = new GoogleGenerativeAI(apiKey); this.model = this.genAI.getGenerativeModel({ model: MODEL_ID, generationConfig: { temperature: 0, maxOutputTokens: 8192, // Increased output length }, }); this.mcpClient = new MCPClientImpl(serverParams); } async initialize(): Promise<void> { await this.mcpClient.connect(); await this.setupTools(); } private async setupTools(): Promise<void> { const mcpTools = await this.mcpClient.list_tools(); this.tools = mcpTools.reduce((acc, tool) => { acc[tool.name] = { name: tool.name, callable: this.mcpClient.call_tool(tool.name), schema: { type: 'function', function: { name: tool.name, description: tool.description, parameters: tool.inputSchema, }, }, }; return acc; }, {} as { [key: string]: MCPTool }); // Log available tools console.log('Available tools:'); Object.values(this.tools).forEach(tool => { console.log(`- ${tool.name}: ${tool.schema.function.description}`); }); } async processUserInput( input: string, messages: Content[] = [] ): Promise<Content[]> { // Add system prompt if this is the first message if (messages.length === 0) { messages.push({ role: 'system', parts: [{ text: SYSTEM_PROMPT }] }); } // Check if this is a Gemini-specific request const geminiPatterns = [ /^(?:ask|tell|get|have|let)\s+gemini\s+(?:to\s+)?(.+)/i, /^gemini[,:]?\s+(.+)/i, /^(?:can you )?(?:ask|tell|get|have|let)\s+gemini\s+(?:to\s+)?(.+)/i ]; let cleanedInput = input; for (const pattern of geminiPatterns) { const match = input.match(pattern); if (match) { cleanedInput = match[1].trim(); console.log('Detected Gemini request. 
Cleaned input:', cleanedInput); break; } } const contents: Content[] = [...messages]; contents.push({ role: 'user', parts: [{ text: cleanedInput }] }); const response = await this.model.generateContent({ contents, generationConfig: { temperature: 0, maxOutputTokens: 8192, }, }); const result = await response.response; if (!result.candidates?.[0]?.content?.parts) { throw new Error('Invalid response from Gemini API'); } contents.push({ role: 'model', parts: result.candidates[0].content.parts }); for (const part of result.candidates[0].content.parts as ExtendedPart[]) { if (part.functionCall) { const { name, args } = part.functionCall; const toolResult = await this.tools[name].callable(args); contents.push({ role: 'user', parts: [ { functionResponse: { name, response: { result: toolResult }, }, } as ExtendedPart, ], }); const followUpResponse = await this.model.generateContent({ contents, generationConfig: { temperature: 0, maxOutputTokens: 8192, }, }); const followUpResult = await followUpResponse.response; if (!followUpResult.candidates?.[0]?.content?.parts) { throw new Error('Invalid follow-up response from Gemini API'); } contents.push({ role: 'model', parts: followUpResult.candidates[0].content.parts, }); } } return contents; } async disconnect(): Promise<void> { await this.mcpClient.disconnect(); } } async function main() { // Load Claude Desktop config const homedir = require('os').homedir(); const configPath = path.join(homedir, 'Library/Application Support/Claude/claude_desktop_config.json'); let mcpConfig: MCPServersConfig; try { const configContent = fs.readFileSync(configPath, 'utf-8'); mcpConfig = JSON.parse(configContent); } catch (error) { console.error('Failed to load Claude Desktop config:', error); process.exit(1); } // Get Gemini server config const geminiConfig = mcpConfig.mcpServers['gemini']; if (!geminiConfig) { throw new Error('Gemini MCP server configuration not found'); } const apiKey = geminiConfig.env?.GEMINI_API_KEY || 
process.env.GEMINI_API_KEY; if (!apiKey) { throw new Error('GEMINI_API_KEY not found in environment or config'); } // Configure MCP server parameters for Gemini const serverParams: MCPServerParameters = { command: geminiConfig.command, args: geminiConfig.args, env: { ...geminiConfig.env, GEMINI_API_KEY: apiKey } }; const agent = new MCPAgent(apiKey, serverParams); await agent.initialize(); const readline = createInterface({ input: process.stdin, output: process.stdout, }); let messages: Content[] = []; try { while (true) { const input = await new Promise<string>((resolve) => { readline.question('Enter your prompt (or "quit" to exit): ', resolve); }); if (['quit', 'exit', 'q'].includes(input.toLowerCase())) { break; } try { messages = await agent.processUserInput(input, messages); // Find and display the last model message for (let i = messages.length - 1; i >= 0; i--) { const message = messages[i]; if (message.role === 'model') { for (const part of message.parts) { if (part.text?.trim()) { console.log(`Assistant: ${part.text}`); break; } } break; } } } catch (error) { console.error('Error processing input:', error); } } } finally { readline.close(); await agent.disconnect(); } } if (require.main === module) { main().catch((error) => { console.error('Fatal error:', error); process.exit(1); }); } ```