# Directory Structure
```
├── .env.example
├── .eslintrc.json
├── .github
│ └── workflows
│ ├── ci.yml
│ └── release.yml
├── .gitignore
├── .npmignore
├── .prettierignore
├── .prettierrc.json
├── Contribution-Guide.md
├── docker-compose.yml
├── Dockerfile
├── docs
│ ├── api.md
│ ├── configuration.md
│ └── troubleshooting.md
├── jest.config.js
├── LICENSE
├── package-lock.json
├── package.json
├── README.md
├── scripts
│ ├── build.sh
│ ├── dev.sh
│ └── test.sh
├── src
│ ├── config
│ │ └── index.ts
│ ├── enhanced-stdio-server.ts
│ ├── types.ts
│ └── utils
│ ├── errors.ts
│ ├── logger.ts
│ ├── rateLimiter.ts
│ └── validation.ts
├── tests
│ ├── integration
│ │ └── gemini-api.test.ts
│ ├── setup.ts
│ └── unit
│ ├── config.test.ts
│ ├── errors.test.ts
│ └── validation.test.ts
└── tsconfig.json
```
# Files
--------------------------------------------------------------------------------
/.npmignore:
--------------------------------------------------------------------------------
```
1 | # Source files
2 | src/
3 | *.ts
4 | !dist/**/*.d.ts
5 |
6 | # Development files
7 | .gitignore
8 | .eslintrc*
9 | .prettierrc*
10 | tsconfig.json
11 | jest.config.*
12 |
13 | # Documentation (keep essential ones)
14 | docs/
15 | *.md
16 | !README.md
17 | !LICENSE
18 | !CHANGELOG.md
19 |
20 | # Test files
21 | __tests__/
22 | *.test.*
23 | *.spec.*
24 |
25 | # CI/CD
26 | .github/
27 |
28 | # IDE
29 | .vscode/
30 | .idea/
31 |
32 | # Misc
33 | .env
34 | .env.*
35 | *.log
36 | .DS_Store
```
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
```
1 | # Dependencies
2 | node_modules/
3 |
4 | # Build output
5 | dist/
6 | build/
7 | *.tsbuildinfo
8 |
9 | # Coverage
10 | coverage/
11 |
12 | # Logs
13 | *.log
14 | logs/
15 |
16 | # Environment files
17 | .env
18 | .env.local
19 | .env.*.local
20 |
21 | # Cache directories
22 | .cache/
23 | .parcel-cache/
24 | .npm/
25 | .eslintcache
26 |
27 | # OS files
28 | .DS_Store
29 | Thumbs.db
30 |
31 | # IDE files
32 | .vscode/
33 | .idea/
34 |
35 | # Package files
36 | package-lock.json
37 | yarn.lock
38 | pnpm-lock.yaml
39 |
40 | # Generated files
41 | CHANGELOG.md
42 |
```
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
```
1 | # Dependencies
2 | node_modules/
3 | npm-debug.log*
4 | yarn-debug.log*
5 | yarn-error.log*
6 | pnpm-debug.log*
7 |
8 | # Build output
9 | dist/
10 | build/
11 | *.tsbuildinfo
12 |
13 | # Environment variables
14 | .env
15 | .env.local
16 | .env.development.local
17 | .env.test.local
18 | .env.production.local
19 |
20 | # Logs
21 | logs/
22 | *.log
23 |
24 | # Test coverage
25 | coverage/
26 | .nyc_output
27 | *.lcov
28 |
29 | # IDE files
30 | .vscode/
31 | .idea/
32 | *.swp
33 | *.swo
34 | *~
35 |
36 | # OS files
37 | .DS_Store
38 | .DS_Store?
39 | ._*
40 | Thumbs.db
41 | ehthumbs.db
42 |
43 | # Claude configuration
44 | .claude/
45 | claude_desktop_config*.json
46 |
47 | # Temporary files
48 | tmp/
49 | temp/
50 | test-mcp-schema.js
51 |
52 | # Cache directories
53 | .npm
54 | .eslintcache
55 | .cache
56 | .parcel-cache
```
--------------------------------------------------------------------------------
/.prettierrc.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "semi": true,
3 | "trailingComma": "none",
4 | "singleQuote": true,
5 | "printWidth": 100,
6 | "tabWidth": 2,
7 | "useTabs": false,
8 | "bracketSpacing": true,
9 | "bracketSameLine": false,
10 | "arrowParens": "avoid",
11 | "endOfLine": "lf",
12 | "overrides": [
13 | {
14 | "files": "*.json",
15 | "options": {
16 | "printWidth": 80,
17 | "tabWidth": 2
18 | }
19 | },
20 | {
21 | "files": "*.md",
22 | "options": {
23 | "printWidth": 80,
24 | "proseWrap": "always"
25 | }
26 | },
27 | {
28 | "files": "*.yml",
29 | "options": {
30 | "tabWidth": 2,
31 | "singleQuote": false
32 | }
33 | }
34 | ]
35 | }
36 |
```
--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------
```
1 | GEMINI_API_KEY=your_gemini_api_key_here
2 |
3 | # Logging level (default: info)
4 | # Options: error, warn, info, debug
5 | LOG_LEVEL=info
6 |
7 | # Enable performance metrics (default: false)
8 | ENABLE_METRICS=false
9 |
10 | # Rate limiting configuration
11 | RATE_LIMIT_ENABLED=true # Enable/disable rate limiting (default: true)
12 | RATE_LIMIT_REQUESTS=100 # Max requests per window (default: 100)
13 | RATE_LIMIT_WINDOW=60000 # Time window in ms (default: 60000 = 1 minute)
14 |
15 | # Request timeout in milliseconds (default: 30000 = 30 seconds)
16 | REQUEST_TIMEOUT=30000
17 |
18 | # Environment mode (default: production)
19 | NODE_ENV=production
```
--------------------------------------------------------------------------------
/.eslintrc.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "root": true,
3 | "parser": "@typescript-eslint/parser",
4 | "plugins": [
5 | "@typescript-eslint"
6 | ],
7 | "extends": [
8 | "eslint:recommended",
9 | "plugin:@typescript-eslint/recommended",
10 | "prettier"
11 | ],
12 | "parserOptions": {
13 | "ecmaVersion": 2022,
14 | "sourceType": "module",
15 | "project": "./tsconfig.json"
16 | },
17 | "env": {
18 | "node": true,
19 | "es2022": true
20 | },
21 | "rules": {
22 | "@typescript-eslint/no-unused-vars": ["error", { "argsIgnorePattern": "^_" }],
23 | "@typescript-eslint/no-explicit-any": "warn",
24 | "@typescript-eslint/explicit-function-return-type": "off",
25 | "@typescript-eslint/explicit-module-boundary-types": "off",
26 | "@typescript-eslint/no-non-null-assertion": "warn",
27 | "no-console": "off",
28 | "prefer-const": "error",
29 | "no-var": "error",
30 | "object-shorthand": "error",
31 | "prefer-template": "error",
32 | "no-duplicate-imports": "error",
33 | "eqeqeq": ["error", "always"],
34 | "no-eval": "error",
35 | "no-implied-eval": "error",
36 | "no-new-func": "error",
37 | "no-return-assign": "error",
38 | "no-self-compare": "error",
39 | "no-throw-literal": "error",
40 | "no-unused-expressions": "error",
41 | "radix": "error"
42 | },
43 | "overrides": [
44 | {
45 | "files": ["**/*.test.ts", "**/*.spec.ts"],
46 | "env": {
47 | "jest": true
48 | },
49 | "rules": {
50 | "@typescript-eslint/no-explicit-any": "off",
51 | "no-unused-expressions": "off"
52 | }
53 | }
54 | ],
55 | "ignorePatterns": [
56 | "dist/",
57 | "node_modules/",
58 | "coverage/",
59 | "*.js",
60 | "*.d.ts"
61 | ]
62 | }
63 |
```
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
```markdown
1 | # 🤖 MCP Server Gemini
2 |
3 | <!-- [](https://badge.fury.io/js/mcp-server-gemini) -->
4 | [](https://opensource.org/licenses/MIT)
5 | [](https://www.typescriptlang.org/)
6 | [](https://nodejs.org/)
7 | <!-- [](https://github.com/gurr-i/mcp-server-gemini/actions) -->
8 |
9 | A **state-of-the-art Model Context Protocol (MCP) server** that provides seamless integration with Google's Gemini AI models. This server enables Claude Desktop and other MCP-compatible clients to leverage the full power of Gemini's advanced AI capabilities.
10 |
11 | ## ✨ Features
12 |
13 | ### 🧠 **Latest Gemini Models**
14 | - **Gemini 2.5 Pro** - Most capable thinking model for complex reasoning
15 | - **Gemini 2.5 Flash** - Fast thinking model with best price/performance
16 | - **Gemini 2.0 Series** - Latest generation models with advanced features
17 | - **Gemini 1.5 Series** - Proven, reliable models for production use
18 |
19 | ### 🚀 **Advanced Capabilities**
20 | - **🧠 Thinking Models** - Gemini 2.5 series with step-by-step reasoning
21 | - **🔍 Google Search Grounding** - Real-time web information integration
22 | - **📊 JSON Mode** - Structured output with schema validation
23 | - **🎯 System Instructions** - Behavior customization and control
24 | - **👁️ Vision Support** - Image analysis and multimodal capabilities
25 | - **💬 Conversation Memory** - Context preservation across interactions
26 |
27 | ### 🛠️ **Production Ready**
28 | - **TypeScript** - Full type safety and modern development
29 | - **Comprehensive Error Handling** - Robust error management and recovery
30 | - **Rate Limiting** - Built-in protection against API abuse
31 | - **Detailed Logging** - Comprehensive monitoring and debugging
32 | - **Input Validation** - Secure parameter validation with Zod
33 | - **Retry Logic** - Automatic retry with exponential backoff
34 |
35 | ## 🚀 Quick Start
36 |
37 | ### Prerequisites
38 |
39 | - **Node.js 16+** ([Download](https://nodejs.org/))
40 | - **Google AI Studio API Key** ([Get one here](https://aistudio.google.com/app/apikey))
41 |
42 | ### Installation
43 |
44 | #### Option 1: Global Installation (Recommended)
45 | ```bash
46 | npm install -g mcp-server-gemini
47 | ```
48 |
49 | #### Option 2: Local Development
50 | ```bash
51 | git clone https://github.com/gurr-i/mcp-server-gemini-pro.git
52 | cd mcp-server-gemini-pro
53 | npm install
54 | npm run build
55 | ```
56 |
57 | ### Configuration
58 |
59 | #### 1. Set up your API key
60 |
61 | **Option A: Environment Variable**
62 | ```bash
63 | export GEMINI_API_KEY="your_api_key_here"
64 | ```
65 |
66 | **Option B: .env file**
67 | ```bash
68 | echo "GEMINI_API_KEY=your_api_key_here" > .env
69 | ```
70 |
71 | #### 2. Configure Claude Desktop
72 |
73 | Add to your `claude_desktop_config.json`:
74 |
75 | **For Global Installation:**
76 | ```json
77 | {
78 | "mcpServers": {
79 | "gemini": {
80 | "command": "mcp-server-gemini",
81 | "env": {
82 | "GEMINI_API_KEY": "your_api_key_here"
83 | }
84 | }
85 | }
86 | }
87 | ```
88 |
89 | **For Local Installation:**
90 | ```json
91 | {
92 | "mcpServers": {
93 | "gemini": {
94 | "command": "node",
95 | "args": ["/path/to/mcp-server-gemini-pro/dist/enhanced-stdio-server.js"],
96 | "env": {
97 | "GEMINI_API_KEY": "your_api_key_here"
98 | }
99 | }
100 | }
101 | }
102 | ```
103 |
104 | #### 3. Restart Claude Desktop
105 |
106 | Close and restart Claude Desktop completely for changes to take effect.
107 |
108 | ## 💡 Usage Examples
109 |
110 | Once configured, you can use Gemini through Claude Desktop with natural language:
111 |
112 | ### Basic Text Generation
113 | ```
114 | "Use Gemini to explain quantum computing in simple terms"
115 | "Generate a creative story about AI using Gemini 2.5 Pro"
116 | ```
117 |
118 | ### Advanced Features
119 | ```
120 | "Use Gemini with JSON mode to extract key points from this text"
121 | "Use Gemini with grounding to get the latest news about AI"
122 | "Generate a Python function using Gemini's thinking capabilities"
123 | ```
124 |
125 | ### Image Analysis
126 | ```
127 | "Analyze this image with Gemini" (attach image)
128 | "What's in this screenshot using Gemini vision?"
129 | ```
130 |
131 | ### Development Tasks
132 | ```
133 | "Use Gemini to review this code and suggest improvements"
134 | "Generate comprehensive tests for this function using Gemini"
135 | ```
136 |
137 | ## ⚙️ Configuration
138 |
139 | ### Environment Variables
140 |
141 | The server can be configured using environment variables or a `.env` file:
142 |
143 | #### Required Configuration
144 | ```bash
145 | # Google AI Studio API Key (required)
146 | GEMINI_API_KEY=your_api_key_here
147 | ```
148 |
149 | #### Optional Configuration
150 | ```bash
151 | # Logging level (default: info)
152 | # Options: error, warn, info, debug
153 | LOG_LEVEL=info
154 |
155 | # Enable performance metrics (default: false)
156 | ENABLE_METRICS=false
157 |
158 | # Rate limiting configuration
159 | RATE_LIMIT_ENABLED=true # Enable/disable rate limiting (default: true)
160 | RATE_LIMIT_REQUESTS=100 # Max requests per window (default: 100)
161 | RATE_LIMIT_WINDOW=60000 # Time window in ms (default: 60000 = 1 minute)
162 |
163 | # Request timeout in milliseconds (default: 30000 = 30 seconds)
164 | REQUEST_TIMEOUT=30000
165 |
166 | # Environment mode (default: production)
167 | NODE_ENV=production
168 | ```
169 |
170 | ### Environment Setup
171 |
172 | #### Development Environment
173 | ```bash
174 | # .env for development
175 | GEMINI_API_KEY=your_api_key_here
176 | NODE_ENV=development
177 | LOG_LEVEL=debug
178 | RATE_LIMIT_ENABLED=false
179 | REQUEST_TIMEOUT=60000
180 | ```
181 |
182 | #### Production Environment
183 | ```bash
184 | # .env for production
185 | GEMINI_API_KEY=your_api_key_here
186 | NODE_ENV=production
187 | LOG_LEVEL=warn
188 | RATE_LIMIT_ENABLED=true
189 | RATE_LIMIT_REQUESTS=100
190 | RATE_LIMIT_WINDOW=60000
191 | REQUEST_TIMEOUT=30000
192 | ENABLE_METRICS=true
193 | ```
194 |
195 | ### Claude Desktop Configuration
196 |
197 | #### Configuration File Locations
198 | | OS | Path |
199 | |----|------|
200 | | **macOS** | `~/Library/Application Support/Claude/claude_desktop_config.json` |
201 | | **Windows** | `%APPDATA%\Claude\claude_desktop_config.json` |
202 | | **Linux** | `~/.config/Claude/claude_desktop_config.json` |
203 |
204 | #### Basic Configuration
205 | ```json
206 | {
207 | "mcpServers": {
208 | "gemini": {
209 | "command": "mcp-server-gemini",
210 | "env": {
211 | "GEMINI_API_KEY": "your_api_key_here"
212 | }
213 | }
214 | }
215 | }
216 | ```
217 |
218 | #### Advanced Configuration
219 | ```json
220 | {
221 | "mcpServers": {
222 | "gemini": {
223 | "command": "mcp-server-gemini",
224 | "env": {
225 | "GEMINI_API_KEY": "your_api_key_here",
226 | "LOG_LEVEL": "info",
227 | "RATE_LIMIT_REQUESTS": "200",
228 | "REQUEST_TIMEOUT": "45000"
229 | }
230 | }
231 | }
232 | }
233 | ```
234 |
235 | #### Local Development Configuration
236 | ```json
237 | {
238 | "mcpServers": {
239 | "gemini": {
240 | "command": "node",
241 | "args": ["/path/to/mcp-server-gemini-pro/dist/enhanced-stdio-server.js"],
242 | "cwd": "/path/to/mcp-server-gemini-pro",
243 | "env": {
244 | "GEMINI_API_KEY": "your_api_key_here",
245 | "NODE_ENV": "development",
246 | "LOG_LEVEL": "debug"
247 | }
248 | }
249 | }
250 | }
251 | ```
252 |
253 | ## 🛠️ Available Tools
254 |
255 | | Tool | Description | Key Features |
256 | |------|-------------|--------------|
257 | | **generate_text** | Generate text with advanced features | Thinking models, JSON mode, grounding |
258 | | **analyze_image** | Analyze images using vision models | Multi-modal understanding, detailed analysis |
259 | | **count_tokens** | Count tokens for cost estimation | Accurate token counting for all models |
260 | | **list_models** | List all available Gemini models | Real-time model availability and features |
261 | | **embed_text** | Generate text embeddings | High-quality vector representations |
262 | | **get_help** | Get usage help and documentation | Self-documenting with examples |
263 |
264 | ## 📊 Model Comparison
265 |
266 | | Model | Context Window | Features | Best For | Speed |
267 | |-------|----------------|----------|----------|-------|
268 | | **gemini-2.5-pro** | 2M tokens | Thinking, JSON, Grounding | Complex reasoning, coding | Slower |
269 | | **gemini-2.5-flash** ⭐ | 1M tokens | Thinking, JSON, Grounding | General purpose | Fast |
270 | | **gemini-2.5-flash-lite** | 1M tokens | Thinking, JSON | High-throughput tasks | Fastest |
271 | | **gemini-2.0-flash** | 1M tokens | JSON, Grounding | Standard tasks | Fast |
272 | | **gemini-2.0-flash-lite** | 1M tokens | JSON | Simple tasks | Fastest |
273 | | **gemini-2.0-pro-experimental** | 2M tokens | JSON, Grounding | Experimental features | Medium |
274 | | **gemini-1.5-pro** | 2M tokens | JSON | Legacy support | Medium |
275 | | **gemini-1.5-flash** | 1M tokens | JSON | Legacy support | Fast |
276 |
277 | ## 🔧 Development
278 |
279 | ### Prerequisites
280 | - **Node.js 16+** ([Download](https://nodejs.org/))
281 | - **npm 7+** (comes with Node.js)
282 | - **Git** for version control
283 | - **Google AI Studio API Key** ([Get one here](https://aistudio.google.com/app/apikey))
284 |
285 | ### Setup
286 | ```bash
287 | # Clone the repository
288 | git clone https://github.com/gurr-i/mcp-server-gemini-pro.git
289 | cd mcp-server-gemini-pro
290 |
291 | # Install dependencies
292 | npm install
293 |
294 | # Set up environment variables
295 | cp .env.example .env
296 | # Edit .env and add your GEMINI_API_KEY
297 | ```
298 |
299 | ### Available Scripts
300 |
301 | #### Development
302 | ```bash
303 | npm run dev # Start development server with hot reload
304 | npm run dev:watch # Start with file watching (nodemon)
305 | npm run build # Build for production
306 | npm run build:watch # Build with watch mode
307 | npm run clean # Clean build directory
308 | ```
309 |
310 | #### Testing
311 | ```bash
312 | npm test # Run all tests
313 | npm run test:watch # Run tests in watch mode
314 | npm run test:coverage # Run tests with coverage report
315 | npm run test:integration # Run integration tests (requires API key)
316 | ```
317 |
318 | #### Code Quality
319 | ```bash
320 | npm run lint # Lint TypeScript code
321 | npm run lint:fix # Fix linting issues automatically
322 | npm run format # Format code with Prettier
323 | npm run format:check # Check code formatting
324 | npm run type-check # Run TypeScript type checking
325 | npm run validate # Run all quality checks (lint + test + type-check)
326 | ```
327 |
328 | #### Release & Distribution
329 | ```bash
330 | npm run prepack # Prepare package for publishing
331 | npm run release # Build, validate, and publish to npm
332 | ```
333 |
334 | ### Project Structure
335 | ```
336 | mcp-server-gemini/
337 | ├── src/ # Source code
338 | │ ├── config/ # Configuration management
339 | │ │ └── index.ts # Environment config with Zod validation
340 | │ ├── utils/ # Utility modules
341 | │ │ ├── logger.ts # Structured logging system
342 | │ │ ├── errors.ts # Custom error classes & handling
343 | │ │ ├── validation.ts # Input validation with Zod
344 | │ │ └── rateLimiter.ts # Rate limiting implementation
345 | │ ├── enhanced-stdio-server.ts # Main MCP server implementation
346 | │ └── types.ts # TypeScript type definitions
347 | ├── tests/ # Test suite
348 | │ ├── unit/ # Unit tests
349 | │ │ ├── config.test.ts # Configuration tests
350 | │ │ ├── validation.test.ts # Validation tests
351 | │ │ └── errors.test.ts # Error handling tests
352 | │ ├── integration/ # Integration tests
353 | │ │ └── gemini-api.test.ts # Real API integration tests
354 | │ └── setup.ts # Test setup and utilities
355 | ├── docs/ # Documentation
356 | │ ├── api.md # API reference
357 | │ ├── configuration.md # Configuration guide
358 | │ └── troubleshooting.md # Troubleshooting guide
359 | ├── scripts/ # Build and utility scripts
360 | │ ├── build.sh # Production build script
361 | │ ├── dev.sh # Development script
362 | │ └── test.sh # Test execution script
363 | ├── .github/workflows/ # GitHub Actions CI/CD
364 | │ ├── ci.yml # Continuous integration
365 | │ └── release.yml # Automated releases
366 | ├── dist/ # Built output (generated)
367 | ├── coverage/ # Test coverage reports (generated)
368 | └── node_modules/ # Dependencies (generated)
369 | ```
370 |
371 | ## 🧪 Testing
372 |
373 | ### Test Suite Overview
374 | The project includes comprehensive testing with unit tests, integration tests, and code coverage reporting.
375 |
376 | ### Running Tests
377 |
378 | #### All Tests
379 | ```bash
380 | npm test # Run all tests (unit tests only by default)
381 | npm run test:watch # Run tests in watch mode for development
382 | npm run test:coverage # Run tests with coverage report
383 | ```
384 |
385 | #### Unit Tests
386 | ```bash
387 | npm test -- --testPathPattern=unit # Run only unit tests
388 | npm test -- --testNamePattern="config" # Run specific test suites
389 | ```
390 |
391 | #### Integration Tests
392 | Integration tests require a valid `GEMINI_API_KEY` and make real API calls:
393 |
394 | ```bash
395 | # Set API key and run integration tests
396 | GEMINI_API_KEY=your_api_key_here npm run test:integration
397 |
398 | # Or set in .env file and run
399 | npm run test:integration
400 | ```
401 |
402 | #### Test Coverage
403 | ```bash
404 | npm run test:coverage # Generate coverage report
405 | open coverage/lcov-report/index.html # View coverage report (macOS)
406 | ```
407 |
408 | ### Test Structure
409 |
410 | #### Unit Tests (`tests/unit/`)
411 | - **Configuration Tests**: Environment variable validation, config loading
412 | - **Validation Tests**: Input validation, schema validation, sanitization
413 | - **Error Handling Tests**: Custom error classes, error recovery, retry logic
414 | - **Utility Tests**: Logger, rate limiter, helper functions
415 |
416 | #### Integration Tests (`tests/integration/`)
417 | - **Gemini API Tests**: Real API calls to test connectivity and functionality
418 | - **Model Testing**: Verify all supported models work correctly
419 | - **Feature Testing**: JSON mode, grounding, embeddings, token counting
420 |
421 | ### Writing Tests
422 |
423 | #### Test File Structure
424 | ```typescript
425 | // tests/unit/example.test.ts
426 | import { describe, it, expect, beforeEach, afterEach } from '@jest/globals';
427 | import { YourModule } from '../../src/your-module.js';
428 |
429 | describe('YourModule', () => {
430 | beforeEach(() => {
431 | // Setup before each test
432 | });
433 |
434 | afterEach(() => {
435 | // Cleanup after each test
436 | });
437 |
438 | it('should do something', () => {
439 | // Test implementation
440 | expect(result).toBe(expected);
441 | });
442 | });
443 | ```
444 |
445 | #### Custom Matchers
446 | The test suite includes custom Jest matchers:
447 | ```typescript
448 | expect(response).toBeValidMCPResponse(); // Validates MCP response format
449 | ```
450 |
451 | ### Test Configuration
452 | Tests are configured in `jest.config.js` with:
453 | - **TypeScript Support**: Full ES modules and TypeScript compilation
454 | - **Coverage Thresholds**: Minimum 70% coverage required
455 | - **Test Timeout**: 30 seconds for integration tests
456 | - **Setup Files**: Automatic test environment setup
457 |
458 | ## 🐳 Docker Deployment
459 |
460 | ### Using Docker
461 |
462 | #### Build and Run
463 | ```bash
464 | # Build the Docker image
465 | docker build -t mcp-server-gemini .
466 |
467 | # Run the container
468 | docker run -d \
469 | --name mcp-server-gemini \
470 | -e GEMINI_API_KEY=your_api_key_here \
471 | -e LOG_LEVEL=info \
472 | mcp-server-gemini
473 | ```
474 |
475 | #### Using Docker Compose
476 | ```bash
477 | # Create .env file with your API key
478 | echo "GEMINI_API_KEY=your_api_key_here" > .env
479 |
480 | # Start the service
481 | docker-compose up -d
482 |
483 | # View logs
484 | docker-compose logs -f
485 |
486 | # Stop the service
487 | docker-compose down
488 | ```
489 |
490 | #### Development with Docker
491 | ```bash
492 | # Start development environment
493 | docker-compose --profile dev up
494 |
495 | # This mounts source code for live reloading
496 | ```
497 |
498 | ### Environment-Specific Deployments
499 |
500 | #### Production Deployment
501 | ```bash
502 | # Production build
503 | docker build --target production -t mcp-server-gemini:prod .
504 |
505 | # Run with production settings
506 | docker run -d \
507 | --name mcp-server-gemini-prod \
508 | --restart unless-stopped \
509 | -e GEMINI_API_KEY=your_api_key_here \
510 | -e NODE_ENV=production \
511 | -e LOG_LEVEL=warn \
512 | -e RATE_LIMIT_ENABLED=true \
513 | -e ENABLE_METRICS=true \
514 | mcp-server-gemini:prod
515 | ```
516 |
517 | #### Health Checks
518 | ```bash
519 | # Check container health
520 | docker ps
521 | docker logs mcp-server-gemini
522 |
523 | # Manual health check
524 | docker exec mcp-server-gemini node -e "console.log('Health check passed')"
525 | ```
526 |
527 | ## 🚀 Deployment Options
528 |
529 | ### 1. npm Global Installation
530 | ```bash
531 | # Install globally
532 | npm install -g mcp-server-gemini
533 |
534 | # Run directly
535 | GEMINI_API_KEY=your_key mcp-server-gemini
536 | ```
537 |
538 | ### 2. Local Installation
539 | ```bash
540 | # Clone and build
541 | git clone https://github.com/gurr-i/mcp-server-gemini-pro.git
542 | cd mcp-server-gemini-pro
543 | npm install
544 | npm run build
545 |
546 | # Run locally
547 | GEMINI_API_KEY=your_key npm start
548 | ```
549 |
550 | ### 3. Docker Deployment
551 | ```bash
552 | # Using Docker Hub (when published)
553 | docker run -e GEMINI_API_KEY=your_key mcp-server-gemini-pro:latest
554 |
555 | # Using local build
556 | docker build -t mcp-server-gemini-pro .
557 | docker run -e GEMINI_API_KEY=your_key mcp-server-gemini-pro
558 | ```
559 |
560 | ### 4. Process Manager (PM2)
561 | ```bash
562 | # Install PM2
563 | npm install -g pm2
564 |
565 | # Create ecosystem file
566 | cat > ecosystem.config.js << EOF
567 | module.exports = {
568 | apps: [{
569 | name: 'mcp-server-gemini',
570 | script: './dist/enhanced-stdio-server.js',
571 | env: {
572 | NODE_ENV: 'production',
573 | GEMINI_API_KEY: 'your_api_key_here',
574 | LOG_LEVEL: 'info'
575 | }
576 | }]
577 | }
578 | EOF
579 |
580 | # Start with PM2
581 | pm2 start ecosystem.config.js
582 | pm2 save
583 | pm2 startup
584 | ```
585 |
586 | ## 🔧 Troubleshooting
587 |
588 | ### Common Issues
589 |
590 | #### 1. Server Won't Start
591 | ```bash
592 | # Check if API key is set
593 | echo $GEMINI_API_KEY
594 |
595 | # Verify .env file exists and is readable
596 | cat .env | grep GEMINI_API_KEY
597 |
598 | # Check file permissions
599 | ls -la .env
600 | chmod 600 .env
601 | ```
602 |
603 | #### 2. API Key Issues
604 | ```bash
605 | # Test API key manually
606 | curl -H "Content-Type: application/json" \
607 | -d '{"contents":[{"parts":[{"text":"Hello"}]}]}' \
608 | -X POST "https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent?key=YOUR_API_KEY"
609 | ```
610 |
611 | #### 3. Claude Desktop Integration
612 | ```bash
613 | # Verify config file location (macOS)
614 | ls -la ~/Library/Application\ Support/Claude/claude_desktop_config.json
615 |
616 | # Validate JSON syntax
617 | cat claude_desktop_config.json | jq .
618 |
619 | # Check server installation
620 | which mcp-server-gemini
621 | npm list -g mcp-server-gemini
622 | ```
623 |
624 | #### 4. Rate Limiting
625 | ```bash
626 | # Temporarily disable rate limiting
627 | export RATE_LIMIT_ENABLED=false
628 |
629 | # Increase limits
630 | export RATE_LIMIT_REQUESTS=1000
631 | export RATE_LIMIT_WINDOW=60000
632 | ```
633 |
634 | ### Debug Mode
635 | ```bash
636 | # Enable debug logging
637 | export LOG_LEVEL=debug
638 | npm run dev
639 |
640 | # Or for production
641 | export LOG_LEVEL=debug
642 | npm start
643 | ```
644 |
645 | ### Getting Help
646 | - 🐛 [Report Issues](https://github.com/gurr-i/mcp-server-gemini-pro/issues)
647 | - 💬 [Discussions](https://github.com/gurr-i/mcp-server-gemini-pro/discussions)
648 | - 📚 [Documentation](docs/)
649 |
650 | ## 🔒 Security
651 |
652 | ### API Key Security
653 | - **Never commit API keys** to version control
654 | - **Use environment variables** or secure secret management
655 | - **Rotate keys regularly** for production use
656 | - **Use different keys** for development and production
657 |
658 | ### Rate Limiting
659 | - **Enable rate limiting** in production (`RATE_LIMIT_ENABLED=true`)
660 | - **Configure appropriate limits** based on your usage patterns
661 | - **Monitor API usage** to prevent quota exhaustion
662 |
663 | ### Input Validation
664 | - All inputs are **automatically validated** and sanitized
665 | - **XSS and injection protection** built-in
666 | - **Schema validation** for all tool parameters
667 |
668 | ### Container Security
669 | - Runs as **non-root user** in Docker
670 | - **Read-only filesystem** with minimal privileges
671 | - **Security scanning** in CI/CD pipeline
672 |
673 | ## 📚 Documentation
674 |
675 | - [API Documentation](docs/api.md)
676 | - [Configuration Guide](docs/configuration.md)
677 | - [Troubleshooting](docs/troubleshooting.md)
678 | - [Contributing Guide](Contribution-Guide.md)
679 |
680 | ## 🤝 Contributing
681 |
682 | We welcome contributions! Please see our [Contributing Guide](Contribution-Guide.md) for details.
683 |
684 | ### Development Workflow
685 | 1. Fork the repository
686 | 2. Create a feature branch
687 | 3. Make your changes
688 | 4. Add tests
689 | 5. Run `npm run validate`
690 | 6. Submit a pull request
691 |
692 | ## 📄 License
693 |
694 | MIT License - see [LICENSE](LICENSE) file for details.
695 |
696 | ## 🙏 Acknowledgments
697 |
698 | - Google AI for the Gemini API
699 | - Anthropic for the Model Context Protocol
700 | - The open-source community for inspiration and feedback
701 |
702 | ## 📞 Support
703 |
704 | - 🐛 [Report Issues](https://github.com/gurr-i/mcp-server-gemini-pro/issues)
705 | - 💬 [Discussions](https://github.com/gurr-i/mcp-server-gemini-pro/discussions)
706 | - 📧 [Email Support](mailto:[email protected])
707 |
708 | ---
709 |
710 | <div align="center">
711 | <strong>Made with ❤️ By Gurveer for the AI development community</strong>
712 | </div>
713 |
```
--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "compilerOptions": {
3 | "target": "ES2022",
4 | "module": "NodeNext",
5 | "moduleResolution": "NodeNext",
6 | "outDir": "./dist",
7 | "rootDir": "./src",
8 | "strict": true,
9 | "esModuleInterop": true,
10 | "skipLibCheck": true,
11 | "forceConsistentCasingInFileNames": true,
12 | "sourceMap": true
13 | },
14 | "ts-node": {
15 | "esm": true
16 | },
17 | "include": ["src/**/*"],
18 | "exclude": ["node_modules"]
19 | }
```
--------------------------------------------------------------------------------
/scripts/build.sh:
--------------------------------------------------------------------------------
```bash
1 | #!/bin/bash
2 |
3 | # Build script for MCP Server Gemini
4 | set -e
5 |
6 | echo "🔧 Building MCP Server Gemini..."
7 |
8 | # Clean previous build
9 | echo "🧹 Cleaning previous build..."
10 | rm -rf dist/
11 |
12 | # Type check
13 | echo "🔍 Type checking..."
14 | npx tsc --noEmit
15 |
16 | # Build
17 | echo "🏗️ Building TypeScript..."
18 | npx tsc
19 |
20 | # Copy additional files
21 | echo "📋 Copying additional files..."
22 | cp package.json dist/
23 | cp README.md dist/
24 | cp LICENSE dist/
25 |
26 | echo "✅ Build completed successfully!"
27 | echo "📦 Output directory: dist/"
28 |
```
--------------------------------------------------------------------------------
/scripts/test.sh:
--------------------------------------------------------------------------------
```bash
1 | #!/bin/bash
2 |
3 | # Test script for MCP Server Gemini
4 | set -e
5 |
6 | echo "🧪 Running tests for MCP Server Gemini..."
7 |
8 | # Run linting
9 | echo "🔍 Running ESLint..."
10 | npx eslint src/**/*.ts --fix
11 |
12 | # Run type checking
13 | echo "📝 Type checking..."
14 | npx tsc --noEmit
15 |
16 | # Run unit tests
17 | echo "🧪 Running unit tests..."
18 | npx jest --coverage
19 |
20 | # Run integration tests if API key is available
21 | if [ -n "$GEMINI_API_KEY" ]; then
22 | echo "🔗 Running integration tests..."
23 | npx jest --testPathPattern=integration
24 | else
25 | echo "⚠️ Skipping integration tests (GEMINI_API_KEY not set)"
26 | fi
27 |
28 | echo "✅ All tests completed successfully!"
29 |
```
--------------------------------------------------------------------------------
/scripts/dev.sh:
--------------------------------------------------------------------------------
```bash
1 | #!/bin/bash
2 |
3 | # Development script for MCP Server Gemini
4 | set -e
5 |
6 | echo "🚀 Starting MCP Server Gemini in development mode..."
7 |
8 | # Check if .env exists
9 | if [ ! -f .env ]; then
10 | echo "⚠️ .env file not found. Creating template..."
11 | echo "GEMINI_API_KEY=your_api_key_here" > .env
12 | echo "📝 Please edit .env file with your actual API key"
13 | exit 1
14 | fi
15 |
16 | # Check if API key is set
 17 | if ! grep -q "^GEMINI_API_KEY=.*[^=]$" .env || grep -q "^GEMINI_API_KEY=your_api_key_here$" .env; then
18 | echo "❌ GEMINI_API_KEY not set in .env file"
19 | echo "💡 Please add your Gemini API key to .env file"
20 | exit 1
21 | fi
22 |
23 | echo "✅ Environment configured"
24 | echo "🔧 Starting development server with hot reload..."
25 |
26 | # Start with ts-node and watch mode
27 | npx nodemon --exec "node --loader ts-node/esm src/enhanced-stdio-server.ts" --ext ts --watch src/
28 |
```
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
```dockerfile
1 | # Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile
2 | # Use a Node.js image
3 | FROM node:18-alpine AS builder
4 |
5 | # Set the working directory
6 | WORKDIR /app
7 |
8 | # Copy package.json and package-lock.json
9 | COPY package.json package-lock.json ./
10 |
11 | # Install dependencies
12 | RUN npm install
13 |
14 | # Copy the rest of the application code
15 | COPY . .
16 |
17 | # Build the application
18 | RUN npm run build
19 |
20 | # Use a smaller Node.js image for the release
21 | FROM node:18-slim AS release
22 |
23 | # Set the working directory
24 | WORKDIR /app
25 |
26 | # Copy the build from the builder stage
27 | COPY --from=builder /app/dist /app/dist
28 | COPY --from=builder /app/package.json /app/package-lock.json /app/
29 |
30 | # Install production dependencies only
31 | RUN npm ci --production
32 |
33 | # Expose the necessary port
34 | EXPOSE 3005
35 |
36 | # Command to run the application
 37 | CMD ["node", "dist/enhanced-stdio-server.js"]
```
--------------------------------------------------------------------------------
/docs/troubleshooting.md:
--------------------------------------------------------------------------------
```markdown
1 | # Troubleshooting Guide
2 |
3 | ## Common Issues
4 |
5 | ### Connection Problems
6 |
7 | 1. Port Already in Use
8 | ```bash
9 | Error: EADDRINUSE: address already in use :::3005
10 | ```
11 | Solution:
12 | - Check if another process is using port 3005
13 | - Kill the existing process
14 | - Change the port number
15 |
16 | 2. WebSocket Connection Failed
17 | ```
18 | Error: Connection refused
19 | ```
20 | Solution:
21 | - Verify server is running
22 | - Check firewall settings
23 | - Confirm correct port
24 |
25 | ### API Issues
26 |
27 | 1. Invalid API Key
28 | ```
29 | Error: Invalid API key provided
30 | ```
31 | Solution:
32 | - Check GEMINI_API_KEY environment variable
33 | - Verify API key is valid
34 | - Regenerate API key if needed
35 |
36 | 2. Rate Limiting
37 | ```
38 | Error: Resource exhausted
39 | ```
40 | Solution:
41 | - Implement backoff strategy
42 | - Check quota limits
43 | - Upgrade API tier if needed
44 |
45 | ## Protocol Errors
46 |
47 | 1. Invalid Message Format
48 | ```json
49 | Error: Parse error (-32700)
50 | ```
51 | Solution:
52 | - Check JSON syntax
53 | - Verify message format
54 | - Validate against schema
55 |
56 | 2. Method Not Found
57 | ```json
58 | Error: Method not found (-32601)
59 | ```
60 | Solution:
61 | - Check method name
62 | - Verify protocol version
63 | - Update capabilities
64 |
65 | ## Debugging Steps
66 |
67 | 1. Enable Debug Mode
68 | ```bash
69 | DEBUG=true npm start
70 | ```
71 |
72 | 2. Check Logs
73 | ```bash
74 | tail -f debug.log
75 | ```
76 |
77 | 3. Monitor WebSocket Traffic
78 | ```bash
79 | wscat -c ws://localhost:3005
80 | ```
81 |
82 | ## Getting Help
83 |
84 | 1. Check Documentation
85 | - Review implementation notes
86 | - Check protocol specification
87 | - Read troubleshooting guide
88 |
89 | 2. Open Issues
90 | - Search existing issues
91 | - Provide error details
92 | - Include reproduction steps
93 |
94 | 3. Community Support
95 | - Join discussions
96 | - Ask questions
97 | - Share solutions
98 |
```
--------------------------------------------------------------------------------
/jest.config.js:
--------------------------------------------------------------------------------
```javascript
1 | /** @type {import('jest').Config} */
2 | module.exports = {
3 | // Use ts-jest preset for TypeScript support
4 | preset: 'ts-jest/presets/default-esm',
5 |
6 | // Test environment
7 | testEnvironment: 'node',
8 |
9 | // Enable ESM support
10 | extensionsToTreatAsEsm: ['.ts'],
11 |
12 | // Module name mapping for ESM imports
  13 |   moduleNameMapper: {
14 | '^(\\.{1,2}/.*)\\.js$': '$1',
15 | },
16 |
17 | // Transform configuration
18 | transform: {
19 | '^.+\\.ts$': ['ts-jest', {
20 | useESM: true,
21 | tsconfig: {
22 | module: 'ESNext',
23 | moduleResolution: 'node'
24 | }
25 | }]
26 | },
27 |
28 | // Test file patterns
29 | testMatch: [
30 | '**/tests/**/*.test.ts',
31 | '**/__tests__/**/*.test.ts'
32 | ],
33 |
34 | // Test roots
35 | roots: ['<rootDir>/src', '<rootDir>/tests'],
36 |
37 | // Ignore patterns
38 | testPathIgnorePatterns: [
39 | '/node_modules/',
40 | '/dist/',
41 | '/build/'
42 | ],
43 |
44 | // Coverage configuration
45 | collectCoverage: true,
46 | coverageDirectory: 'coverage',
47 | coverageReporters: [
48 | 'text',
49 | 'lcov',
50 | 'html',
51 | 'json-summary'
52 | ],
53 |
54 | // Coverage collection patterns
55 | collectCoverageFrom: [
56 | 'src/**/*.ts',
57 | '!src/**/*.d.ts',
58 | '!src/**/*.test.ts',
59 | '!src/**/__tests__/**',
60 | '!src/types.ts'
61 | ],
62 |
63 | // Coverage thresholds
64 | coverageThreshold: {
65 | global: {
66 | branches: 70,
67 | functions: 70,
68 | lines: 70,
69 | statements: 70
70 | }
71 | },
72 |
73 | // Test timeout
74 | testTimeout: 30000,
75 |
76 | // Verbose output
77 | verbose: true,
78 |
79 | // Clear mocks between tests
80 | clearMocks: true,
81 |
82 | // Restore mocks after each test
83 | restoreMocks: true,
84 |
85 | // Module file extensions
86 | moduleFileExtensions: ['ts', 'js', 'json', 'node']
87 | };
```
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: Release
2 |
3 | on:
4 | push:
5 | tags:
6 | - 'v*'
7 |
8 | jobs:
9 | test:
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - name: Checkout code
14 | uses: actions/checkout@v4
15 |
16 | - name: Use Node.js 20.x
17 | uses: actions/setup-node@v4
18 | with:
19 | node-version: 20.x
20 | cache: 'npm'
21 |
22 | - name: Install dependencies
23 | run: npm ci
24 |
25 | - name: Run full validation
26 | run: npm run validate
27 | env:
28 | GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
29 |
30 | - name: Build project
31 | run: npm run build
32 |
33 | publish:
34 | needs: test
35 | runs-on: ubuntu-latest
36 |
37 | steps:
38 | - name: Checkout code
39 | uses: actions/checkout@v4
40 |
41 | - name: Use Node.js 20.x
42 | uses: actions/setup-node@v4
43 | with:
44 | node-version: 20.x
45 | cache: 'npm'
46 | registry-url: 'https://registry.npmjs.org'
47 |
48 | - name: Install dependencies
49 | run: npm ci
50 |
51 | - name: Build project
52 | run: npm run build
53 |
54 | - name: Publish to npm
55 | run: npm publish
56 | env:
57 | NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
58 |
59 | - name: Create GitHub Release
60 | uses: actions/create-release@v1
61 | env:
62 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
63 | with:
64 | tag_name: ${{ github.ref }}
65 | release_name: Release ${{ github.ref }}
66 | draft: false
67 | prerelease: false
68 | body: |
69 | ## Changes
70 |
71 | See [CHANGELOG.md](CHANGELOG.md) for detailed changes.
72 |
73 | ## Installation
74 |
75 | ```bash
76 | npm install -g mcp-server-gemini@${{ github.ref_name }}
77 | ```
78 |
79 | ## Docker
80 |
81 | ```bash
82 | docker pull mcp-server-gemini:${{ github.ref_name }}
83 | ```
84 |
```
--------------------------------------------------------------------------------
/tests/setup.ts:
--------------------------------------------------------------------------------
```typescript
1 | import 'dotenv/config';
2 |
3 | // Set test environment
4 | process.env.NODE_ENV = 'test';
5 |
6 | // Mock console methods to reduce noise in tests
7 | const originalConsoleError = console.error;
8 | const originalConsoleWarn = console.warn;
9 | const originalConsoleLog = console.log;
10 |
11 | beforeAll(() => {
12 | // Suppress console output during tests unless explicitly needed
13 | console.error = jest.fn();
14 | console.warn = jest.fn();
15 | console.log = jest.fn();
16 | });
17 |
18 | afterAll(() => {
19 | // Restore original console methods
20 | console.error = originalConsoleError;
21 | console.warn = originalConsoleWarn;
22 | console.log = originalConsoleLog;
23 | });
24 |
25 | // Global test timeout
26 | jest.setTimeout(30000);
27 |
28 | // Mock timers for tests that need them
29 | beforeEach(() => {
30 | jest.clearAllTimers();
31 | });
32 |
33 | // Clean up after each test
34 | afterEach(() => {
35 | jest.clearAllMocks();
36 | jest.restoreAllMocks();
37 | });
38 |
39 | // Global error handler for unhandled promise rejections
40 | process.on('unhandledRejection', (reason, promise) => {
41 | console.error('Unhandled Rejection at:', promise, 'reason:', reason);
42 | });
43 |
44 | // Extend Jest matchers if needed
45 | expect.extend({
46 | toBeValidMCPResponse(received) {
47 | const pass =
48 | received &&
49 | typeof received === 'object' &&
50 | received.jsonrpc === '2.0' &&
51 | received.id !== undefined &&
52 | (received.result !== undefined || received.error !== undefined);
53 |
54 | if (pass) {
55 | return {
56 | message: () => `expected ${JSON.stringify(received)} not to be a valid MCP response`,
57 | pass: true,
58 | };
59 | } else {
60 | return {
61 | message: () => `expected ${JSON.stringify(received)} to be a valid MCP response`,
62 | pass: false,
63 | };
64 | }
65 | },
66 | });
67 |
68 | // Type declaration for custom matcher
69 | declare global {
70 | namespace jest {
71 | interface Matchers<R> {
72 | toBeValidMCPResponse(): R;
73 | }
74 | }
75 | }
76 |
```
--------------------------------------------------------------------------------
/src/config/index.ts:
--------------------------------------------------------------------------------
```typescript
1 | import 'dotenv/config';
2 | import { z } from 'zod';
3 |
4 | // Configuration schema validation
5 | const ConfigSchema = z.object({
6 | // API Configuration
7 | geminiApiKey: z.string().min(1, 'GEMINI_API_KEY is required'),
8 |
9 | // Server Configuration
10 | logLevel: z.enum(['error', 'warn', 'info', 'debug']).default('info'),
11 | enableMetrics: z.boolean().default(false),
12 |
13 | // Rate Limiting
14 | rateLimitEnabled: z.boolean().default(true),
15 | rateLimitRequests: z.number().default(100),
16 | rateLimitWindow: z.number().default(60000), // 1 minute
17 |
18 | // Timeouts
19 | requestTimeout: z.number().default(30000), // 30 seconds
20 |
21 | // Development
22 | isDevelopment: z.boolean().default(false)
23 | });
24 |
25 | export type Config = z.infer<typeof ConfigSchema>;
26 |
27 | /**
28 | * Load and validate configuration from environment variables
29 | */
30 | export function loadConfig(): Config {
31 | const rawConfig = {
32 | geminiApiKey: process.env.GEMINI_API_KEY,
33 | logLevel: process.env.LOG_LEVEL,
34 | enableMetrics: process.env.ENABLE_METRICS === 'true',
35 | rateLimitEnabled: process.env.RATE_LIMIT_ENABLED !== 'false',
36 | rateLimitRequests: process.env.RATE_LIMIT_REQUESTS
37 | ? parseInt(process.env.RATE_LIMIT_REQUESTS, 10)
38 | : undefined,
39 | rateLimitWindow: process.env.RATE_LIMIT_WINDOW
40 | ? parseInt(process.env.RATE_LIMIT_WINDOW, 10)
41 | : undefined,
42 | requestTimeout: process.env.REQUEST_TIMEOUT
43 | ? parseInt(process.env.REQUEST_TIMEOUT, 10)
44 | : undefined,
45 | isDevelopment: process.env.NODE_ENV === 'development'
46 | };
47 |
48 | try {
49 | return ConfigSchema.parse(rawConfig);
50 | } catch (error) {
51 | if (error instanceof z.ZodError) {
52 | const issues = error.issues
53 | .map(issue => `${issue.path.join('.')}: ${issue.message}`)
54 | .join('\n');
55 | throw new Error(`Configuration validation failed:\n${issues}`);
56 | }
57 | throw error;
58 | }
59 | }
60 |
61 | // Export singleton config instance
62 | export const config = loadConfig();
63 |
```
--------------------------------------------------------------------------------
/src/utils/logger.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { config } from '../config/index.js';
2 |
3 | export enum LogLevel {
4 | ERROR = 0,
5 | WARN = 1,
6 | INFO = 2,
7 | DEBUG = 3
8 | }
9 |
10 | const LOG_LEVEL_MAP: Record<string, LogLevel> = {
11 | error: LogLevel.ERROR,
12 | warn: LogLevel.WARN,
13 | info: LogLevel.INFO,
14 | debug: LogLevel.DEBUG
15 | };
16 |
17 | class Logger {
18 | private currentLevel: LogLevel;
19 |
20 | constructor() {
21 | this.currentLevel = LOG_LEVEL_MAP[config.logLevel] ?? LogLevel.INFO;
22 | }
23 |
24 | private formatMessage(level: string, message: string, meta?: any): string {
25 | const timestamp = new Date().toISOString();
26 | const metaStr = meta ? ` ${JSON.stringify(meta)}` : '';
27 | return `[${timestamp}] ${level.padEnd(5)} ${message}${metaStr}`;
28 | }
29 |
30 | private log(level: LogLevel, levelName: string, message: string, meta?: any): void {
31 | if (level <= this.currentLevel) {
32 | const formattedMessage = this.formatMessage(levelName, message, meta);
33 |
34 | // Use stderr for logging to avoid interfering with MCP protocol on stdout
35 | if (level === LogLevel.ERROR) {
36 | console.error(formattedMessage);
37 | } else {
38 | console.error(formattedMessage);
39 | }
40 | }
41 | }
42 |
43 | error(message: string, meta?: any): void {
44 | this.log(LogLevel.ERROR, '❌ ERROR', message, meta);
45 | }
46 |
47 | warn(message: string, meta?: any): void {
48 | this.log(LogLevel.WARN, '⚠️ WARN', message, meta);
49 | }
50 |
51 | info(message: string, meta?: any): void {
52 | this.log(LogLevel.INFO, 'ℹ️ INFO', message, meta);
53 | }
54 |
55 | debug(message: string, meta?: any): void {
56 | this.log(LogLevel.DEBUG, '🐛 DEBUG', message, meta);
57 | }
58 |
59 | // Convenience methods with emojis for better UX
60 | startup(message: string, meta?: any): void {
61 | this.info(`🚀 ${message}`, meta);
62 | }
63 |
64 | success(message: string, meta?: any): void {
65 | this.info(`✅ ${message}`, meta);
66 | }
67 |
68 | request(message: string, meta?: any): void {
69 | this.debug(`📨 ${message}`, meta);
70 | }
71 |
72 | response(message: string, meta?: any): void {
73 | this.debug(`📤 ${message}`, meta);
74 | }
75 |
76 | api(message: string, meta?: any): void {
77 | this.debug(`🤖 ${message}`, meta);
78 | }
79 |
80 | security(message: string, meta?: any): void {
81 | this.warn(`🔒 ${message}`, meta);
82 | }
83 | }
84 |
85 | export const logger = new Logger();
86 |
```
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
```yaml
1 | version: '3.8'
2 |
3 | services:
4 | mcp-server-gemini:
5 | build:
6 | context: .
7 | dockerfile: Dockerfile
  8 |       target: release
9 | image: mcp-server-gemini:latest
10 | container_name: mcp-server-gemini
11 | restart: unless-stopped
12 |
13 | # Environment configuration
14 | environment:
15 | - NODE_ENV=production
16 | - LOG_LEVEL=info
17 | - RATE_LIMIT_ENABLED=true
18 | - RATE_LIMIT_REQUESTS=100
19 | - RATE_LIMIT_WINDOW=60000
20 | - REQUEST_TIMEOUT=30000
21 |
22 | # Load environment variables from file
23 | env_file:
24 | - .env
25 |
26 | # Resource limits
27 | deploy:
28 | resources:
29 | limits:
30 | memory: 512M
31 | cpus: '0.5'
32 | reservations:
33 | memory: 256M
34 | cpus: '0.25'
35 |
36 | # Health check
37 | healthcheck:
38 | test: ["CMD", "node", "-e", "console.log('Health check passed')"]
39 | interval: 30s
40 | timeout: 10s
41 | retries: 3
42 | start_period: 40s
43 |
44 | # Logging configuration
45 | logging:
46 | driver: "json-file"
47 | options:
48 | max-size: "10m"
49 | max-file: "3"
50 |
51 | # Security options
52 | security_opt:
53 | - no-new-privileges:true
54 |
55 | # Read-only root filesystem (except for tmp)
56 | read_only: true
57 | tmpfs:
58 | - /tmp:noexec,nosuid,size=100m
59 |
60 | # Drop all capabilities and add only necessary ones
61 | cap_drop:
62 | - ALL
63 | cap_add:
64 | - SETGID
65 | - SETUID
66 |
67 | # Use non-root user
68 | user: "1001:1001"
69 |
70 | # Development service
71 | mcp-server-gemini-dev:
72 | build:
73 | context: .
74 | dockerfile: Dockerfile
75 | target: builder
76 | image: mcp-server-gemini:dev
77 | container_name: mcp-server-gemini-dev
78 | restart: "no"
79 |
80 | environment:
81 | - NODE_ENV=development
82 | - LOG_LEVEL=debug
83 | - RATE_LIMIT_ENABLED=false
84 | - REQUEST_TIMEOUT=60000
85 |
86 | env_file:
87 | - .env
88 |
89 | # Mount source code for development
90 | volumes:
91 | - ./src:/app/src:ro
92 | - ./package.json:/app/package.json:ro
93 | - ./tsconfig.json:/app/tsconfig.json:ro
94 |
95 | # Override command for development
96 | command: ["npm", "run", "dev"]
97 |
98 | profiles:
99 | - dev
100 |
101 | # Networks
102 | networks:
103 | default:
104 | name: mcp-server-gemini-network
105 | driver: bridge
106 |
107 | # Volumes for persistent data (if needed)
108 | volumes:
109 | logs:
110 | driver: local
111 |
```
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
```yaml
1 | name: CI
2 |
3 | on:
4 | push:
5 | branches: [ main, develop ]
6 | pull_request:
7 | branches: [ main, develop ]
8 |
9 | jobs:
10 | test:
11 | runs-on: ubuntu-latest
12 |
13 | strategy:
14 | matrix:
15 | node-version: [16.x, 18.x, 20.x]
16 |
17 | steps:
18 | - name: Checkout code
19 | uses: actions/checkout@v4
20 |
21 | - name: Use Node.js ${{ matrix.node-version }}
22 | uses: actions/setup-node@v4
23 | with:
24 | node-version: ${{ matrix.node-version }}
25 | cache: 'npm'
26 |
27 | - name: Install dependencies
28 | run: npm ci
29 |
30 | - name: Run type checking
31 | run: npm run type-check
32 |
33 | - name: Run linting
34 | run: npm run lint
35 |
36 | - name: Run formatting check
37 | run: npm run format:check
38 |
39 | - name: Run unit tests
40 | run: npm test -- --testPathIgnorePatterns=integration
41 |
42 | - name: Run integration tests
43 | if: matrix.node-version == '20.x'
44 | env:
45 | GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
46 | run: npm run test:integration
47 |
48 | - name: Build project
49 | run: npm run build
50 |
51 | - name: Upload coverage to Codecov
52 | if: matrix.node-version == '20.x'
53 | uses: codecov/codecov-action@v3
54 | with:
55 | file: ./coverage/lcov.info
56 | flags: unittests
57 | name: codecov-umbrella
58 |
59 | security:
60 | runs-on: ubuntu-latest
61 |
62 | steps:
63 | - name: Checkout code
64 | uses: actions/checkout@v4
65 |
66 | - name: Use Node.js 20.x
67 | uses: actions/setup-node@v4
68 | with:
69 | node-version: 20.x
70 | cache: 'npm'
71 |
72 | - name: Install dependencies
73 | run: npm ci
74 |
75 | - name: Run security audit
76 | run: npm audit --audit-level=moderate
77 |
78 | - name: Check for vulnerabilities
79 | run: npx audit-ci --moderate
80 |
81 | build-and-test:
82 | runs-on: ${{ matrix.os }}
83 |
84 | strategy:
85 | matrix:
86 | os: [ubuntu-latest, windows-latest, macos-latest]
87 | node-version: [20.x]
88 |
89 | steps:
90 | - name: Checkout code
91 | uses: actions/checkout@v4
92 |
93 | - name: Use Node.js ${{ matrix.node-version }}
94 | uses: actions/setup-node@v4
95 | with:
96 | node-version: ${{ matrix.node-version }}
97 | cache: 'npm'
98 |
99 | - name: Install dependencies
100 | run: npm ci
101 |
102 | - name: Build project
103 | run: npm run build
104 |
105 | - name: Test built server
106 | run: |
107 | timeout 10s node dist/enhanced-stdio-server.js || true
108 | shell: bash
109 | env:
110 | GEMINI_API_KEY: test-key
111 |
```
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
```json
1 | {
2 | "name": "mcp-server-gemini",
3 | "version": "1.0.0",
4 | "description": "A state-of-the-art Model Context Protocol (MCP) server that provides seamless integration with Google's Gemini AI models. This server enables Claude Desktop and other MCP-compatible clients to leverage the full power of Gemini's advanced AI capabilities.",
5 | "main": "dist/enhanced-stdio-server.js",
6 | "bin": {
7 | "mcp-server-gemini": "./dist/enhanced-stdio-server.js"
8 | },
9 | "engines": {
10 | "node": ">=16.0.0",
11 | "npm": ">=7.0.0"
12 | },
13 | "scripts": {
14 | "build": "npm run clean && tsc",
15 | "build:watch": "tsc --watch",
16 | "clean": "rimraf dist",
17 | "prepare": "npm run build",
18 | "start": "node dist/enhanced-stdio-server.js",
19 | "dev": "node --loader ts-node/esm src/enhanced-stdio-server.ts",
20 | "dev:watch": "nodemon --exec \"node --loader ts-node/esm src/enhanced-stdio-server.ts\" --ext ts --watch src/",
21 | "test": "jest",
22 | "test:watch": "jest --watch",
23 | "test:coverage": "jest --coverage",
24 | "test:integration": "jest --testPathPattern=integration",
25 | "lint": "eslint src/**/*.ts",
26 | "lint:fix": "eslint src/**/*.ts --fix",
27 | "format": "prettier --write src/**/*.ts",
28 | "format:check": "prettier --check src/**/*.ts",
29 | "type-check": "tsc --noEmit",
30 | "validate": "npm run type-check && npm run lint && npm run test",
31 | "prepack": "npm run validate && npm run build",
32 | "release": "npm run validate && npm run build && npm publish"
33 | },
34 | "type": "module",
35 | "dependencies": {
36 | "@google/genai": "^1.8.0",
37 | "dotenv": "^16.4.5",
38 | "zod": "^3.22.4"
39 | },
40 | "devDependencies": {
41 | "@types/jest": "^29.5.0",
42 | "@types/node": "^20.10.5",
43 | "@typescript-eslint/eslint-plugin": "^6.21.0",
44 | "@typescript-eslint/parser": "^6.21.0",
45 | "eslint": "^8.0.0",
46 | "eslint-config-prettier": "^10.1.8",
47 | "jest": "^29.5.0",
48 | "nodemon": "^3.0.2",
49 | "prettier": "^3.0.0",
50 | "rimraf": "^5.0.5",
51 | "ts-jest": "^29.1.0",
52 | "ts-node": "^10.9.2",
53 | "typescript": "^5.3.3"
54 | },
55 | "keywords": [
56 | "mcp",
57 | "model-context-protocol",
58 | "gemini",
59 | "google-gemini",
60 | "ai",
61 | "llm",
62 | "claude-desktop",
63 | "cursor",
64 | "windsurf",
65 | "typescript"
66 | ],
67 | "author": "Gurveer",
68 | "license": "MIT",
69 | "repository": {
70 | "type": "git",
71 | "url": "git+https://github.com/gurr-i/mcp-server-gemini-pro.git"
72 | },
73 | "bugs": {
74 | "url": "https://github.com/gurr-i/mcp-server-gemini/issues"
75 | },
76 | "homepage": "https://github.com/gurr-i/mcp-server-gemini#readme",
77 | "files": [
78 | "dist",
79 | "README.md",
80 | "LICENSE",
81 | "CHANGELOG.md"
82 | ]
83 | }
84 |
```
--------------------------------------------------------------------------------
/Contribution-Guide.md:
--------------------------------------------------------------------------------
```markdown
1 | # Contributing to Gemini MCP Server
2 |
3 | Thank you for your interest in contributing to the Gemini MCP Server! This document provides guidelines for contributing to the project.
4 |
5 | ## Code of Conduct
6 |
7 | This project follows a standard code of conduct. Please be respectful and constructive in all interactions.
8 |
9 | ## How to Contribute
10 |
11 | ### Reporting Issues
12 |
13 | 1. Check if the issue already exists in the [issue tracker](https://github.com/gurr-i/mcp-server-gemini/issues)
14 | 2. If not, create a new issue with:
15 | - Clear description of the problem
16 | - Steps to reproduce
17 | - Expected behavior
18 | - Actual behavior
19 | - Your environment (OS, MCP client, Node.js version)
20 | - Relevant logs or error messages
21 |
22 | ### Suggesting Enhancements
23 |
24 | 1. Check if the enhancement has already been suggested
25 | 2. Create a new issue with the `enhancement` label
26 | 3. Describe the feature and why it would be useful
27 | 4. Provide examples of how it would work
28 |
29 | ### Pull Requests
30 |
31 | 1. Fork the repository
32 | 2. Create a new branch: `git checkout -b feature/your-feature-name`
33 | 3. Make your changes
34 | 4. Write or update tests if applicable
35 | 5. Update documentation if needed
36 | 6. Commit your changes with clear commit messages
37 | 7. Push to your fork
38 | 8. Create a pull request
39 |
40 | #### Pull Request Guidelines
41 |
42 | - Keep PRs focused - one feature or fix per PR
43 | - Follow the existing code style
44 | - Update the README.md if you're adding new features
45 | - Add tests for new functionality
46 | - Make sure all tests pass: `npm test`
47 | - Update type definitions if changing APIs
48 |
49 | ## Development Setup
50 |
51 | ```bash
52 | # Clone your fork
53 | git clone https://github.com/YOUR_USERNAME/mcp-server-gemini.git
54 | cd mcp-server-gemini
55 |
56 | # Install dependencies
57 | npm install
58 |
59 | # Run in development mode
60 | npm run dev
61 |
62 | # Run tests
63 | npm test
64 |
65 | # Build the project
66 | npm run build
67 |
68 | # Lint the code
69 | npm run lint
70 | ```
71 |
72 | ## Code Style
73 |
74 | - TypeScript with strict mode
75 | - ESM modules
76 | - Use async/await over callbacks
77 | - Add JSDoc comments for public APIs
78 | - Follow the existing patterns in the codebase
79 |
80 | ## Testing
81 |
82 | - Write tests for new features
83 | - Ensure existing tests pass
84 | - Test with multiple MCP clients if possible
85 | - Test error cases and edge conditions
86 |
87 | ## Documentation
88 |
89 | - Update README.md for new features
90 | - Add JSDoc comments for new functions
91 | - Update USAGE_GUIDE.md if adding new tools
92 | - Update PARAMETERS_REFERENCE.md for new parameters
93 |
94 | ## Release Process
95 |
96 | Maintainers will:
97 | 1. Review and merge PRs
98 | 2. Update version in package.json
99 | 3. Update CHANGELOG.md
100 | 4. Create a new release on GitHub
101 | 5. Publish to npm if applicable
102 |
103 | ## Questions?
104 |
105 | Feel free to open an issue for any questions about contributing!
```
--------------------------------------------------------------------------------
/tests/unit/config.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { loadConfig } from '../../src/config/index.js';
2 |
3 | describe('Configuration', () => {
4 | const originalEnv = process.env;
5 |
6 | beforeEach(() => {
7 | jest.resetModules();
8 | process.env = { ...originalEnv };
9 | });
10 |
11 | afterAll(() => {
12 | process.env = originalEnv;
13 | });
14 |
15 | describe('loadConfig', () => {
16 | it('should load valid configuration', () => {
17 | process.env.GEMINI_API_KEY = 'test-api-key';
18 |
19 | const config = loadConfig();
20 |
21 | expect(config.geminiApiKey).toBe('test-api-key');
22 | expect(config.logLevel).toBe('info');
23 | expect(config.rateLimitEnabled).toBe(true);
24 | });
25 |
26 | it('should throw error for missing API key', () => {
27 | delete process.env.GEMINI_API_KEY;
28 |
29 | expect(() => loadConfig()).toThrow('GEMINI_API_KEY is required');
30 | });
31 |
32 | it('should use custom log level', () => {
33 | process.env.GEMINI_API_KEY = 'test-api-key';
34 | process.env.LOG_LEVEL = 'debug';
35 |
36 | const config = loadConfig();
37 |
38 | expect(config.logLevel).toBe('debug');
39 | });
40 |
41 | it('should parse numeric values correctly', () => {
42 | process.env.GEMINI_API_KEY = 'test-api-key';
43 | process.env.RATE_LIMIT_REQUESTS = '200';
44 | process.env.RATE_LIMIT_WINDOW = '120000';
45 | process.env.REQUEST_TIMEOUT = '60000';
46 |
47 | const config = loadConfig();
48 |
49 | expect(config.rateLimitRequests).toBe(200);
50 | expect(config.rateLimitWindow).toBe(120000);
51 | expect(config.requestTimeout).toBe(60000);
52 | });
53 |
54 | it('should parse boolean values correctly', () => {
55 | process.env.GEMINI_API_KEY = 'test-api-key';
56 | process.env.ENABLE_METRICS = 'true';
57 | process.env.RATE_LIMIT_ENABLED = 'false';
58 |
59 | const config = loadConfig();
60 |
61 | expect(config.enableMetrics).toBe(true);
62 | expect(config.rateLimitEnabled).toBe(false);
63 | });
64 |
65 | it('should detect development environment', () => {
66 | process.env.GEMINI_API_KEY = 'test-api-key';
67 | process.env.NODE_ENV = 'development';
68 |
69 | const config = loadConfig();
70 |
71 | expect(config.isDevelopment).toBe(true);
72 | });
73 |
74 | it('should throw error for invalid log level', () => {
75 | process.env.GEMINI_API_KEY = 'test-api-key';
76 | process.env.LOG_LEVEL = 'invalid';
77 |
78 | expect(() => loadConfig()).toThrow('Configuration validation failed');
79 | });
80 |
81 | it('should throw error for invalid numeric values', () => {
82 | process.env.GEMINI_API_KEY = 'test-api-key';
83 | process.env.RATE_LIMIT_REQUESTS = 'not-a-number';
84 |
85 | expect(() => loadConfig()).toThrow('Configuration validation failed');
86 | });
87 | });
88 | });
89 |
```
--------------------------------------------------------------------------------
/src/utils/rateLimiter.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { config } from '../config/index.js';
2 | import { RateLimitError } from './errors.js';
3 | import { logger } from './logger.js';
4 |
5 | interface RateLimitEntry {
6 | count: number;
7 | resetTime: number;
8 | }
9 |
10 | /**
11 | * Simple in-memory rate limiter using sliding window
12 | */
13 | export class RateLimiter {
14 | private requests = new Map<string, RateLimitEntry>();
15 | private cleanupInterval: NodeJS.Timeout;
16 |
17 | constructor(
18 | private maxRequests: number = config.rateLimitRequests,
19 | private windowMs: number = config.rateLimitWindow
20 | ) {
21 | // Clean up expired entries every minute
22 | this.cleanupInterval = setInterval(() => {
23 | this.cleanup();
24 | }, 60000);
25 | }
26 |
27 | /**
28 | * Check if request is allowed for the given identifier
29 | */
30 | checkLimit(identifier: string = 'default'): void {
31 | if (!config.rateLimitEnabled) {
32 | return;
33 | }
34 |
35 | const now = Date.now();
36 | const entry = this.requests.get(identifier);
37 |
38 | if (!entry) {
39 | // First request for this identifier
40 | this.requests.set(identifier, {
41 | count: 1,
42 | resetTime: now + this.windowMs
43 | });
44 | return;
45 | }
46 |
47 | if (now >= entry.resetTime) {
48 | // Window has expired, reset
49 | entry.count = 1;
50 | entry.resetTime = now + this.windowMs;
51 | return;
52 | }
53 |
54 | if (entry.count >= this.maxRequests) {
55 | const resetIn = Math.ceil((entry.resetTime - now) / 1000);
56 | logger.security(`Rate limit exceeded for ${identifier}`, {
57 | count: entry.count,
58 | limit: this.maxRequests,
59 | resetIn
60 | });
61 |
62 | throw new RateLimitError(`Rate limit exceeded. Try again in ${resetIn} seconds.`);
63 | }
64 |
65 | entry.count++;
66 | }
67 |
68 | /**
69 | * Get current usage for identifier
70 | */
71 | getUsage(identifier: string = 'default'): { count: number; limit: number; resetTime: number } {
72 | const entry = this.requests.get(identifier);
73 | const now = Date.now();
74 |
75 | if (!entry || now >= entry.resetTime) {
76 | return {
77 | count: 0,
78 | limit: this.maxRequests,
79 | resetTime: now + this.windowMs
80 | };
81 | }
82 |
83 | return {
84 | count: entry.count,
85 | limit: this.maxRequests,
86 | resetTime: entry.resetTime
87 | };
88 | }
89 |
90 | /**
91 | * Clean up expired entries
92 | */
93 | private cleanup(): void {
94 | const now = Date.now();
95 | let cleaned = 0;
96 |
97 | for (const [identifier, entry] of this.requests.entries()) {
98 | if (now >= entry.resetTime) {
99 | this.requests.delete(identifier);
100 | cleaned++;
101 | }
102 | }
103 |
104 | if (cleaned > 0) {
105 | logger.debug(`Cleaned up ${cleaned} expired rate limit entries`);
106 | }
107 | }
108 |
  /**
   * Forget any recorded usage for an identifier, lifting its limit immediately.
   */
  reset(identifier: string = 'default'): void {
    this.requests.delete(identifier);
    logger.debug(`Reset rate limit for ${identifier}`);
  }
116 |
  /**
   * Destroy the rate limiter and clean up resources.
   * Stops the periodic cleanup timer (so its handle cannot keep the
   * process alive) and discards all tracked usage. Safe to call twice.
   */
  destroy(): void {
    if (this.cleanupInterval) {
      clearInterval(this.cleanupInterval);
    }
    this.requests.clear();
  }
126 | }
127 |
// Export singleton instance shared by all request handlers in this process
export const rateLimiter = new RateLimiter();
130 |
```
--------------------------------------------------------------------------------
/src/utils/errors.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Custom error classes for the MCP Server
3 | */
4 |
5 | export class MCPError extends Error {
6 | constructor(
7 | message: string,
8 | public code: number = -32603,
9 | public data?: any
10 | ) {
11 | super(message);
12 | this.name = 'MCPError';
13 | }
14 |
15 | toMCPResponse(id: any) {
16 | return {
17 | jsonrpc: '2.0',
18 | id,
19 | error: {
20 | code: this.code,
21 | message: this.message,
22 | ...(this.data && { data: this.data })
23 | }
24 | };
25 | }
26 | }
27 |
/** Error for malformed or invalid request parameters (JSON-RPC code -32602). */
export class ValidationError extends MCPError {
  constructor(message: string, data?: any) {
    super(message, -32602, data);
    this.name = 'ValidationError';
  }
}
34 |
/** Raised when the supplied API key is missing or invalid (code -32001). */
export class AuthenticationError extends MCPError {
  constructor(message: string = 'Invalid API key') {
    super(message, -32001);
    this.name = 'AuthenticationError';
  }
}
41 |
/** Raised when a client exceeds the configured request budget (code -32002). */
export class RateLimitError extends MCPError {
  constructor(message: string = 'Rate limit exceeded') {
    super(message, -32002);
    this.name = 'RateLimitError';
  }
}
48 |
/** Raised when a request exceeds its configured time budget (code -32003). */
export class TimeoutError extends MCPError {
  constructor(message: string = 'Request timeout') {
    super(message, -32003);
    this.name = 'TimeoutError';
  }
}
55 |
56 | export class GeminiAPIError extends MCPError {
57 | constructor(
58 | message: string,
59 | public originalError?: any
60 | ) {
61 | super(message, -32603);
62 | this.name = 'GeminiAPIError';
63 | this.data = originalError;
64 | }
65 | }
66 |
67 | /**
68 | * Error handler utility functions
69 | */
70 | export class ErrorHandler {
71 | static handleGeminiError(error: any): GeminiAPIError {
72 | if (error?.error) {
73 | const geminiError = error.error;
74 | let message = 'Gemini API error';
75 |
76 | if (geminiError.message) {
77 | message = geminiError.message;
78 | } else if (geminiError.status) {
79 | message = `Gemini API error: ${geminiError.status}`;
80 | }
81 |
82 | return new GeminiAPIError(message, geminiError);
83 | }
84 |
85 | return new GeminiAPIError('Unknown Gemini API error', error);
86 | }
87 |
88 | static isRetryableError(error: any): boolean {
89 | if (error instanceof GeminiAPIError) {
90 | const status = error.originalError?.status;
91 | // Retry on server errors and rate limits
92 | return (
93 | status === 'UNAVAILABLE' ||
94 | status === 'RESOURCE_EXHAUSTED' ||
95 | status === 'INTERNAL' ||
96 | error.originalError?.code === 503 ||
97 | error.originalError?.code === 429
98 | );
99 | }
100 | return false;
101 | }
102 |
103 | static getRetryDelay(attempt: number): number {
104 | // Exponential backoff: 1s, 2s, 4s, 8s, 16s
105 | return Math.min(1000 * Math.pow(2, attempt), 16000);
106 | }
107 | }
108 |
109 | /**
110 | * Async retry utility with exponential backoff
111 | */
112 | export async function withRetry<T>(
113 | operation: () => Promise<T>,
114 | maxAttempts: number = 3,
115 | baseDelay: number = 1000
116 | ): Promise<T> {
117 | let lastError: any;
118 |
119 | for (let attempt = 0; attempt < maxAttempts; attempt++) {
120 | try {
121 | return await operation();
122 | } catch (error) {
123 | lastError = error;
124 |
125 | if (attempt === maxAttempts - 1 || !ErrorHandler.isRetryableError(error)) {
126 | throw error;
127 | }
128 |
129 | const delay = baseDelay * Math.pow(2, attempt);
130 | await new Promise(resolve => setTimeout(resolve, delay));
131 | }
132 | }
133 |
134 | throw lastError;
135 | }
136 |
```
--------------------------------------------------------------------------------
/src/types.ts:
--------------------------------------------------------------------------------
```typescript
// Base MCP Protocol Types

/** A JSON-RPC 2.0 request as received over the MCP transport. */
export interface MCPRequest {
  jsonrpc: '2.0';
  id: string | number;
  method: string;
  params?: any;
}

/** A JSON-RPC 2.0 response; carries either `result` or `error`. */
export interface MCPResponse {
  jsonrpc: '2.0';
  id: string | number;
  result?: any;
  error?: {
    code: number;
    message: string;
    data?: any;
  };
}

/** JSON-RPC error object shape (also embedded in MCPResponse.error). */
export interface MCPError {
  code: number;
  message: string;
  data?: any;
}

// Connection Management

/** Per-connection bookkeeping for liveness and in-flight work tracking. */
export interface ConnectionState {
  connectedAt: Date;
  lastMessageAt: Date;
  initialized: boolean;
  // IDs of requests currently being processed on this connection.
  activeRequests: Set<string | number>;
  ip: string;
}

// Notification Types

/** A JSON-RPC notification: has a method but no id, so no response is expected. */
export interface NotificationMessage {
  jsonrpc: '2.0';
  method: string;
  params?: any;
}

/** Server-to-client error notification. */
export interface ErrorNotification extends NotificationMessage {
  method: 'notifications/error';
  params: {
    code: number;
    message: string;
    data?: any;
  };
}

/** Progress payload: `progress` units completed out of optional `total`. */
export interface ProgressParams {
  progressToken: string | number;
  progress: number;
  total?: number;
}

/** Server-to-client progress update for a long-running request. */
export interface ProgressNotification extends NotificationMessage {
  method: 'notifications/progress';
  params: ProgressParams;
}

// Request Types

/** Request for a one-shot text completion. */
export interface GenerateRequest extends MCPRequest {
  method: 'generate';
  params: {
    prompt: string;
    temperature?: number;
    maxTokens?: number;
    stopSequences?: string[];
  };
}

/** Response carrying the completed text plus generation metadata. */
export interface GenerateResponse extends MCPResponse {
  result: {
    type: 'completion';
    content: string;
    metadata: {
      model: string;
      provider: string;
      temperature?: number;
      maxTokens?: number;
      stopSequences?: string[];
    };
  };
}

/** Request for a streamed completion. */
export interface StreamRequest extends MCPRequest {
  method: 'stream';
  params: {
    prompt: string;
    temperature?: number;
    maxTokens?: number;
    stopSequences?: string[];
  };
}

/** One chunk of a streamed completion; `done` marks the final chunk. */
export interface StreamResponse extends MCPResponse {
  result: {
    type: 'stream';
    content: string;
    done: boolean;
  };
}

/** Request to cancel an in-flight request by its id. */
export interface CancelRequest extends MCPRequest {
  method: 'cancel';
  params: {
    requestId: string | number;
  };
}

/** Request to update server-side generation defaults. */
export interface ConfigureRequest extends MCPRequest {
  method: 'configure';
  params: {
    configuration: {
      model?: string;
      temperature?: number;
      maxTokens?: number;
      stopSequences?: string[];
      timeout?: number;
    };
  };
}

// Server Configuration

/** Identity reported during the MCP initialize handshake. */
export interface ServerInfo {
  name: string;
  version: string;
}

/** Capability flags advertised to the client during initialization. */
export interface ServerCapabilities {
  experimental?: Record<string, any>;
  prompts?: {
    listChanged?: boolean;
  };
  resources?: {
    subscribe?: boolean;
    listChanged?: boolean;
  };
  tools?: {
    listChanged?: boolean;
  };
  logging?: Record<string, any>;
}

/** Result payload of the `initialize` request. */
export interface InitializeResult {
  protocolVersion: string;
  serverInfo: ServerInfo;
  capabilities: ServerCapabilities;
}

// Lifecycle Types

/** Graceful shutdown request; precedes the `exit` notification. */
export interface ShutdownRequest extends MCPRequest {
  method: 'shutdown';
}

/** Final notification telling the server process to exit. */
export interface ExitNotification extends NotificationMessage {
  method: 'exit';
}

// Resource and Prompt References (for interfaces)

/** Reference to a resource by URI. */
export interface ResourceReference {
  type: 'resource';
  uri: string;
}

/** Reference to a named prompt template. */
export interface PromptReference {
  type: 'prompt';
  name: string;
}

/** A single named argument supplied to a completion request. */
export interface CompletionArgument {
  name: string;
  value: string;
}

/** Completion suggestions; `hasMore` signals that `values` was truncated. */
export interface Completion {
  values: Array<{
    value: string;
    description?: string;
  }>;
  total?: number;
  hasMore?: boolean;
}
185 |
```
--------------------------------------------------------------------------------
/src/utils/validation.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from 'zod';
2 | import { ValidationError } from './errors.js';
3 |
/**
 * Common validation schemas shared by the tool parameter schemas below.
 */
export const CommonSchemas = {
  // Gemini model names accepted by the text-generation tools
  geminiModel: z.enum([
    'gemini-2.5-pro',
    'gemini-2.5-flash',
    'gemini-2.5-flash-lite',
    'gemini-2.0-flash',
    'gemini-2.0-flash-lite',
    'gemini-2.0-pro-experimental',
    'gemini-1.5-pro',
    'gemini-1.5-flash'
  ]),

  // Temperature range
  temperature: z.number().min(0).max(2),

  // Token limits
  maxTokens: z.number().min(1).max(8192),

  // Top-k and top-p parameters
  topK: z.number().min(1).max(100),
  topP: z.number().min(0).max(1),

  // Conversation ID
  conversationId: z.string().min(1).max(100),

  // JSON schema string — only checks that the string parses as JSON; it does
  // not verify that the parsed value is itself a well-formed JSON Schema.
  jsonSchema: z.string().refine(val => {
    try {
      JSON.parse(val);
      return true;
    } catch {
      return false;
    }
  }, 'Must be valid JSON'),

  // Safety settings string — must parse to a JSON array
  safetySettings: z.string().refine(val => {
    try {
      const parsed = JSON.parse(val);
      return Array.isArray(parsed);
    } catch {
      return false;
    }
  }, 'Must be valid JSON array'),

  // Base64 image data — NOTE: the regex validates only the data-URI prefix,
  // not the base64 payload that follows the comma.
  base64Image: z
    .string()
    .regex(/^data:image\/(png|jpeg|jpg|gif|webp);base64,/, 'Must be valid base64 image data'),

  // URL validation
  imageUrl: z.string().url('Must be a valid URL')
};
61 |
/**
 * Tool parameter validation schemas, one per exposed MCP tool.
 */
export const ToolSchemas = {
  // generate_text: free-form generation with optional JSON mode / grounding
  generateText: z.object({
    prompt: z.string().min(1, 'Prompt is required'),
    model: CommonSchemas.geminiModel.optional(),
    systemInstruction: z.string().optional(),
    temperature: CommonSchemas.temperature.optional(),
    maxTokens: CommonSchemas.maxTokens.optional(),
    topK: CommonSchemas.topK.optional(),
    topP: CommonSchemas.topP.optional(),
    jsonMode: z.boolean().optional(),
    jsonSchema: CommonSchemas.jsonSchema.optional(),
    grounding: z.boolean().optional(),
    safetySettings: CommonSchemas.safetySettings.optional(),
    conversationId: CommonSchemas.conversationId.optional()
  }),

  // analyze_image: requires at least one image source (URL or inline base64)
  analyzeImage: z
    .object({
      prompt: z.string().min(1, 'Prompt is required'),
      imageUrl: CommonSchemas.imageUrl.optional(),
      imageBase64: CommonSchemas.base64Image.optional(),
      model: z.enum(['gemini-2.5-pro', 'gemini-2.5-flash', 'gemini-2.0-flash']).optional()
    })
    .refine(
      data => data.imageUrl || data.imageBase64,
      'Either imageUrl or imageBase64 must be provided'
    ),

  // count_tokens: token estimate for a given model
  countTokens: z.object({
    text: z.string().min(1, 'Text is required'),
    model: CommonSchemas.geminiModel.optional()
  }),

  // list_models: optional capability filter
  listModels: z.object({
    filter: z.enum(['all', 'thinking', 'vision', 'grounding', 'json_mode']).optional()
  }),

  // embed_text: embedding generation with a dedicated model whitelist
  embedText: z.object({
    text: z.string().min(1, 'Text is required'),
    model: z.enum(['text-embedding-004', 'text-multilingual-embedding-002']).optional()
  }),

  // get_help: built-in documentation topics
  getHelp: z.object({
    topic: z
      .enum(['overview', 'tools', 'models', 'parameters', 'examples', 'quick-start'])
      .optional()
  })
};
113 |
/**
 * Validation utility class: static helpers for checking and sanitizing input.
 */
export class Validator {
  /**
   * Validate tool parameters against a Zod schema.
   * Zod issues are flattened into a single human-readable ValidationError;
   * non-Zod failures are rethrown unchanged.
   */
  static validateToolParams<T>(schema: z.ZodSchema<T>, params: unknown): T {
    try {
      return schema.parse(params);
    } catch (error) {
      if (error instanceof z.ZodError) {
        const issues = error.issues
          .map(issue => `${issue.path.join('.')}: ${issue.message}`)
          .join(', ');
        throw new ValidationError(`Invalid parameters: ${issues}`);
      }
      throw error;
    }
  }

  /**
   * Sanitize string input: enforce a length budget and strip control
   * characters. Newlines (\n, \r) and tabs are preserved by the regex.
   */
  static sanitizeString(input: string, maxLength: number = 10000): string {
    if (typeof input !== 'string') {
      throw new ValidationError('Input must be a string');
    }

    if (input.length > maxLength) {
      throw new ValidationError(`Input too long (max ${maxLength} characters)`);
    }

    // Remove null bytes and other control characters except newlines and tabs
    // eslint-disable-next-line no-control-regex
    return input.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, '');
  }

  /**
   * Validate JSON string and return the parsed value.
   * @throws ValidationError when the string is not parseable JSON.
   */
  static validateJSON(jsonString: string): any {
    try {
      return JSON.parse(jsonString);
    } catch (error) {
      throw new ValidationError('Invalid JSON format');
    }
  }

  /**
   * Validate MCP request structure (envelope only; params are validated later
   * per-tool). NOTE(review): JSON-RPC 2.0 permits `id: null` and id-less
   * notifications; this rejects both — confirm that is intentional here.
   */
  static validateMCPRequest(request: any): void {
    if (!request || typeof request !== 'object') {
      throw new ValidationError('Request must be an object');
    }

    if (request.jsonrpc !== '2.0') {
      throw new ValidationError('Invalid JSON-RPC version');
    }

    if (typeof request.method !== 'string') {
      throw new ValidationError('Method must be a string');
    }

    if (request.id === undefined || request.id === null) {
      throw new ValidationError('Request ID is required');
    }
  }
}
184 |
```
--------------------------------------------------------------------------------
/docs/configuration.md:
--------------------------------------------------------------------------------
```markdown
1 | # Configuration Guide
2 |
3 | ## Environment Variables
4 |
5 | The MCP Server Gemini can be configured using environment variables or a `.env` file.
6 |
7 | ### Required Configuration
8 |
9 | | Variable | Required | Description | Example |
10 | |----------|----------|-------------|---------|
| `GEMINI_API_KEY` | ✅ | Your Google AI Studio API key | `AIzaSy...` (placeholder — use your own key from Google AI Studio) |
12 |
13 | ### Optional Configuration
14 |
15 | | Variable | Default | Description | Example |
16 | |----------|---------|-------------|---------|
17 | | `LOG_LEVEL` | `info` | Logging level (error, warn, info, debug) | `debug` |
18 | | `ENABLE_METRICS` | `false` | Enable performance metrics | `true` |
19 | | `RATE_LIMIT_ENABLED` | `true` | Enable rate limiting | `false` |
20 | | `RATE_LIMIT_REQUESTS` | `100` | Max requests per window | `200` |
21 | | `RATE_LIMIT_WINDOW` | `60000` | Rate limit window in ms | `120000` |
22 | | `REQUEST_TIMEOUT` | `30000` | Request timeout in ms | `60000` |
23 | | `NODE_ENV` | `production` | Environment mode | `development` |
24 |
25 | ### Example .env File
26 |
27 | ```bash
28 | # Required
29 | GEMINI_API_KEY=your_api_key_here
30 |
31 | # Optional - Logging
32 | LOG_LEVEL=info
33 | ENABLE_METRICS=false
34 |
35 | # Optional - Rate Limiting
36 | RATE_LIMIT_ENABLED=true
37 | RATE_LIMIT_REQUESTS=100
38 | RATE_LIMIT_WINDOW=60000
39 |
40 | # Optional - Timeouts
41 | REQUEST_TIMEOUT=30000
42 |
43 | # Optional - Development
44 | NODE_ENV=production
45 | ```
46 |
47 | ## MCP Client Configuration
48 |
49 | ### Claude Desktop
50 |
51 | #### Configuration File Locations
52 |
53 | | OS | Path |
54 | |----|------|
55 | | **macOS** | `~/Library/Application Support/Claude/claude_desktop_config.json` |
56 | | **Windows** | `%APPDATA%\Claude\claude_desktop_config.json` |
57 | | **Linux** | `~/.config/Claude/claude_desktop_config.json` |
58 |
59 | #### Basic Configuration
60 |
61 | ```json
62 | {
63 | "mcpServers": {
64 | "gemini": {
65 | "command": "mcp-server-gemini",
66 | "env": {
67 | "GEMINI_API_KEY": "your_api_key_here"
68 | }
69 | }
70 | }
71 | }
72 | ```
73 |
74 | #### Advanced Configuration
75 |
76 | ```json
77 | {
78 | "mcpServers": {
79 | "gemini": {
80 | "command": "mcp-server-gemini",
81 | "env": {
82 | "GEMINI_API_KEY": "your_api_key_here",
83 | "LOG_LEVEL": "debug",
84 | "RATE_LIMIT_REQUESTS": "200",
85 | "REQUEST_TIMEOUT": "60000"
86 | }
87 | }
88 | }
89 | }
90 | ```
91 |
92 | #### Local Development Configuration
93 |
94 | ```json
95 | {
96 | "mcpServers": {
97 | "gemini": {
98 | "command": "node",
99 | "args": ["/path/to/mcp-server-gemini/dist/enhanced-stdio-server.js"],
100 | "cwd": "/path/to/mcp-server-gemini",
101 | "env": {
102 | "GEMINI_API_KEY": "your_api_key_here",
103 | "NODE_ENV": "development",
104 | "LOG_LEVEL": "debug"
105 | }
106 | }
107 | }
108 | }
109 | ```
110 |
111 | ### Cursor IDE
112 |
113 | Add to your Cursor MCP configuration:
114 |
115 | ```json
116 | {
117 | "mcpServers": {
118 | "gemini": {
119 | "type": "stdio",
120 | "command": "mcp-server-gemini",
121 | "env": {
122 | "GEMINI_API_KEY": "your_api_key_here"
123 | }
124 | }
125 | }
126 | }
127 | ```
128 |
129 | ### Windsurf
130 |
131 | Configure in Windsurf settings:
132 |
133 | ```json
134 | {
135 | "mcp": {
136 | "servers": {
137 | "gemini": {
138 | "command": "mcp-server-gemini",
139 | "env": {
140 | "GEMINI_API_KEY": "your_api_key_here"
141 | }
142 | }
143 | }
144 | }
145 | }
146 | ```
147 |
148 | ## Security Configuration
149 |
150 | ### API Key Management
151 |
152 | #### Best Practices
153 | 1. **Never commit API keys** to version control
154 | 2. **Use environment variables** or secure secret management
155 | 3. **Rotate keys regularly** for production use
156 | 4. **Use different keys** for development and production
157 |
158 | #### Secure Storage Options
159 |
160 | **Option 1: Environment Variables**
161 | ```bash
162 | export GEMINI_API_KEY="your_api_key_here"
163 | ```
164 |
165 | **Option 2: .env File (Development)**
166 | ```bash
167 | # .env
168 | GEMINI_API_KEY=your_api_key_here
169 | ```
170 |
171 | **Option 3: System Keychain (macOS)**
172 | ```bash
173 | security add-generic-password -a "mcp-gemini" -s "gemini-api-key" -w "your_api_key_here"
174 | ```
175 |
176 | **Option 4: Docker Secrets**
177 | ```yaml
178 | # docker-compose.yml
179 | services:
180 | mcp-server:
181 | image: mcp-server-gemini
182 | secrets:
183 | - gemini_api_key
184 | secrets:
185 | gemini_api_key:
186 | external: true
187 | ```
188 |
189 | ### Rate Limiting Configuration
190 |
191 | Configure rate limiting to protect your API quota:
192 |
193 | ```bash
194 | # Conservative settings
195 | RATE_LIMIT_ENABLED=true
196 | RATE_LIMIT_REQUESTS=50
197 | RATE_LIMIT_WINDOW=60000
198 |
199 | # High-throughput settings
200 | RATE_LIMIT_ENABLED=true
201 | RATE_LIMIT_REQUESTS=500
202 | RATE_LIMIT_WINDOW=60000
203 |
204 | # Disable for development
205 | RATE_LIMIT_ENABLED=false
206 | ```
207 |
208 | ## Performance Configuration
209 |
210 | ### Timeout Settings
211 |
212 | ```bash
213 | # Conservative (stable connections)
214 | REQUEST_TIMEOUT=30000
215 |
216 | # Aggressive (fast networks)
217 | REQUEST_TIMEOUT=10000
218 |
219 | # Patient (complex requests)
220 | REQUEST_TIMEOUT=120000
221 | ```
222 |
223 | ### Logging Configuration
224 |
225 | ```bash
226 | # Production
227 | LOG_LEVEL=warn
228 | ENABLE_METRICS=true
229 |
230 | # Development
231 | LOG_LEVEL=debug
232 | ENABLE_METRICS=false
233 |
234 | # Debugging
235 | LOG_LEVEL=debug
236 | ENABLE_METRICS=true
237 | ```
238 |
239 | ## Troubleshooting Configuration
240 |
241 | ### Common Issues
242 |
243 | #### 1. API Key Not Found
244 | ```bash
245 | # Check if environment variable is set
246 | echo $GEMINI_API_KEY
247 |
248 | # Verify .env file exists and is readable
249 | cat .env | grep GEMINI_API_KEY
250 | ```
251 |
252 | #### 2. Permission Errors
253 | ```bash
254 | # Check file permissions
255 | ls -la .env
256 |
257 | # Fix permissions
258 | chmod 600 .env
259 | ```
260 |
261 | #### 3. Rate Limiting Issues
262 | ```bash
263 | # Temporarily disable rate limiting
264 | RATE_LIMIT_ENABLED=false
265 |
266 | # Increase limits
267 | RATE_LIMIT_REQUESTS=1000
268 | RATE_LIMIT_WINDOW=60000
269 | ```
270 |
271 | ### Debug Configuration
272 |
273 | Enable debug mode for troubleshooting:
274 |
275 | ```json
276 | {
277 | "mcpServers": {
278 | "gemini": {
279 | "command": "mcp-server-gemini",
280 | "env": {
281 | "GEMINI_API_KEY": "your_api_key_here",
282 | "LOG_LEVEL": "debug",
283 | "NODE_ENV": "development"
284 | }
285 | }
286 | }
287 | }
288 | ```
289 |
290 | ## Validation
291 |
292 | The server validates all configuration on startup. Invalid configuration will result in clear error messages:
293 |
294 | ```
295 | Configuration validation failed:
296 | geminiApiKey: GEMINI_API_KEY is required
297 | rateLimitRequests: Expected number, received string
298 | ```
299 |
300 | ## Configuration Schema
301 |
302 | The server uses Zod for configuration validation. See `src/config/index.ts` for the complete schema definition.
303 |
304 | ## Environment-Specific Configurations
305 |
306 | ### Development
307 | ```bash
308 | NODE_ENV=development
309 | LOG_LEVEL=debug
310 | RATE_LIMIT_ENABLED=false
311 | REQUEST_TIMEOUT=60000
312 | ```
313 |
314 | ### Production
315 | ```bash
316 | NODE_ENV=production
317 | LOG_LEVEL=warn
318 | RATE_LIMIT_ENABLED=true
319 | RATE_LIMIT_REQUESTS=100
320 | REQUEST_TIMEOUT=30000
321 | ENABLE_METRICS=true
322 | ```
323 |
324 | ### Testing
325 | ```bash
326 | NODE_ENV=test
327 | LOG_LEVEL=error
328 | RATE_LIMIT_ENABLED=false
329 | REQUEST_TIMEOUT=10000
330 | ```
331 |
```
--------------------------------------------------------------------------------
/tests/integration/gemini-api.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { GoogleGenAI } from '@google/genai';
2 | import { config } from '../../src/config/index.js';
3 |
// Skip integration tests if no API key is provided
// (describe.skip still registers the suite, reporting it as skipped)
const describeIf = (condition: boolean) => condition ? describe : describe.skip;
6 |
// Live integration suite: every test below performs a real network call to
// the Gemini API, hence the generous per-test timeouts.
describeIf(!!process.env.GEMINI_API_KEY)('Gemini API Integration', () => {
  let genAI: GoogleGenAI;

  beforeAll(() => {
    genAI = new GoogleGenAI({ apiKey: config.geminiApiKey });
  });

  describe('Text Generation', () => {
    it('should generate text with gemini-2.5-flash', async () => {
      const result = await genAI.models.generateContent({
        model: 'gemini-2.5-flash',
        contents: [{
          parts: [{ text: 'Say hello in exactly 3 words.' }],
          role: 'user'
        }]
      });

      expect(result).toBeDefined();
      expect(result.candidates).toBeDefined();
      expect(result.candidates!.length).toBeGreaterThan(0);
      expect(result.candidates![0].content?.parts?.[0]?.text).toBeDefined();
    }, 30000);

    it('should generate text with system instruction', async () => {
      const result = await genAI.models.generateContent({
        model: 'gemini-2.5-flash',
        systemInstruction: {
          parts: [{ text: 'You are a helpful assistant that always responds with exactly one word.' }]
        },
        contents: [{
          parts: [{ text: 'What is the capital of France?' }],
          role: 'user'
        }]
      });

      expect(result).toBeDefined();
      expect(result.candidates).toBeDefined();
      expect(result.candidates!.length).toBeGreaterThan(0);

      const responseText = result.candidates![0].content?.parts?.[0]?.text;
      expect(responseText).toBeDefined();
      // Model output is non-deterministic, so only bound the word count loosely.
      expect(responseText!.trim().split(/\s+/).length).toBeLessThanOrEqual(2); // Allow for some flexibility
    }, 30000);

    it('should generate JSON output', async () => {
      const result = await genAI.models.generateContent({
        model: 'gemini-2.5-flash',
        generationConfig: {
          responseMimeType: 'application/json',
          responseSchema: {
            type: 'object',
            properties: {
              answer: { type: 'string' },
              confidence: { type: 'number' }
            },
            required: ['answer', 'confidence']
          }
        },
        contents: [{
          parts: [{ text: 'What is 2+2? Respond with your answer and confidence level.' }],
          role: 'user'
        }]
      });

      expect(result).toBeDefined();
      expect(result.candidates).toBeDefined();

      const responseText = result.candidates![0].content?.parts?.[0]?.text;
      expect(responseText).toBeDefined();

      // Should be valid JSON
      const jsonResponse = JSON.parse(responseText!);
      expect(jsonResponse.answer).toBeDefined();
      expect(jsonResponse.confidence).toBeDefined();
      expect(typeof jsonResponse.confidence).toBe('number');
    }, 30000);
  });

  describe('Token Counting', () => {
    it('should count tokens for text', async () => {
      const result = await genAI.models.countTokens({
        model: 'gemini-2.5-flash',
        contents: 'This is a test message for token counting.'
      });

      expect(result).toBeDefined();
      expect(result.totalTokens).toBeGreaterThan(0);
      expect(typeof result.totalTokens).toBe('number');
    }, 10000);

    it('should count tokens for longer text', async () => {
      const longText = 'This is a longer test message. '.repeat(100);
      const result = await genAI.models.countTokens({
        model: 'gemini-2.5-flash',
        contents: longText
      });

      expect(result).toBeDefined();
      expect(result.totalTokens).toBeGreaterThan(100); // Should be significantly more tokens
    }, 10000);
  });

  describe('Model Listing', () => {
    it('should list available models', async () => {
      const result = await genAI.models.list();

      expect(result).toBeDefined();
      expect(result.models).toBeDefined();
      expect(Array.isArray(result.models)).toBe(true);
      expect(result.models!.length).toBeGreaterThan(0);

      // Check that we have some expected models
      const modelNames = result.models!.map(model => model.name);
      expect(modelNames.some(name => name?.includes('gemini'))).toBe(true);
    }, 10000);
  });

  describe('Embeddings', () => {
    it('should generate embeddings', async () => {
      const result = await genAI.models.embedContent({
        model: 'text-embedding-004',
        contents: 'This is a test text for embedding generation.'
      });

      expect(result).toBeDefined();
      expect(result.embeddings).toBeDefined();
      expect(Array.isArray(result.embeddings)).toBe(true);
      expect(result.embeddings!.length).toBeGreaterThan(0);

      const embedding = result.embeddings![0];
      expect(embedding.values).toBeDefined();
      expect(Array.isArray(embedding.values)).toBe(true);
      expect(embedding.values!.length).toBeGreaterThan(0);

      // Check that values are numbers
      embedding.values!.forEach(value => {
        expect(typeof value).toBe('number');
      });
    }, 15000);
  });

  describe('Error Handling', () => {
    it('should handle invalid model name', async () => {
      await expect(
        genAI.models.generateContent({
          model: 'invalid-model-name',
          contents: [{
            parts: [{ text: 'Test' }],
            role: 'user'
          }]
        })
      ).rejects.toThrow();
    }, 10000);

    it('should handle empty content', async () => {
      await expect(
        genAI.models.generateContent({
          model: 'gemini-2.5-flash',
          contents: [{
            parts: [{ text: '' }],
            role: 'user'
          }]
        })
      ).rejects.toThrow();
    }, 10000);
  });

  describe('Rate Limiting', () => {
    it('should handle multiple concurrent requests', async () => {
      const requests = Array.from({ length: 5 }, (_, i) =>
        genAI.models.generateContent({
          model: 'gemini-2.5-flash',
          contents: [{
            parts: [{ text: `Test request ${i + 1}` }],
            role: 'user'
          }]
        })
      );

      const results = await Promise.allSettled(requests);

      // At least some requests should succeed
      const successful = results.filter(result => result.status === 'fulfilled');
      expect(successful.length).toBeGreaterThan(0);

      // Check that successful results have the expected structure
      successful.forEach(result => {
        if (result.status === 'fulfilled') {
          expect(result.value.candidates).toBeDefined();
          expect(result.value.candidates!.length).toBeGreaterThan(0);
        }
      });
    }, 60000);
  });
});
202 |
```
--------------------------------------------------------------------------------
/docs/api.md:
--------------------------------------------------------------------------------
```markdown
1 | # API Documentation
2 |
3 | ## Overview
4 |
5 | The MCP Server Gemini provides 6 powerful tools for interacting with Google's Gemini AI models through the Model Context Protocol.
6 |
7 | ## Tools
8 |
9 | ### 1. generate_text
10 |
11 | Generate text using Gemini models with advanced features.
12 |
13 | #### Parameters
14 |
15 | | Parameter | Type | Required | Default | Description |
16 | |-----------|------|----------|---------|-------------|
17 | | `prompt` | string | ✅ | - | The text prompt to send to Gemini |
18 | | `model` | string | ❌ | `gemini-2.5-flash` | Gemini model to use |
19 | | `systemInstruction` | string | ❌ | - | System instruction to guide behavior |
20 | | `temperature` | number | ❌ | `0.7` | Creativity level (0-2) |
21 | | `maxTokens` | number | ❌ | `2048` | Maximum tokens to generate |
22 | | `topK` | number | ❌ | `40` | Top-k sampling parameter |
23 | | `topP` | number | ❌ | `0.95` | Top-p (nucleus) sampling |
24 | | `jsonMode` | boolean | ❌ | `false` | Enable structured JSON output |
25 | | `jsonSchema` | string | ❌ | - | JSON schema for validation (when jsonMode=true) |
26 | | `grounding` | boolean | ❌ | `false` | Enable Google Search grounding |
27 | | `safetySettings` | string | ❌ | - | Safety settings as JSON string |
28 | | `conversationId` | string | ❌ | - | ID for conversation context |
29 |
30 | #### Example Usage
31 |
32 | ```javascript
33 | // Basic text generation
34 | {
35 | "prompt": "Explain quantum computing in simple terms",
36 | "model": "gemini-2.5-flash",
37 | "temperature": 0.7
38 | }
39 |
40 | // JSON mode with schema
41 | {
42 | "prompt": "Extract key information from this text: ...",
43 | "jsonMode": true,
44 | "jsonSchema": "{\"type\":\"object\",\"properties\":{\"summary\":{\"type\":\"string\"},\"keyPoints\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}}}}"
45 | }
46 |
47 | // With grounding for current information
48 | {
49 | "prompt": "What are the latest developments in AI?",
50 | "grounding": true,
51 | "model": "gemini-2.5-pro"
52 | }
53 | ```
54 |
55 | ### 2. analyze_image
56 |
57 | Analyze images using Gemini's vision capabilities.
58 |
59 | #### Parameters
60 |
61 | | Parameter | Type | Required | Default | Description |
62 | |-----------|------|----------|---------|-------------|
63 | | `prompt` | string | ✅ | - | Question or instruction about the image |
64 | | `imageUrl` | string | ❌* | - | URL of the image to analyze |
65 | | `imageBase64` | string | ❌* | - | Base64-encoded image data |
66 | | `model` | string | ❌ | `gemini-2.5-flash` | Vision-capable model |
67 |
68 | *Either `imageUrl` or `imageBase64` must be provided.
69 |
70 | #### Example Usage
71 |
72 | ```javascript
73 | // Analyze image from URL
74 | {
75 | "prompt": "What's in this image?",
76 | "imageUrl": "https://example.com/image.jpg",
77 | "model": "gemini-2.5-pro"
78 | }
79 |
80 | // Analyze base64 image
81 | {
82 | "prompt": "Describe the technical diagram",
83 | "imageBase64": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA..."
84 | }
85 | ```
86 |
87 | ### 3. count_tokens
88 |
89 | Count tokens for cost estimation and planning.
90 |
91 | #### Parameters
92 |
93 | | Parameter | Type | Required | Default | Description |
94 | |-----------|------|----------|---------|-------------|
95 | | `text` | string | ✅ | - | Text to count tokens for |
96 | | `model` | string | ❌ | `gemini-2.5-flash` | Model to use for counting |
97 |
98 | #### Example Usage
99 |
100 | ```javascript
101 | {
102 | "text": "This is a sample text to count tokens for cost estimation.",
103 | "model": "gemini-2.5-pro"
104 | }
105 | ```
106 |
107 | ### 4. list_models
108 |
109 | List all available Gemini models and their capabilities.
110 |
111 | #### Parameters
112 |
113 | | Parameter | Type | Required | Default | Description |
114 | |-----------|------|----------|---------|-------------|
115 | | `filter` | string | ❌ | `all` | Filter models by capability |
116 |
117 | #### Filter Options
118 | - `all` - All available models
119 | - `thinking` - Models with thinking capabilities
120 | - `vision` - Models with vision support
121 | - `grounding` - Models with Google Search grounding
122 | - `json_mode` - Models supporting JSON mode
123 |
124 | #### Example Usage
125 |
126 | ```javascript
127 | // List all models
128 | {
129 | "filter": "all"
130 | }
131 |
132 | // List only thinking models
133 | {
134 | "filter": "thinking"
135 | }
136 | ```
137 |
138 | ### 5. embed_text
139 |
140 | Generate text embeddings using Gemini embedding models.
141 |
142 | #### Parameters
143 |
144 | | Parameter | Type | Required | Default | Description |
145 | |-----------|------|----------|---------|-------------|
146 | | `text` | string | ✅ | - | Text to generate embeddings for |
147 | | `model` | string | ❌ | `text-embedding-004` | Embedding model to use |
148 |
149 | #### Available Embedding Models
150 | - `text-embedding-004` - Latest embedding model
151 | - `text-multilingual-embedding-002` - Multilingual support
152 |
153 | #### Example Usage
154 |
155 | ```javascript
156 | {
157 | "text": "This is a sample text for embedding generation.",
158 | "model": "text-embedding-004"
159 | }
160 | ```
161 |
162 | ### 6. get_help
163 |
164 | Get help and usage information for the server.
165 |
166 | #### Parameters
167 |
168 | | Parameter | Type | Required | Default | Description |
169 | |-----------|------|----------|---------|-------------|
170 | | `topic` | string | ❌ | `overview` | Help topic to get information about |
171 |
172 | #### Available Topics
173 | - `overview` - General overview and quick start
174 | - `tools` - Detailed tool information
175 | - `models` - Model selection guide
176 | - `parameters` - Parameter explanations
177 | - `examples` - Usage examples
178 | - `quick-start` - Quick start guide
179 |
180 | #### Example Usage
181 |
182 | ```javascript
183 | // Get overview
184 | {
185 | "topic": "overview"
186 | }
187 |
188 | // Get tool details
189 | {
190 | "topic": "tools"
191 | }
192 | ```
193 |
194 | ## Response Format
195 |
196 | All tools return responses in the standard MCP format:
197 |
198 | ```javascript
199 | {
200 | "jsonrpc": "2.0",
201 | "id": "request-id",
202 | "result": {
203 | "content": [
204 | {
205 | "type": "text",
206 | "text": "Response content here"
207 | }
208 | ],
209 | "metadata": {
210 | // Additional metadata
211 | }
212 | }
213 | }
214 | ```
215 |
216 | ## Error Handling
217 |
218 | Errors are returned in standard MCP error format:
219 |
220 | ```javascript
221 | {
222 | "jsonrpc": "2.0",
223 | "id": "request-id",
224 | "error": {
225 | "code": -32603,
226 | "message": "Error description",
227 | "data": {
228 | // Additional error details
229 | }
230 | }
231 | }
232 | ```
233 |
234 | ### Common Error Codes
235 |
236 | | Code | Description |
237 | |------|-------------|
238 | | `-32602` | Invalid parameters |
239 | | `-32603` | Internal error |
240 | | `-32001` | Authentication error |
241 | | `-32002` | Rate limit exceeded |
242 | | `-32003` | Request timeout |
243 |
244 | ## Rate Limiting
245 |
246 | The server implements rate limiting to protect against abuse:
247 |
248 | - **Default**: 100 requests per minute
249 | - **Configurable**: Set via environment variables
250 | - **Per-client**: Rate limits are applied per client connection
251 |
252 | ## Best Practices
253 |
254 | ### Model Selection
255 | - Use `gemini-2.5-flash` for general purposes
256 | - Use `gemini-2.5-pro` for complex reasoning
257 | - Use `gemini-2.5-flash-lite` for high-throughput tasks
258 |
259 | ### Parameter Optimization
260 | - Lower temperature (0.1-0.3) for factual content
261 | - Higher temperature (0.8-1.2) for creative content
262 | - Use `maxTokens` to control response length and costs
263 |
264 | ### Error Handling
265 | - Implement retry logic for transient errors
266 | - Handle rate limiting gracefully
267 | - Validate parameters before sending requests
268 |
269 | ### Performance
270 | - Use conversation IDs to maintain context
271 | - Cache embeddings when possible
272 | - Monitor token usage for cost optimization
273 |
```
--------------------------------------------------------------------------------
/tests/unit/errors.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import {
2 | MCPError,
3 | ValidationError,
4 | AuthenticationError,
5 | RateLimitError,
6 | TimeoutError,
7 | GeminiAPIError,
8 | ErrorHandler,
9 | withRetry
10 | } from '../../src/utils/errors.js';
11 |
// Unit coverage for the custom JSON-RPC error hierarchy. Each subclass must
// carry the wire code its handlers rely on, and MCPError must serialize to a
// spec-compliant JSON-RPC 2.0 error response via toMCPResponse().
describe('Error Classes', () => {
  describe('MCPError', () => {
    it('should create error with default code', () => {
      const error = new MCPError('Test error');

      // -32603 is the JSON-RPC "internal error" catch-all code.
      expect(error.message).toBe('Test error');
      expect(error.code).toBe(-32603);
      expect(error.name).toBe('MCPError');
    });

    it('should create error with custom code and data', () => {
      const error = new MCPError('Test error', -32602, { field: 'value' });

      expect(error.code).toBe(-32602);
      expect(error.data).toEqual({ field: 'value' });
    });

    it('should convert to MCP response format', () => {
      const error = new MCPError('Test error', -32602, { field: 'value' });
      const response = error.toMCPResponse('test-id');

      // The serialized form must echo the request id and nest code/message/
      // data under `error`, per the JSON-RPC 2.0 spec.
      expect(response).toEqual({
        jsonrpc: '2.0',
        id: 'test-id',
        error: {
          code: -32602,
          message: 'Test error',
          data: { field: 'value' }
        }
      });
    });
  });

  describe('ValidationError', () => {
    it('should create validation error with correct code', () => {
      const error = new ValidationError('Invalid parameter');

      // -32602: JSON-RPC "invalid params".
      expect(error.message).toBe('Invalid parameter');
      expect(error.code).toBe(-32602);
      expect(error.name).toBe('ValidationError');
    });
  });

  describe('AuthenticationError', () => {
    it('should create authentication error with default message', () => {
      const error = new AuthenticationError();

      expect(error.message).toBe('Invalid API key');
      expect(error.code).toBe(-32001);
      expect(error.name).toBe('AuthenticationError');
    });

    it('should create authentication error with custom message', () => {
      const error = new AuthenticationError('Custom auth error');

      expect(error.message).toBe('Custom auth error');
    });
  });

  describe('RateLimitError', () => {
    it('should create rate limit error with default message', () => {
      const error = new RateLimitError();

      expect(error.message).toBe('Rate limit exceeded');
      expect(error.code).toBe(-32002);
      expect(error.name).toBe('RateLimitError');
    });
  });

  describe('TimeoutError', () => {
    it('should create timeout error with default message', () => {
      const error = new TimeoutError();

      expect(error.message).toBe('Request timeout');
      expect(error.code).toBe(-32003);
      expect(error.name).toBe('TimeoutError');
    });
  });

  describe('GeminiAPIError', () => {
    it('should create Gemini API error', () => {
      const originalError = { status: 'INVALID_ARGUMENT' };
      const error = new GeminiAPIError('API error', originalError);

      // The raw API payload is kept both as `originalError` and as the
      // JSON-RPC `data` field (same object reference, not a copy).
      expect(error.message).toBe('API error');
      expect(error.code).toBe(-32603);
      expect(error.name).toBe('GeminiAPIError');
      expect(error.originalError).toBe(originalError);
      expect(error.data).toBe(originalError);
    });
  });
});
104 |
// ErrorHandler normalizes raw Gemini API failures into GeminiAPIError,
// classifies which failures are worth retrying, and computes backoff delays.
describe('ErrorHandler', () => {
  describe('handleGeminiError', () => {
    it('should handle error with message', () => {
      const geminiError = {
        error: {
          message: 'API key not valid'
        }
      };

      const result = ErrorHandler.handleGeminiError(geminiError);

      // The nested `error` object is preserved as originalError.
      expect(result).toBeInstanceOf(GeminiAPIError);
      expect(result.message).toBe('API key not valid');
      expect(result.originalError).toBe(geminiError.error);
    });

    it('should handle error with status', () => {
      const geminiError = {
        error: {
          status: 'UNAVAILABLE'
        }
      };

      const result = ErrorHandler.handleGeminiError(geminiError);

      // Without a message, the gRPC status is folded into the message text.
      expect(result.message).toBe('Gemini API error: UNAVAILABLE');
    });

    it('should handle unknown error format', () => {
      const geminiError = { unknown: 'format' };

      const result = ErrorHandler.handleGeminiError(geminiError);

      // Unrecognized shapes fall back to a generic message but keep the
      // original payload for debugging.
      expect(result.message).toBe('Unknown Gemini API error');
      expect(result.originalError).toBe(geminiError);
    });
  });

  describe('isRetryableError', () => {
    it('should identify retryable Gemini API errors', () => {
      // Transient failures: gRPC statuses UNAVAILABLE / RESOURCE_EXHAUSTED /
      // INTERNAL and HTTP 503 / 429 should all be retried.
      const retryableErrors = [
        new GeminiAPIError('Error', { status: 'UNAVAILABLE' }),
        new GeminiAPIError('Error', { status: 'RESOURCE_EXHAUSTED' }),
        new GeminiAPIError('Error', { status: 'INTERNAL' }),
        new GeminiAPIError('Error', { code: 503 }),
        new GeminiAPIError('Error', { code: 429 })
      ];

      retryableErrors.forEach(error => {
        expect(ErrorHandler.isRetryableError(error)).toBe(true);
      });
    });

    it('should identify non-retryable errors', () => {
      // Client-side mistakes (bad arguments, validation) and generic errors
      // must not be retried.
      const nonRetryableErrors = [
        new GeminiAPIError('Error', { status: 'INVALID_ARGUMENT' }),
        new GeminiAPIError('Error', { code: 400 }),
        new ValidationError('Invalid input'),
        new Error('Generic error')
      ];

      nonRetryableErrors.forEach(error => {
        expect(ErrorHandler.isRetryableError(error)).toBe(false);
      });
    });
  });

  describe('getRetryDelay', () => {
    it('should calculate exponential backoff delays', () => {
      // Delay doubles per attempt: 1000 * 2^attempt milliseconds.
      expect(ErrorHandler.getRetryDelay(0)).toBe(1000);
      expect(ErrorHandler.getRetryDelay(1)).toBe(2000);
      expect(ErrorHandler.getRetryDelay(2)).toBe(4000);
      expect(ErrorHandler.getRetryDelay(3)).toBe(8000);
      expect(ErrorHandler.getRetryDelay(4)).toBe(16000);
    });

    it('should cap delay at maximum', () => {
      // Backoff is clamped at 16 s regardless of attempt count.
      expect(ErrorHandler.getRetryDelay(10)).toBe(16000);
    });
  });
});
186 |
187 | describe('withRetry', () => {
188 | beforeEach(() => {
189 | jest.clearAllMocks();
190 | });
191 |
192 | it('should succeed on first attempt', async () => {
193 | const operation = jest.fn().mockResolvedValue('success');
194 |
195 | const result = await withRetry(operation, 3);
196 |
197 | expect(result).toBe('success');
198 | expect(operation).toHaveBeenCalledTimes(1);
199 | });
200 |
201 | it('should retry on retryable error', async () => {
202 | const retryableError = new GeminiAPIError('Error', { status: 'UNAVAILABLE' });
203 | const operation = jest.fn()
204 | .mockRejectedValueOnce(retryableError)
205 | .mockRejectedValueOnce(retryableError)
206 | .mockResolvedValue('success');
207 |
208 | const result = await withRetry(operation, 3);
209 |
210 | expect(result).toBe('success');
211 | expect(operation).toHaveBeenCalledTimes(3);
212 | });
213 |
214 | it('should not retry on non-retryable error', async () => {
215 | const nonRetryableError = new ValidationError('Invalid input');
216 | const operation = jest.fn().mockRejectedValue(nonRetryableError);
217 |
218 | await expect(withRetry(operation, 3)).rejects.toThrow(ValidationError);
219 | expect(operation).toHaveBeenCalledTimes(1);
220 | });
221 |
222 | it('should throw last error after max attempts', async () => {
223 | const retryableError = new GeminiAPIError('Error', { status: 'UNAVAILABLE' });
224 | const operation = jest.fn().mockRejectedValue(retryableError);
225 |
226 | await expect(withRetry(operation, 3)).rejects.toThrow(GeminiAPIError);
227 | expect(operation).toHaveBeenCalledTimes(3);
228 | });
229 |
230 | it('should wait between retries', async () => {
231 | const retryableError = new GeminiAPIError('Error', { status: 'UNAVAILABLE' });
232 | const operation = jest.fn()
233 | .mockRejectedValueOnce(retryableError)
234 | .mockResolvedValue('success');
235 |
236 | const startTime = Date.now();
237 | await withRetry(operation, 3, 100);
238 | const endTime = Date.now();
239 |
240 | expect(endTime - startTime).toBeGreaterThanOrEqual(100);
241 | expect(operation).toHaveBeenCalledTimes(2);
242 | });
243 | });
244 |
```
--------------------------------------------------------------------------------
/tests/unit/validation.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Validator, ToolSchemas, CommonSchemas } from '../../src/utils/validation.js';
2 | import { ValidationError } from '../../src/utils/errors.js';
3 |
// Unit coverage for the Zod-based validation layer: shared field schemas
// (CommonSchemas), per-tool parameter schemas (ToolSchemas), and the
// Validator facade that wraps parsing/sanitization in ValidationErrors.
describe('Validation', () => {
  describe('CommonSchemas', () => {
    describe('geminiModel', () => {
      it('should accept valid Gemini models', () => {
        const validModels = [
          'gemini-2.5-pro',
          'gemini-2.5-flash',
          'gemini-2.5-flash-lite',
          'gemini-2.0-flash',
          'gemini-1.5-pro'
        ];

        validModels.forEach(model => {
          expect(() => CommonSchemas.geminiModel.parse(model)).not.toThrow();
        });
      });

      it('should reject invalid models', () => {
        // Non-Gemini model IDs must be rejected outright.
        const invalidModels = ['gpt-4', 'claude-3', 'invalid-model'];

        invalidModels.forEach(model => {
          expect(() => CommonSchemas.geminiModel.parse(model)).toThrow();
        });
      });
    });

    describe('temperature', () => {
      it('should accept valid temperature values', () => {
        // The accepted range is inclusive: [0, 2].
        const validTemperatures = [0, 0.5, 1.0, 1.5, 2.0];

        validTemperatures.forEach(temp => {
          expect(() => CommonSchemas.temperature.parse(temp)).not.toThrow();
        });
      });

      it('should reject invalid temperature values', () => {
        // Out-of-range numbers and wrong types must both fail.
        const invalidTemperatures = [-0.1, 2.1, 'not-a-number'];

        invalidTemperatures.forEach(temp => {
          expect(() => CommonSchemas.temperature.parse(temp)).toThrow();
        });
      });
    });

    describe('jsonSchema', () => {
      it('should accept valid JSON strings', () => {
        // Any parseable JSON text counts, including a bare array.
        const validSchemas = [
          '{"type": "object"}',
          '{"type": "string", "enum": ["a", "b"]}',
          '[]'
        ];

        validSchemas.forEach(schema => {
          expect(() => CommonSchemas.jsonSchema.parse(schema)).not.toThrow();
        });
      });

      it('should reject invalid JSON strings', () => {
        const invalidSchemas = [
          '{invalid json}',
          'not json at all',
          '{"unclosed": '
        ];

        invalidSchemas.forEach(schema => {
          expect(() => CommonSchemas.jsonSchema.parse(schema)).toThrow();
        });
      });
    });
  });

  describe('ToolSchemas', () => {
    describe('generateText', () => {
      it('should accept valid parameters', () => {
        const validParams = {
          prompt: 'Test prompt',
          model: 'gemini-2.5-flash',
          temperature: 0.7,
          maxTokens: 1000
        };

        expect(() => ToolSchemas.generateText.parse(validParams)).not.toThrow();
      });

      it('should require prompt', () => {
        const invalidParams = {
          model: 'gemini-2.5-flash'
        };

        expect(() => ToolSchemas.generateText.parse(invalidParams)).toThrow();
      });

      it('should reject empty prompt', () => {
        // Presence is not enough — the prompt must be non-empty.
        const invalidParams = {
          prompt: ''
        };

        expect(() => ToolSchemas.generateText.parse(invalidParams)).toThrow();
      });
    });

    describe('analyzeImage', () => {
      it('should accept valid parameters with imageUrl', () => {
        const validParams = {
          prompt: 'What is in this image?',
          imageUrl: 'https://example.com/image.jpg'
        };

        expect(() => ToolSchemas.analyzeImage.parse(validParams)).not.toThrow();
      });

      it('should accept valid parameters with imageBase64', () => {
        // 1x1 transparent PNG as a data URI.
        const validParams = {
          prompt: 'What is in this image?',
          imageBase64: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg=='
        };

        expect(() => ToolSchemas.analyzeImage.parse(validParams)).not.toThrow();
      });

      it('should require either imageUrl or imageBase64', () => {
        // Exactly one image source must be supplied alongside the prompt.
        const invalidParams = {
          prompt: 'What is in this image?'
        };

        expect(() => ToolSchemas.analyzeImage.parse(invalidParams)).toThrow();
      });

      it('should reject invalid imageUrl', () => {
        const invalidParams = {
          prompt: 'What is in this image?',
          imageUrl: 'not-a-url'
        };

        expect(() => ToolSchemas.analyzeImage.parse(invalidParams)).toThrow();
      });
    });
  });

  describe('Validator', () => {
    describe('validateToolParams', () => {
      it('should validate and return parsed parameters', () => {
        const params = {
          prompt: 'Test prompt',
          temperature: 0.7
        };

        const result = Validator.validateToolParams(ToolSchemas.generateText, params);

        expect(result.prompt).toBe('Test prompt');
        expect(result.temperature).toBe(0.7);
      });

      it('should throw ValidationError for invalid parameters', () => {
        // Zod failures must surface as the project's ValidationError, not
        // as raw ZodError instances.
        const params = {
          prompt: '',
          temperature: 3.0
        };

        expect(() => {
          Validator.validateToolParams(ToolSchemas.generateText, params);
        }).toThrow(ValidationError);
      });
    });

    describe('sanitizeString', () => {
      it('should return clean string unchanged', () => {
        // Ordinary whitespace (newlines, tabs) must survive sanitization.
        const input = 'This is a clean string with\nnewlines and\ttabs.';
        const result = Validator.sanitizeString(input);

        expect(result).toBe(input);
      });

      it('should remove control characters', () => {
        // C0 control characters (\x00-\x1f, excluding \n and \t above)
        // are stripped, not replaced.
        const input = 'String with\x00null\x01control\x1fcharacters';
        const result = Validator.sanitizeString(input);

        expect(result).toBe('String withnullcontrolcharacters');
      });

      it('should enforce maximum length', () => {
        const input = 'a'.repeat(100);

        expect(() => {
          Validator.sanitizeString(input, 50);
        }).toThrow(ValidationError);
      });

      it('should throw error for non-string input', () => {
        expect(() => {
          Validator.sanitizeString(123 as any);
        }).toThrow(ValidationError);
      });
    });

    describe('validateJSON', () => {
      it('should parse valid JSON', () => {
        const jsonString = '{"key": "value", "number": 42}';
        const result = Validator.validateJSON(jsonString);

        expect(result).toEqual({ key: 'value', number: 42 });
      });

      it('should throw ValidationError for invalid JSON', () => {
        const invalidJson = '{invalid json}';

        expect(() => {
          Validator.validateJSON(invalidJson);
        }).toThrow(ValidationError);
      });
    });

    describe('validateMCPRequest', () => {
      it('should accept valid MCP request', () => {
        const validRequest = {
          jsonrpc: '2.0',
          id: 1,
          method: 'tools/list'
        };

        expect(() => {
          Validator.validateMCPRequest(validRequest);
        }).not.toThrow();
      });

      it('should reject request without jsonrpc', () => {
        const invalidRequest = {
          id: 1,
          method: 'tools/list'
        };

        expect(() => {
          Validator.validateMCPRequest(invalidRequest);
        }).toThrow(ValidationError);
      });

      it('should reject request with wrong jsonrpc version', () => {
        // Only the exact string '2.0' is acceptable for jsonrpc.
        const invalidRequest = {
          jsonrpc: '1.0',
          id: 1,
          method: 'tools/list'
        };

        expect(() => {
          Validator.validateMCPRequest(invalidRequest);
        }).toThrow(ValidationError);
      });

      it('should reject request without method', () => {
        const invalidRequest = {
          jsonrpc: '2.0',
          id: 1
        };

        expect(() => {
          Validator.validateMCPRequest(invalidRequest);
        }).toThrow(ValidationError);
      });

      it('should reject request without id', () => {
        // Requests (as opposed to notifications) must carry an id.
        const invalidRequest = {
          jsonrpc: '2.0',
          method: 'tools/list'
        };

        expect(() => {
          Validator.validateMCPRequest(invalidRequest);
        }).toThrow(ValidationError);
      });
    });
  });
});
276 |
```
--------------------------------------------------------------------------------
/src/enhanced-stdio-server.ts:
--------------------------------------------------------------------------------
```typescript
1 | #!/usr/bin/env node
2 | import { GoogleGenAI } from '@google/genai';
3 | import { createInterface } from 'readline';
4 | import { MCPRequest, MCPResponse } from './types.js';
5 | import { config } from './config/index.js';
6 | import { logger } from './utils/logger.js';
7 | import { rateLimiter } from './utils/rateLimiter.js';
8 | import { MCPError, ValidationError } from './utils/errors.js';
9 | import { Validator, ToolSchemas } from './utils/validation.js';
10 |
// Read stdin as UTF-8 text so incoming JSON-RPC lines arrive as strings.
// NOTE(review): setEncoding does NOT raise any buffer limit — the original
// "10MB buffer" comment was inaccurate; large payloads are simply assembled
// line-by-line by the readline interface configured below.
if (process.stdin.setEncoding) {
  process.stdin.setEncoding('utf8');
}
15 |
// Available Gemini models as of July 2025.
//
// Registry of every model this server exposes. Keys are the model IDs sent
// to the API; each entry carries a human-readable description, a `features`
// capability list (used by list_models filtering), and a context window.
// NOTE(review): contextWindow values are informational — nothing in this
// file enforces them; confirm against current Google model documentation.
const GEMINI_MODELS = {
  // Thinking models (2.5 series) - latest and most capable
  'gemini-2.5-pro': {
    description: 'Most capable thinking model, best for complex reasoning and coding',
    features: ['thinking', 'function_calling', 'json_mode', 'grounding', 'system_instructions'],
    contextWindow: 2000000, // 2M tokens
    thinking: true
  },
  'gemini-2.5-flash': {
    description: 'Fast thinking model with best price/performance ratio',
    features: ['thinking', 'function_calling', 'json_mode', 'grounding', 'system_instructions'],
    contextWindow: 1000000, // 1M tokens
    thinking: true
  },
  'gemini-2.5-flash-lite': {
    description: 'Ultra-fast, cost-efficient thinking model for high-throughput tasks',
    features: ['thinking', 'function_calling', 'json_mode', 'system_instructions'],
    contextWindow: 1000000,
    thinking: true
  },

  // 2.0 series
  'gemini-2.0-flash': {
    description: 'Fast, efficient model with 1M context window',
    features: ['function_calling', 'json_mode', 'grounding', 'system_instructions'],
    contextWindow: 1000000
  },
  'gemini-2.0-flash-lite': {
    description: 'Most cost-efficient model for simple tasks',
    features: ['function_calling', 'json_mode', 'system_instructions'],
    contextWindow: 1000000
  },
  'gemini-2.0-pro-experimental': {
    description: 'Experimental model with 2M context, excellent for coding',
    features: ['function_calling', 'json_mode', 'grounding', 'system_instructions'],
    contextWindow: 2000000
  },

  // Legacy models (for compatibility)
  'gemini-1.5-pro': {
    description: 'Previous generation pro model',
    features: ['function_calling', 'json_mode', 'system_instructions'],
    contextWindow: 2000000
  },
  'gemini-1.5-flash': {
    description: 'Previous generation fast model',
    features: ['function_calling', 'json_mode', 'system_instructions'],
    contextWindow: 1000000
  }
};
67 |
68 | class EnhancedStdioMCPServer {
69 | private genAI: GoogleGenAI;
70 | private conversations: Map<string, any[]> = new Map();
71 |
  /**
   * Creates the server: initializes the Gemini API client first, then wires
   * up the stdio (line-delimited JSON-RPC) transport so requests can only
   * arrive once the client is ready.
   *
   * @param apiKey - Google Gemini API key used to authenticate all requests.
   */
  constructor(apiKey: string) {
    logger.startup('Connecting to Google Gemini API...');
    this.genAI = new GoogleGenAI({ apiKey });
    logger.success('Gemini API client initialized');
    this.setupStdioInterface();
    logger.success('Stdio interface configured');
  }
79 |
80 | private setupStdioInterface() {
81 | const rl = createInterface({
82 | input: process.stdin,
83 | output: process.stdout,
84 | terminal: false,
85 | // Increase max line length for large image data
86 | crlfDelay: Infinity
87 | });
88 |
89 | rl.on('line', line => {
90 | if (line.trim()) {
91 | try {
92 | const request: MCPRequest = JSON.parse(line);
93 |
94 | // Validate MCP request structure
95 | Validator.validateMCPRequest(request);
96 |
97 | // Apply rate limiting
98 | rateLimiter.checkLimit();
99 |
100 | logger.request(`Received request: ${request.method} (ID: ${request.id})`);
101 | this.handleRequest(request);
102 | } catch (error) {
103 | logger.error('Failed to parse or validate message:', error);
104 |
105 | // Send error response if we can determine the request ID
106 | try {
107 | const partialRequest = JSON.parse(line);
108 | if (partialRequest.id) {
109 | const errorResponse =
110 | error instanceof MCPError
111 | ? error.toMCPResponse(partialRequest.id)
112 | : new MCPError('Invalid request format').toMCPResponse(partialRequest.id);
113 | this.sendResponse(errorResponse);
114 | }
115 | } catch {
116 | // If we can't parse at all, just log the error
117 | }
118 | }
119 | }
120 | });
121 |
122 | process.stdin.on('error', err => {
123 | console.error('stdin error:', err);
124 | });
125 | }
126 |
127 | private async handleRequest(request: MCPRequest) {
128 | console.error('Handling request:', request.method);
129 | try {
130 | let response: MCPResponse;
131 |
132 | switch (request.method) {
133 | case 'initialize':
134 | response = {
135 | jsonrpc: '2.0',
136 | id: request.id,
137 | result: {
138 | protocolVersion: '2024-11-05',
139 | serverInfo: {
140 | name: 'mcp-server-gemini-enhanced',
141 | version: '4.1.0'
142 | },
143 | capabilities: {
144 | tools: {},
145 | resources: {},
146 | prompts: {}
147 | }
148 | }
149 | };
150 | break;
151 |
152 | case 'tools/list':
153 | response = {
154 | jsonrpc: '2.0',
155 | id: request.id,
156 | result: {
157 | tools: this.getAvailableTools()
158 | }
159 | };
160 | break;
161 |
162 | case 'tools/call':
163 | response = await this.handleToolCall(request);
164 | break;
165 |
166 | case 'resources/list':
167 | response = {
168 | jsonrpc: '2.0',
169 | id: request.id,
170 | result: {
171 | resources: this.getAvailableResources()
172 | }
173 | };
174 | break;
175 |
176 | case 'resources/read':
177 | response = await this.handleResourceRead(request);
178 | break;
179 |
180 | case 'prompts/list':
181 | response = {
182 | jsonrpc: '2.0',
183 | id: request.id,
184 | result: {
185 | prompts: this.getAvailablePrompts()
186 | }
187 | };
188 | break;
189 |
190 | default:
191 | if (!('id' in request)) {
192 | console.error(`Notification received: ${(request as any).method}`);
193 | return;
194 | }
195 |
196 | response = {
197 | jsonrpc: '2.0',
198 | id: request.id,
199 | error: {
200 | code: -32601,
201 | message: 'Method not found'
202 | }
203 | };
204 | }
205 |
206 | this.sendResponse(response);
207 | } catch (error) {
208 | const errorResponse: MCPResponse = {
209 | jsonrpc: '2.0',
210 | id: request.id,
211 | error: {
212 | code: -32603,
213 | message: error instanceof Error ? error.message : 'Internal error'
214 | }
215 | };
216 | this.sendResponse(errorResponse);
217 | }
218 | }
219 |
  /**
   * Returns the MCP tool catalog advertised by tools/list.
   *
   * Each entry is a JSON-Schema description of one tool's parameters;
   * actual dispatch happens in handleToolCall. Model enums are derived from
   * GEMINI_MODELS so the catalog stays in sync with the supported models.
   */
  private getAvailableTools() {
    return [
      // Text generation with optional JSON mode, grounding, and
      // conversation context.
      {
        name: 'generate_text',
        description: 'Generate text using Google Gemini with advanced features',
        inputSchema: {
          type: 'object',
          properties: {
            prompt: {
              type: 'string',
              description: 'The prompt to send to Gemini'
            },
            model: {
              type: 'string',
              description: 'Specific Gemini model to use',
              enum: Object.keys(GEMINI_MODELS),
              default: 'gemini-2.5-flash'
            },
            systemInstruction: {
              type: 'string',
              description: 'System instruction to guide model behavior'
            },
            temperature: {
              type: 'number',
              description: 'Temperature for generation (0-2)',
              default: 0.7,
              minimum: 0,
              maximum: 2
            },
            maxTokens: {
              type: 'number',
              description: 'Maximum tokens to generate',
              default: 2048
            },
            topK: {
              type: 'number',
              description: 'Top-k sampling parameter',
              default: 40
            },
            topP: {
              type: 'number',
              description: 'Top-p (nucleus) sampling parameter',
              default: 0.95
            },
            jsonMode: {
              type: 'boolean',
              description: 'Enable JSON mode for structured output',
              default: false
            },
            jsonSchema: {
              type: 'string',
              description: 'JSON schema as a string for structured output (when jsonMode is true)'
            },
            grounding: {
              type: 'boolean',
              description: 'Enable Google Search grounding for up-to-date information',
              default: false
            },
            safetySettings: {
              type: 'string',
              description: 'Safety settings as JSON string for content filtering'
            },
            conversationId: {
              type: 'string',
              description: 'ID for maintaining conversation context'
            }
          },
          required: ['prompt']
        }
      },
      // Image analysis; image source (URL or base64) is enforced by the
      // handler, not by this schema's `required` list.
      {
        name: 'analyze_image',
        description: 'Analyze images using Gemini vision capabilities',
        inputSchema: {
          type: 'object',
          properties: {
            prompt: {
              type: 'string',
              description: 'Question or instruction about the image'
            },
            imageUrl: {
              type: 'string',
              description: 'URL of the image to analyze'
            },
            imageBase64: {
              type: 'string',
              description: 'Base64-encoded image data (alternative to URL)'
            },
            model: {
              type: 'string',
              description: 'Vision-capable Gemini model',
              enum: ['gemini-2.5-pro', 'gemini-2.5-flash', 'gemini-2.0-flash'],
              default: 'gemini-2.5-flash'
            }
          },
          required: ['prompt']
        }
      },
      // Token counting for cost estimation.
      {
        name: 'count_tokens',
        description: 'Count tokens for a given text with a specific model',
        inputSchema: {
          type: 'object',
          properties: {
            text: {
              type: 'string',
              description: 'Text to count tokens for'
            },
            model: {
              type: 'string',
              description: 'Model to use for token counting',
              enum: Object.keys(GEMINI_MODELS),
              default: 'gemini-2.5-flash'
            }
          },
          required: ['text']
        }
      },
      // Model discovery with capability filtering.
      {
        name: 'list_models',
        description: 'List all available Gemini models and their capabilities',
        inputSchema: {
          type: 'object',
          properties: {
            filter: {
              type: 'string',
              description: 'Filter models by capability',
              enum: ['all', 'thinking', 'vision', 'grounding', 'json_mode']
            }
          }
        }
      },
      // Text embeddings via the dedicated embedding models.
      {
        name: 'embed_text',
        description: 'Generate embeddings for text using Gemini embedding models',
        inputSchema: {
          type: 'object',
          properties: {
            text: {
              type: 'string',
              description: 'Text to generate embeddings for'
            },
            model: {
              type: 'string',
              description: 'Embedding model to use',
              enum: ['text-embedding-004', 'text-multilingual-embedding-002'],
              default: 'text-embedding-004'
            }
          },
          required: ['text']
        }
      },
      // Self-documentation tool.
      {
        name: 'get_help',
        description: 'Get help and usage information for the Gemini MCP server',
        inputSchema: {
          type: 'object',
          properties: {
            topic: {
              type: 'string',
              description: 'Help topic to get information about',
              enum: ['overview', 'tools', 'models', 'parameters', 'examples', 'quick-start'],
              default: 'overview'
            }
          }
        }
      }
    ];
  }
389 |
390 | private getAvailableResources() {
391 | return [
392 | {
393 | uri: 'gemini://models',
394 | name: 'Available Gemini Models',
395 | description: 'List of all available Gemini models and their capabilities',
396 | mimeType: 'application/json'
397 | },
398 | {
399 | uri: 'gemini://capabilities',
400 | name: 'API Capabilities',
401 | description: 'Detailed information about Gemini API capabilities',
402 | mimeType: 'text/markdown'
403 | },
404 | {
405 | uri: 'gemini://help/usage',
406 | name: 'Usage Guide',
407 | description: 'Complete guide on using all tools and features',
408 | mimeType: 'text/markdown'
409 | },
410 | {
411 | uri: 'gemini://help/parameters',
412 | name: 'Parameters Reference',
413 | description: 'Detailed documentation of all parameters',
414 | mimeType: 'text/markdown'
415 | },
416 | {
417 | uri: 'gemini://help/examples',
418 | name: 'Examples',
419 | description: 'Example usage patterns for common tasks',
420 | mimeType: 'text/markdown'
421 | }
422 | ];
423 | }
424 |
/**
 * Describe the prompt templates this server offers.
 * Each prompt lists its arguments with a `required` flag so MCP clients can
 * render input forms for them.
 */
private getAvailablePrompts() {
  // Small factory keeps each argument descriptor on one line.
  const arg = (name: string, description: string, required: boolean) => ({
    name,
    description,
    required
  });

  return [
    {
      name: 'code_review',
      description: 'Comprehensive code review with Gemini 2.5 Pro',
      arguments: [arg('code', 'Code to review', true), arg('language', 'Programming language', false)]
    },
    {
      name: 'explain_with_thinking',
      description: 'Deep explanation using Gemini 2.5 thinking capabilities',
      arguments: [
        arg('topic', 'Topic to explain', true),
        arg('level', 'Explanation level (beginner/intermediate/expert)', false)
      ]
    },
    {
      name: 'creative_writing',
      description: 'Creative writing with style control',
      arguments: [
        arg('prompt', 'Writing prompt', true),
        arg('style', 'Writing style', false),
        arg('length', 'Desired length', false)
      ]
    }
  ];
}
482 |
483 | private async handleToolCall(request: MCPRequest): Promise<MCPResponse> {
484 | const { name, arguments: args } = request.params || {};
485 |
486 | switch (name) {
487 | case 'generate_text':
488 | return await this.generateText(request.id, args);
489 |
490 | case 'analyze_image':
491 | return await this.analyzeImage(request.id, args);
492 |
493 | case 'count_tokens':
494 | return await this.countTokens(request.id, args);
495 |
496 | case 'list_models':
497 | return this.listModels(request.id, args);
498 |
499 | case 'embed_text':
500 | return await this.embedText(request.id, args);
501 |
502 | case 'get_help':
503 | return this.getHelp(request.id, args);
504 |
505 | default:
506 | return {
507 | jsonrpc: '2.0',
508 | id: request.id,
509 | error: {
510 | code: -32601,
511 | message: `Unknown tool: ${name}`
512 | }
513 | };
514 | }
515 | }
516 |
517 | private async generateText(id: any, args: any): Promise<MCPResponse> {
518 | try {
519 | // Validate parameters
520 | const validatedArgs = Validator.validateToolParams(ToolSchemas.generateText, args);
521 |
522 | const model = validatedArgs.model || 'gemini-2.5-flash';
523 | logger.api(`Generating text with model: ${model}`);
524 | const modelInfo = GEMINI_MODELS[model as keyof typeof GEMINI_MODELS];
525 |
526 | if (!modelInfo) {
527 | throw new Error(`Unknown model: ${model}`);
528 | }
529 |
530 | // Build generation config
531 | const generationConfig: any = {
532 | temperature: validatedArgs.temperature || 0.7,
533 | maxOutputTokens: validatedArgs.maxTokens || 2048,
534 | topK: validatedArgs.topK || 40,
535 | topP: validatedArgs.topP || 0.95
536 | };
537 |
538 | // Add JSON mode if requested
539 | if (validatedArgs.jsonMode) {
540 | generationConfig.responseMimeType = 'application/json';
541 | if (validatedArgs.jsonSchema) {
542 | try {
543 | generationConfig.responseSchema = Validator.validateJSON(validatedArgs.jsonSchema);
544 | } catch (error) {
545 | logger.error('Invalid JSON schema provided:', error);
546 | throw new ValidationError('Invalid JSON schema format');
547 | }
548 | }
549 | }
550 |
551 | // Build the request
552 | const requestBody: any = {
553 | model,
554 | contents: [
555 | {
556 | parts: [
557 | {
558 | text: Validator.sanitizeString(validatedArgs.prompt)
559 | }
560 | ],
561 | role: 'user'
562 | }
563 | ],
564 | generationConfig
565 | };
566 |
567 | // Add system instruction if provided
568 | if (validatedArgs.systemInstruction) {
569 | requestBody.systemInstruction = {
570 | parts: [
571 | {
572 | text: Validator.sanitizeString(validatedArgs.systemInstruction)
573 | }
574 | ]
575 | };
576 | }
577 |
578 | // Add safety settings if provided
579 | if (args.safetySettings) {
580 | try {
581 | requestBody.safetySettings =
582 | typeof args.safetySettings === 'string'
583 | ? JSON.parse(args.safetySettings)
584 | : args.safetySettings;
585 | } catch (error) {
586 | console.error('Invalid safety settings JSON provided:', error);
587 | }
588 | }
589 |
590 | // Add grounding if requested and supported
591 | if (args.grounding && modelInfo.features.includes('grounding')) {
592 | requestBody.tools = [
593 | {
594 | googleSearch: {}
595 | }
596 | ];
597 | }
598 |
599 | // Handle conversation context
600 | if (args.conversationId) {
601 | const history = this.conversations.get(args.conversationId) || [];
602 | if (history.length > 0) {
603 | requestBody.contents = [...history, ...requestBody.contents];
604 | }
605 | }
606 |
607 | // Call the API using the new SDK format
608 | const result = await this.genAI.models.generateContent({
609 | model,
610 | ...requestBody
611 | });
612 | const text = result.text || '';
613 |
614 | // Update conversation history if needed
615 | if (args.conversationId) {
616 | const history = this.conversations.get(args.conversationId) || [];
617 | history.push(...requestBody.contents);
618 | history.push({
619 | parts: [
620 | {
621 | text
622 | }
623 | ],
624 | role: 'model'
625 | });
626 | this.conversations.set(args.conversationId, history);
627 | }
628 |
629 | return {
630 | jsonrpc: '2.0',
631 | id,
632 | result: {
633 | content: [
634 | {
635 | type: 'text',
636 | text
637 | }
638 | ],
639 | metadata: {
640 | model,
641 | tokensUsed: result.usageMetadata?.totalTokenCount,
642 | candidatesCount: result.candidates?.length || 1,
643 | finishReason: result.candidates?.[0]?.finishReason
644 | }
645 | }
646 | };
647 | } catch (error) {
648 | console.error('Error in generateText:', error);
649 | return {
650 | jsonrpc: '2.0',
651 | id,
652 | error: {
653 | code: -32603,
654 | message: error instanceof Error ? error.message : 'Internal error'
655 | }
656 | };
657 | }
658 | }
659 |
660 | private async analyzeImage(id: any, args: any): Promise<MCPResponse> {
661 | try {
662 | const model = args.model || 'gemini-2.5-flash';
663 |
664 | // Validate inputs
665 | if (!args.imageUrl && !args.imageBase64) {
666 | throw new Error('Either imageUrl or imageBase64 must be provided');
667 | }
668 |
669 | // Prepare image part
670 | let imagePart: any;
671 | if (args.imageUrl) {
672 | // For URL, we'd need to fetch and convert to base64
673 | // For now, we'll just pass the URL as instruction
674 | imagePart = {
675 | text: `[Image URL: ${args.imageUrl}]`
676 | };
677 | } else if (args.imageBase64) {
678 | // Log base64 data size for debugging
679 | console.error(`Image base64 length: ${args.imageBase64.length}`);
680 |
681 | // Extract MIME type and data
682 | const matches = args.imageBase64.match(/^data:(.+);base64,(.+)$/);
683 | if (matches) {
684 | console.error(`MIME type: ${matches[1]}, Data length: ${matches[2].length}`);
685 | imagePart = {
686 | inlineData: {
687 | mimeType: matches[1],
688 | data: matches[2]
689 | }
690 | };
691 | } else {
692 | // If no data URI format, assume raw base64
693 | console.error('Raw base64 data detected');
694 | imagePart = {
695 | inlineData: {
696 | mimeType: 'image/jpeg',
697 | data: args.imageBase64
698 | }
699 | };
700 | }
701 | }
702 |
703 | const result = await this.genAI.models.generateContent({
704 | model,
705 | contents: [
706 | {
707 | parts: [{ text: args.prompt }, imagePart],
708 | role: 'user'
709 | }
710 | ]
711 | });
712 |
713 | const text = result.text || '';
714 |
715 | return {
716 | jsonrpc: '2.0',
717 | id,
718 | result: {
719 | content: [
720 | {
721 | type: 'text',
722 | text
723 | }
724 | ]
725 | }
726 | };
727 | } catch (error) {
728 | console.error('Error in analyzeImage:', error);
729 | return {
730 | jsonrpc: '2.0',
731 | id,
732 | error: {
733 | code: -32603,
734 | message: `Image analysis failed: ${error instanceof Error ? error.message : 'Unknown error'}`
735 | }
736 | };
737 | }
738 | }
739 |
740 | private async countTokens(id: any, args: any): Promise<MCPResponse> {
741 | try {
742 | const model = args.model || 'gemini-2.5-flash';
743 |
744 | const result = await this.genAI.models.countTokens({
745 | model,
746 | contents: [
747 | {
748 | parts: [
749 | {
750 | text: args.text
751 | }
752 | ],
753 | role: 'user'
754 | }
755 | ]
756 | });
757 |
758 | return {
759 | jsonrpc: '2.0',
760 | id,
761 | result: {
762 | content: [
763 | {
764 | type: 'text',
765 | text: `Token count: ${result.totalTokens}`
766 | }
767 | ],
768 | metadata: {
769 | tokenCount: result.totalTokens,
770 | model
771 | }
772 | }
773 | };
774 | } catch (error) {
775 | return {
776 | jsonrpc: '2.0',
777 | id,
778 | error: {
779 | code: -32603,
780 | message: error instanceof Error ? error.message : 'Internal error'
781 | }
782 | };
783 | }
784 | }
785 |
/**
 * Handle the `list_models` tool: list GEMINI_MODELS, optionally filtered by
 * capability. Supported filters: 'all' (default), 'thinking', 'vision',
 * 'grounding', 'json_mode'. Returns the matching models as pretty-printed
 * JSON plus a count/filter metadata block.
 */
private listModels(id: any, args: any): MCPResponse {
  const filter = args?.filter || 'all';
  let models = Object.entries(GEMINI_MODELS);

  if (filter !== 'all') {
    models = models.filter(([_, info]) => {
      switch (filter) {
        case 'thinking':
          return 'thinking' in info && info.thinking === true;
        case 'vision':
          // Prefer an explicit 'vision' feature flag; fall back to the old
          // function_calling proxy for entries that predate such a flag.
          // NOTE(review): add a real 'vision' entry to GEMINI_MODELS
          // features so the proxy can eventually be dropped.
          return (
            (info.features as string[]).includes('vision') ||
            (info.features as string[]).includes('function_calling')
          );
        case 'grounding':
          return info.features.includes('grounding');
        case 'json_mode':
          return info.features.includes('json_mode');
        default:
          // Unknown filter values fall through to "no filtering".
          return true;
      }
    });
  }

  const modelList = models.map(([name, info]) => ({
    name,
    ...info
  }));

  return {
    jsonrpc: '2.0',
    id,
    result: {
      content: [
        {
          type: 'text',
          text: JSON.stringify(modelList, null, 2)
        }
      ],
      metadata: {
        count: modelList.length,
        filter
      }
    }
  };
}
829 |
830 | private async embedText(id: any, args: any): Promise<MCPResponse> {
831 | try {
832 | const model = args.model || 'text-embedding-004';
833 |
834 | const result = await this.genAI.models.embedContent({
835 | model,
836 | contents: args.text
837 | });
838 |
839 | return {
840 | jsonrpc: '2.0',
841 | id,
842 | result: {
843 | content: [
844 | {
845 | type: 'text',
846 | text: JSON.stringify({
847 | embedding: result.embeddings?.[0]?.values || [],
848 | model
849 | })
850 | }
851 | ],
852 | metadata: {
853 | model,
854 | dimensions: result.embeddings?.[0]?.values?.length || 0
855 | }
856 | }
857 | };
858 | } catch (error) {
859 | return {
860 | jsonrpc: '2.0',
861 | id,
862 | error: {
863 | code: -32603,
864 | message: error instanceof Error ? error.message : 'Internal error'
865 | }
866 | };
867 | }
868 | }
869 |
870 | private async handleResourceRead(request: MCPRequest): Promise<MCPResponse> {
871 | const uri = request.params?.uri;
872 |
873 | if (!uri) {
874 | return {
875 | jsonrpc: '2.0',
876 | id: request.id,
877 | error: {
878 | code: -32602,
879 | message: 'Missing required parameter: uri'
880 | }
881 | };
882 | }
883 |
884 | let content = '';
885 | let mimeType = 'text/plain';
886 |
887 | switch (uri) {
888 | case 'gemini://models':
889 | content = JSON.stringify(GEMINI_MODELS, null, 2);
890 | mimeType = 'application/json';
891 | break;
892 |
893 | case 'gemini://capabilities':
894 | content = `# Gemini API Capabilities
895 |
896 | ## Text Generation
897 | - All models support advanced text generation
898 | - System instructions for behavior control
899 | - Temperature, topK, topP for output control
900 | - Token limits vary by model (1M-2M)
901 |
902 | ## Thinking Models (2.5 Series)
903 | - Step-by-step reasoning before responding
904 | - Better accuracy for complex problems
905 | - Ideal for coding, analysis, and problem-solving
906 |
907 | ## JSON Mode
908 | - Structured output with schema validation
909 | - Available on all models
910 | - Ensures consistent response format
911 |
912 | ## Google Search Grounding
913 | - Real-time web search integration
914 | - Available on select models
915 | - Perfect for current events and facts
916 |
917 | ## Vision Capabilities
918 | - Image analysis and understanding
919 | - Available on most models
920 | - Supports URLs and base64 images
921 |
922 | ## Embeddings
923 | - Semantic text embeddings
924 | - Multiple models available
925 | - Multilingual support
926 |
927 | ## Safety Settings
928 | - Granular content filtering
929 | - Customizable thresholds
930 | - Per-category control
931 |
932 | ## Conversation Memory
933 | - Context retention across messages
934 | - Session-based conversations
935 | - Ideal for multi-turn interactions`;
936 | mimeType = 'text/markdown';
937 | break;
938 |
939 | case 'gemini://help/usage':
940 | content = `${this.getHelpContent('overview')}\n\n${this.getHelpContent('tools')}`;
941 | mimeType = 'text/markdown';
942 | break;
943 |
944 | case 'gemini://help/parameters':
945 | content = this.getHelpContent('parameters');
946 | mimeType = 'text/markdown';
947 | break;
948 |
949 | case 'gemini://help/examples':
950 | content = this.getHelpContent('examples');
951 | mimeType = 'text/markdown';
952 | break;
953 |
954 | default:
955 | return {
956 | jsonrpc: '2.0',
957 | id: request.id,
958 | error: {
959 | code: -32602,
960 | message: `Unknown resource: ${uri}`
961 | }
962 | };
963 | }
964 |
965 | return {
966 | jsonrpc: '2.0',
967 | id: request.id,
968 | result: {
969 | contents: [
970 | {
971 | uri,
972 | mimeType,
973 | text: content
974 | }
975 | ]
976 | }
977 | };
978 | }
979 |
/**
 * Render static markdown help text for a topic.
 *
 * Known topics: 'overview', 'tools', 'parameters', 'examples'; anything
 * else yields a short fallback string. The 'models' and 'quick-start'
 * topics are rendered inline by getHelp, not here; handleResourceRead also
 * reuses this method for the gemini://help/* resources.
 */
private getHelpContent(topic: string): string {
  // Extract help content generation to a separate method
  switch (topic) {
    case 'overview':
      return `# Gemini MCP Server Help

Welcome to the Gemini MCP Server v4.1.0! This server provides access to Google's Gemini AI models through Claude Desktop.

## Available Tools
1. **generate_text** - Generate text with advanced features
2. **analyze_image** - Analyze images using vision models
3. **count_tokens** - Count tokens for cost estimation
4. **list_models** - List all available models
5. **embed_text** - Generate text embeddings
6. **get_help** - Get help on using this server

## Quick Start
- "Use Gemini to explain [topic]"
- "Analyze this image with Gemini"
- "List all Gemini models"
- "Get help on parameters"

## Key Features
- Latest Gemini 2.5 models with thinking capabilities
- JSON mode for structured output
- Google Search grounding for current information
- System instructions for behavior control
- Conversation memory for context
- Safety settings customization

Use "get help on tools" for detailed tool information.`;

    case 'tools':
      return `# Available Tools

## 1. generate_text
Generate text using Gemini models with advanced features.

**Parameters:**
- prompt (required): Your text prompt
- model: Choose from gemini-2.5-pro, gemini-2.5-flash, etc.
- temperature: 0-2 (default 0.7)
- maxTokens: Max output tokens (default 2048)
- systemInstruction: Guide model behavior
- jsonMode: Enable JSON output
- grounding: Enable Google Search
- conversationId: Maintain conversation context

**Example:** "Use Gemini 2.5 Pro to explain quantum computing"

## 2. analyze_image
Analyze images using vision-capable models.

**Parameters:**
- prompt (required): Question about the image
- imageUrl OR imageBase64 (required): Image source
- model: Vision-capable model (default gemini-2.5-flash)

**Example:** "Analyze this architecture diagram"

## 3. count_tokens
Count tokens for text with a specific model.

**Parameters:**
- text (required): Text to count
- model: Model for counting (default gemini-2.5-flash)

**Example:** "Count tokens for this paragraph"

## 4. list_models
List available models with optional filtering.

**Parameters:**
- filter: all, thinking, vision, grounding, json_mode

**Example:** "List models with thinking capability"

## 5. embed_text
Generate embeddings for semantic search.

**Parameters:**
- text (required): Text to embed
- model: text-embedding-004 or text-multilingual-embedding-002

**Example:** "Generate embeddings for similarity search"

## 6. get_help
Get help on using this server.

**Parameters:**
- topic: overview, tools, models, parameters, examples, quick-start

**Example:** "Get help on parameters"`;

    case 'parameters':
      return `# Parameter Reference

## generate_text Parameters

**Required:**
- prompt (string): Your text prompt

**Optional:**
- model (string): Model to use (default: gemini-2.5-flash)
- systemInstruction (string): System prompt for behavior
- temperature (0-2): Creativity level (default: 0.7)
- maxTokens (number): Max output tokens (default: 2048)
- topK (number): Top-k sampling (default: 40)
- topP (number): Nucleus sampling (default: 0.95)
- jsonMode (boolean): Enable JSON output
- jsonSchema (object): JSON schema for validation
- grounding (boolean): Enable Google Search
- conversationId (string): Conversation identifier
- safetySettings (array): Content filtering settings

## Temperature Guide
- 0.1-0.3: Precise, factual
- 0.5-0.8: Balanced (default 0.7)
- 1.0-1.5: Creative
- 1.5-2.0: Very creative

## JSON Mode Example
Enable jsonMode and provide jsonSchema:
{
  "type": "object",
  "properties": {
    "sentiment": {"type": "string"},
    "score": {"type": "number"}
  }
}

## Safety Settings
Categories: HARASSMENT, HATE_SPEECH, SEXUALLY_EXPLICIT, DANGEROUS_CONTENT
Thresholds: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE`;

    case 'examples':
      return `# Usage Examples

## Basic Text Generation
"Use Gemini to explain machine learning"

## With Specific Model
"Use Gemini 2.5 Pro to write a Python sorting function"

## With Temperature
"Use Gemini with temperature 1.5 to write a creative story"

## JSON Mode
"Use Gemini in JSON mode to analyze sentiment and return {sentiment, confidence, keywords}"

## With Grounding
"Use Gemini with grounding to research latest AI developments"

## System Instructions
"Use Gemini as a Python tutor to explain decorators"

## Conversation Context
"Start conversation 'chat-001' about web development"
"Continue chat-001 and ask about React hooks"

## Image Analysis
"Analyze this screenshot and describe the UI elements"

## Token Counting
"Count tokens for this document using gemini-2.5-pro"

## Complex Example
"Use Gemini 2.5 Pro to review this code with:
- System instruction: 'You are a security expert'
- Temperature: 0.3
- JSON mode with schema for findings
- Grounding for latest security practices"`;

    default:
      // Callers (getHelp, handleResourceRead) only pass known topics today.
      return 'Unknown help topic.';
  }
}
1157 |
/**
 * Handle the `get_help` tool: return markdown help for the requested topic
 * (default 'overview'). Topics shared with the resource endpoints delegate
 * to getHelpContent; 'models' and 'quick-start' are rendered inline.
 */
private getHelp(id: any, args: any): MCPResponse {
  const topic = args?.topic || 'overview';
  let helpContent: string;

  if (topic === 'overview' || topic === 'tools' || topic === 'parameters' || topic === 'examples') {
    helpContent = this.getHelpContent(topic);
  } else if (topic === 'models') {
    helpContent = `# Available Gemini Models

## Thinking Models (Latest - 2.5 Series)
**gemini-2.5-pro**
- Most capable, best for complex reasoning
- 2M token context window
- Features: thinking, JSON mode, grounding, system instructions

**gemini-2.5-flash** ⭐ Recommended
- Best balance of speed and capability
- 1M token context window
- Features: thinking, JSON mode, grounding, system instructions

**gemini-2.5-flash-lite**
- Ultra-fast, cost-efficient
- 1M token context window
- Features: thinking, JSON mode, system instructions

## Standard Models (2.0 Series)
**gemini-2.0-flash**
- Fast and efficient
- 1M token context window
- Features: JSON mode, grounding, system instructions

**gemini-2.0-flash-lite**
- Most cost-efficient
- 1M token context window
- Features: JSON mode, system instructions

**gemini-2.0-pro-experimental**
- Excellent for coding
- 2M token context window
- Features: JSON mode, grounding, system instructions

## Model Selection Guide
- Complex reasoning: gemini-2.5-pro
- General use: gemini-2.5-flash
- Fast responses: gemini-2.5-flash-lite
- Cost-sensitive: gemini-2.0-flash-lite
- Coding tasks: gemini-2.0-pro-experimental`;
  } else if (topic === 'quick-start') {
    helpContent = `# Quick Start Guide

## 1. Basic Usage
Just ask naturally:
- "Use Gemini to [your request]"
- "Ask Gemini about [topic]"

## 2. Common Tasks

**Text Generation:**
"Use Gemini to write a function that sorts arrays"

**Image Analysis:**
"What's in this image?" [attach image]

**Model Info:**
"List all Gemini models"

**Token Counting:**
"Count tokens for my prompt"

## 3. Advanced Features

**JSON Output:**
"Use Gemini in JSON mode to extract key points"

**Current Information:**
"Use Gemini with grounding to get latest news"

**Conversations:**
"Start a chat with Gemini about Python"

## 4. Tips
- Use gemini-2.5-flash for most tasks
- Lower temperature for facts, higher for creativity
- Enable grounding for current information
- Use conversation IDs to maintain context

## Need More Help?
- "Get help on tools" - Detailed tool information
- "Get help on parameters" - All parameters explained
- "Get help on models" - Model selection guide`;
  } else {
    helpContent =
      'Unknown help topic. Available topics: overview, tools, models, parameters, examples, quick-start';
  }

  return {
    jsonrpc: '2.0',
    id,
    result: {
      content: [
        {
          type: 'text',
          text: helpContent
        }
      ]
    }
  };
}
1285 |
1286 | private sendResponse(response: MCPResponse | any) {
1287 | const responseStr = JSON.stringify(response);
1288 | logger.response(
1289 | `Sending response for ID: ${response.id} ${response.error ? '(ERROR)' : '(SUCCESS)'}`
1290 | );
1291 | process.stdout.write(`${responseStr}\n`);
1292 | }
1293 | }
1294 |
1295 | // Start the server
1296 | try {
1297 | logger.startup('Starting Gemini MCP Server...');
1298 | logger.info('Environment configuration loaded');
1299 |
1300 | // Mask the API key for security (show only first 8 and last 4 characters)
1301 | const maskedKey =
1302 | config.geminiApiKey.length > 12
1303 | ? `${config.geminiApiKey.substring(0, 8)}...${config.geminiApiKey.substring(config.geminiApiKey.length - 4)}`
1304 | : '***masked***';
1305 |
1306 | logger.success(`API Key loaded: ${maskedKey}`);
1307 | logger.info(`Log level: ${config.logLevel}`);
1308 | logger.info(`Rate limiting: ${config.rateLimitEnabled ? 'enabled' : 'disabled'}`);
1309 |
1310 | if (config.rateLimitEnabled) {
1311 | logger.info(`Rate limit: ${config.rateLimitRequests} requests per ${config.rateLimitWindow}ms`);
1312 | }
1313 |
1314 | logger.startup('Initializing Gemini API connection...');
1315 |
1316 | new EnhancedStdioMCPServer(config.geminiApiKey);
1317 |
1318 | logger.success('Gemini MCP Server started successfully!');
1319 | logger.info('Server is ready to receive MCP requests');
1320 | logger.info('Listening on stdio interface...');
1321 | logger.success('You can now use the server with Claude Desktop or other MCP clients');
1322 |
1323 | // Graceful shutdown handling
1324 | process.on('SIGINT', () => {
1325 | logger.info('Received SIGINT, shutting down gracefully...');
1326 | rateLimiter.destroy();
1327 | process.exit(0);
1328 | });
1329 |
1330 | process.on('SIGTERM', () => {
1331 | logger.info('Received SIGTERM, shutting down gracefully...');
1332 | rateLimiter.destroy();
1333 | process.exit(0);
1334 | });
1335 | } catch (error) {
1336 | logger.error('Failed to start server:', error);
1337 | process.exit(1);
1338 | }
1339 |
```