#
tokens: 33874/50000 33/33 files
lines: off (toggle) GitHub
raw markdown copy
# Directory Structure

```
├── .env.example
├── .eslintrc.json
├── .github
│   └── workflows
│       ├── ci.yml
│       └── release.yml
├── .gitignore
├── .npmignore
├── .prettierignore
├── .prettierrc.json
├── Contribution-Guide.md
├── docker-compose.yml
├── Dockerfile
├── docs
│   ├── api.md
│   ├── configuration.md
│   └── troubleshooting.md
├── jest.config.js
├── LICENSE
├── package-lock.json
├── package.json
├── README.md
├── scripts
│   ├── build.sh
│   ├── dev.sh
│   └── test.sh
├── src
│   ├── config
│   │   └── index.ts
│   ├── enhanced-stdio-server.ts
│   ├── types.ts
│   └── utils
│       ├── errors.ts
│       ├── logger.ts
│       ├── rateLimiter.ts
│       └── validation.ts
├── tests
│   ├── integration
│   │   └── gemini-api.test.ts
│   ├── setup.ts
│   └── unit
│       ├── config.test.ts
│       ├── errors.test.ts
│       └── validation.test.ts
└── tsconfig.json
```

# Files

--------------------------------------------------------------------------------
/.npmignore:
--------------------------------------------------------------------------------

```
# Source files
src/
*.ts
!dist/**/*.d.ts

# Development files
.gitignore
.eslintrc*
.prettierrc*
tsconfig.json
jest.config.*

# Documentation (keep essential ones)
docs/
*.md
!README.md
!LICENSE
!CHANGELOG.md

# Test files
__tests__/
*.test.*
*.spec.*

# CI/CD
.github/

# IDE
.vscode/
.idea/

# Misc
.env
.env.*
*.log
.DS_Store
```

--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------

```
# Dependencies
node_modules/

# Build output
dist/
build/
*.tsbuildinfo

# Coverage
coverage/

# Logs
*.log
logs/

# Environment files
.env
.env.local
.env.*.local

# Cache directories
.cache/
.parcel-cache/
.npm/
.eslintcache

# OS files
.DS_Store
Thumbs.db

# IDE files
.vscode/
.idea/

# Package files
package-lock.json
yarn.lock
pnpm-lock.yaml

# Generated files
CHANGELOG.md

```

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

```
# Dependencies
node_modules/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*

# Build output
dist/
build/
*.tsbuildinfo

# Environment variables
.env
.env.local
.env.development.local
.env.test.local
.env.production.local

# Logs
logs/
*.log

# Test coverage
coverage/
.nyc_output
*.lcov

# IDE files
.vscode/
.idea/
*.swp
*.swo
*~

# OS files
.DS_Store
.DS_Store?
._*
Thumbs.db
ehthumbs.db

# Claude configuration
.claude/
claude_desktop_config*.json

# Temporary files
tmp/
temp/
test-mcp-schema.js

# Cache directories
.npm
.eslintcache
.cache
.parcel-cache
```

--------------------------------------------------------------------------------
/.prettierrc.json:
--------------------------------------------------------------------------------

```json
{
  "semi": true,
  "trailingComma": "none",
  "singleQuote": true,
  "printWidth": 100,
  "tabWidth": 2,
  "useTabs": false,
  "bracketSpacing": true,
  "bracketSameLine": false,
  "arrowParens": "avoid",
  "endOfLine": "lf",
  "overrides": [
    {
      "files": "*.json",
      "options": {
        "printWidth": 80,
        "tabWidth": 2
      }
    },
    {
      "files": "*.md",
      "options": {
        "printWidth": 80,
        "proseWrap": "always"
      }
    },
    {
      "files": "*.yml",
      "options": {
        "tabWidth": 2,
        "singleQuote": false
      }
    }
  ]
}

```

--------------------------------------------------------------------------------
/.env.example:
--------------------------------------------------------------------------------

```
GEMINI_API_KEY=your_gemini_api_key_here

# Logging level (default: info)
# Options: error, warn, info, debug
LOG_LEVEL=info

# Enable performance metrics (default: false)
ENABLE_METRICS=false

# Rate limiting configuration
RATE_LIMIT_ENABLED=true        # Enable/disable rate limiting (default: true)
RATE_LIMIT_REQUESTS=100        # Max requests per window (default: 100)
RATE_LIMIT_WINDOW=60000        # Time window in ms (default: 60000 = 1 minute)

# Request timeout in milliseconds (default: 30000 = 30 seconds)
REQUEST_TIMEOUT=30000

# Environment mode (default: production)
NODE_ENV=production
```

--------------------------------------------------------------------------------
/.eslintrc.json:
--------------------------------------------------------------------------------

```json
{
  "root": true,
  "parser": "@typescript-eslint/parser",
  "plugins": [
    "@typescript-eslint"
  ],
  "extends": [
    "eslint:recommended",
    "plugin:@typescript-eslint/recommended",
    "prettier"
  ],
  "parserOptions": {
    "ecmaVersion": 2022,
    "sourceType": "module",
    "project": "./tsconfig.json"
  },
  "env": {
    "node": true,
    "es2022": true
  },
  "rules": {
    "@typescript-eslint/no-unused-vars": ["error", { "argsIgnorePattern": "^_" }],
    "@typescript-eslint/no-explicit-any": "warn",
    "@typescript-eslint/explicit-function-return-type": "off",
    "@typescript-eslint/explicit-module-boundary-types": "off",
    "@typescript-eslint/no-non-null-assertion": "warn",
    "no-console": "off",
    "prefer-const": "error",
    "no-var": "error",
    "object-shorthand": "error",
    "prefer-template": "error",
    "no-duplicate-imports": "error",
    "eqeqeq": ["error", "always"],
    "no-eval": "error",
    "no-implied-eval": "error",
    "no-new-func": "error",
    "no-return-assign": "error",
    "no-self-compare": "error",
    "no-throw-literal": "error",
    "no-unused-expressions": "error",
    "radix": "error"
  },
  "overrides": [
    {
      "files": ["**/*.test.ts", "**/*.spec.ts"],
      "env": {
        "jest": true
      },
      "rules": {
        "@typescript-eslint/no-explicit-any": "off",
        "no-unused-expressions": "off"
      }
    }
  ],
  "ignorePatterns": [
    "dist/",
    "node_modules/",
    "coverage/",
    "*.js",
    "*.d.ts"
  ]
}

```

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------

```markdown
# 🤖 MCP Server Gemini

<!-- [![npm version](https://badge.fury.io/js/mcp-server-gemini.svg)](https://badge.fury.io/js/mcp-server-gemini) -->
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
[![TypeScript](https://img.shields.io/badge/TypeScript-007ACC?logo=typescript&logoColor=white)](https://www.typescriptlang.org/)
[![Node.js](https://img.shields.io/badge/Node.js-43853D?logo=node.js&logoColor=white)](https://nodejs.org/)
<!-- [![CI](https://github.com/gurr-i/mcp-server-gemini/workflows/CI/badge.svg)](https://github.com/gurr-i/mcp-server-gemini/actions) -->

A **state-of-the-art Model Context Protocol (MCP) server** that provides seamless integration with Google's Gemini AI models. This server enables Claude Desktop and other MCP-compatible clients to leverage the full power of Gemini's advanced AI capabilities.

## ✨ Features

### 🧠 **Latest Gemini Models**
- **Gemini 2.5 Pro** - Most capable thinking model for complex reasoning
- **Gemini 2.5 Flash** - Fast thinking model with best price/performance  
- **Gemini 2.0 Series** - Latest generation models with advanced features
- **Gemini 1.5 Series** - Proven, reliable models for production use

### 🚀 **Advanced Capabilities**
- **🧠 Thinking Models** - Gemini 2.5 series with step-by-step reasoning
- **🔍 Google Search Grounding** - Real-time web information integration
- **📊 JSON Mode** - Structured output with schema validation
- **🎯 System Instructions** - Behavior customization and control
- **👁️ Vision Support** - Image analysis and multimodal capabilities
- **💬 Conversation Memory** - Context preservation across interactions

### 🛠️ **Production Ready**
- **TypeScript** - Full type safety and modern development
- **Comprehensive Error Handling** - Robust error management and recovery
- **Rate Limiting** - Built-in protection against API abuse
- **Detailed Logging** - Comprehensive monitoring and debugging
- **Input Validation** - Secure parameter validation with Zod
- **Retry Logic** - Automatic retry with exponential backoff

## 🚀 Quick Start

### Prerequisites

- **Node.js 16+** ([Download](https://nodejs.org/))
- **Google AI Studio API Key** ([Get one here](https://aistudio.google.com/app/apikey))

### Installation

#### Option 1: Global Installation (Recommended)
```bash
npm install -g mcp-server-gemini
```

#### Option 2: Local Development
```bash
git clone https://github.com/gurr-i/mcp-server-gemini-pro.git
cd mcp-server-gemini-pro
npm install
npm run build
```

### Configuration

#### 1. Set up your API key

**Option A: Environment Variable**
```bash
export GEMINI_API_KEY="your_api_key_here"
```

**Option B: .env file**
```bash
echo "GEMINI_API_KEY=your_api_key_here" > .env
```

#### 2. Configure Claude Desktop

Add to your `claude_desktop_config.json`:

**For Global Installation:**
```json
{
  "mcpServers": {
    "gemini": {
      "command": "mcp-server-gemini",
      "env": {
        "GEMINI_API_KEY": "your_api_key_here"
      }
    }
  }
}
```

**For Local Installation:**
```json
{
  "mcpServers": {
    "gemini": {
      "command": "node",
      "args": ["/path/to/mcp-server-gemini-pro/dist/enhanced-stdio-server.js"],
      "env": {
        "GEMINI_API_KEY": "your_api_key_here"
      }
    }
  }
}
```

#### 3. Restart Claude Desktop

Close and restart Claude Desktop completely for changes to take effect.

## 💡 Usage Examples

Once configured, you can use Gemini through Claude Desktop with natural language:

### Basic Text Generation
```
"Use Gemini to explain quantum computing in simple terms"
"Generate a creative story about AI using Gemini 2.5 Pro"
```

### Advanced Features
```
"Use Gemini with JSON mode to extract key points from this text"
"Use Gemini with grounding to get the latest news about AI"
"Generate a Python function using Gemini's thinking capabilities"
```

### Image Analysis
```
"Analyze this image with Gemini" (attach image)
"What's in this screenshot using Gemini vision?"
```

### Development Tasks
```
"Use Gemini to review this code and suggest improvements"
"Generate comprehensive tests for this function using Gemini"
```

## ⚙️ Configuration

### Environment Variables

The server can be configured using environment variables or a `.env` file:

#### Required Configuration
```bash
# Google AI Studio API Key (required)
GEMINI_API_KEY=your_api_key_here
```

#### Optional Configuration
```bash
# Logging level (default: info)
# Options: error, warn, info, debug
LOG_LEVEL=info

# Enable performance metrics (default: false)
ENABLE_METRICS=false

# Rate limiting configuration
RATE_LIMIT_ENABLED=true        # Enable/disable rate limiting (default: true)
RATE_LIMIT_REQUESTS=100        # Max requests per window (default: 100)
RATE_LIMIT_WINDOW=60000        # Time window in ms (default: 60000 = 1 minute)

# Request timeout in milliseconds (default: 30000 = 30 seconds)
REQUEST_TIMEOUT=30000

# Environment mode (default: production)
NODE_ENV=production
```

### Environment Setup

#### Development Environment
```bash
# .env for development
GEMINI_API_KEY=your_api_key_here
NODE_ENV=development
LOG_LEVEL=debug
RATE_LIMIT_ENABLED=false
REQUEST_TIMEOUT=60000
```

#### Production Environment
```bash
# .env for production
GEMINI_API_KEY=your_api_key_here
NODE_ENV=production
LOG_LEVEL=warn
RATE_LIMIT_ENABLED=true
RATE_LIMIT_REQUESTS=100
RATE_LIMIT_WINDOW=60000
REQUEST_TIMEOUT=30000
ENABLE_METRICS=true
```

### Claude Desktop Configuration

#### Configuration File Locations
| OS | Path |
|----|------|
| **macOS** | `~/Library/Application Support/Claude/claude_desktop_config.json` |
| **Windows** | `%APPDATA%\Claude\claude_desktop_config.json` |
| **Linux** | `~/.config/Claude/claude_desktop_config.json` |

#### Basic Configuration
```json
{
  "mcpServers": {
    "gemini": {
      "command": "mcp-server-gemini",
      "env": {
        "GEMINI_API_KEY": "your_api_key_here"
      }
    }
  }
}
```

#### Advanced Configuration
```json
{
  "mcpServers": {
    "gemini": {
      "command": "mcp-server-gemini",
      "env": {
        "GEMINI_API_KEY": "your_api_key_here",
        "LOG_LEVEL": "info",
        "RATE_LIMIT_REQUESTS": "200",
        "REQUEST_TIMEOUT": "45000"
      }
    }
  }
}
```

#### Local Development Configuration
```json
{
  "mcpServers": {
    "gemini": {
      "command": "node",
      "args": ["/path/to/mcp-server-gemini-pro/dist/enhanced-stdio-server.js"],
      "cwd": "/path/to/mcp-server-gemini-pro",
      "env": {
        "GEMINI_API_KEY": "your_api_key_here",
        "NODE_ENV": "development",
        "LOG_LEVEL": "debug"
      }
    }
  }
}
```

## 🛠️ Available Tools

| Tool | Description | Key Features |
|------|-------------|--------------|
| **generate_text** | Generate text with advanced features | Thinking models, JSON mode, grounding |
| **analyze_image** | Analyze images using vision models | Multi-modal understanding, detailed analysis |
| **count_tokens** | Count tokens for cost estimation | Accurate token counting for all models |
| **list_models** | List all available Gemini models | Real-time model availability and features |
| **embed_text** | Generate text embeddings | High-quality vector representations |
| **get_help** | Get usage help and documentation | Self-documenting with examples |

## 📊 Model Comparison

| Model | Context Window | Features | Best For | Speed |
|-------|----------------|----------|----------|-------|
| **gemini-2.5-pro** | 2M tokens | Thinking, JSON, Grounding | Complex reasoning, coding | Slower |
| **gemini-2.5-flash** ⭐ | 1M tokens | Thinking, JSON, Grounding | General purpose | Fast |
| **gemini-2.5-flash-lite** | 1M tokens | Thinking, JSON | High-throughput tasks | Fastest |
| **gemini-2.0-flash** | 1M tokens | JSON, Grounding | Standard tasks | Fast |
| **gemini-2.0-flash-lite** | 1M tokens | JSON | Simple tasks | Fastest |
| **gemini-2.0-pro-experimental** | 2M tokens | JSON, Grounding | Experimental features | Medium |
| **gemini-1.5-pro** | 2M tokens | JSON | Legacy support | Medium |
| **gemini-1.5-flash** | 1M tokens | JSON | Legacy support | Fast |

## 🔧 Development

### Prerequisites
- **Node.js 16+** ([Download](https://nodejs.org/))
- **npm 7+** (comes with Node.js)
- **Git** for version control
- **Google AI Studio API Key** ([Get one here](https://aistudio.google.com/app/apikey))

### Setup
```bash
# Clone the repository
git clone https://github.com/gurr-i/mcp-server-gemini-pro.git
cd mcp-server-gemini-pro

# Install dependencies
npm install

# Set up environment variables
cp .env.example .env
# Edit .env and add your GEMINI_API_KEY
```

### Available Scripts

#### Development
```bash
npm run dev          # Start development server with hot reload
npm run dev:watch    # Start with file watching (nodemon)
npm run build        # Build for production
npm run build:watch  # Build with watch mode
npm run clean        # Clean build directory
```

#### Testing
```bash
npm test             # Run all tests
npm run test:watch   # Run tests in watch mode
npm run test:coverage # Run tests with coverage report
npm run test:integration # Run integration tests (requires API key)
```

#### Code Quality
```bash
npm run lint         # Lint TypeScript code
npm run lint:fix     # Fix linting issues automatically
npm run format       # Format code with Prettier
npm run format:check # Check code formatting
npm run type-check   # Run TypeScript type checking
npm run validate     # Run all quality checks (lint + test + type-check)
```

#### Release & Distribution
```bash
npm run prepack      # Prepare package for publishing
npm run release      # Build, validate, and publish to npm
```

### Project Structure
```
mcp-server-gemini/
├── src/                          # Source code
│   ├── config/                   # Configuration management
│   │   └── index.ts             # Environment config with Zod validation
│   ├── utils/                   # Utility modules
│   │   ├── logger.ts           # Structured logging system
│   │   ├── errors.ts           # Custom error classes & handling
│   │   ├── validation.ts       # Input validation with Zod
│   │   └── rateLimiter.ts      # Rate limiting implementation
│   ├── enhanced-stdio-server.ts # Main MCP server implementation
│   └── types.ts                # TypeScript type definitions
├── tests/                       # Test suite
│   ├── unit/                   # Unit tests
│   │   ├── config.test.ts      # Configuration tests
│   │   ├── validation.test.ts  # Validation tests
│   │   └── errors.test.ts      # Error handling tests
│   ├── integration/            # Integration tests
│   │   └── gemini-api.test.ts  # Real API integration tests
│   └── setup.ts               # Test setup and utilities
├── docs/                       # Documentation
│   ├── api.md                 # API reference
│   ├── configuration.md       # Configuration guide
│   └── troubleshooting.md     # Troubleshooting guide
├── scripts/                    # Build and utility scripts
│   ├── build.sh              # Production build script
│   ├── dev.sh                # Development script
│   └── test.sh               # Test execution script
├── .github/workflows/         # GitHub Actions CI/CD
│   ├── ci.yml                # Continuous integration
│   └── release.yml           # Automated releases
├── dist/                      # Built output (generated)
├── coverage/                  # Test coverage reports (generated)
└── node_modules/             # Dependencies (generated)
```

## 🧪 Testing

### Test Suite Overview
The project includes comprehensive testing with unit tests, integration tests, and code coverage reporting.

### Running Tests

#### All Tests
```bash
npm test                    # Run all tests (unit tests only by default)
npm run test:watch         # Run tests in watch mode for development
npm run test:coverage      # Run tests with coverage report
```

#### Unit Tests
```bash
npm test -- --testPathPattern=unit    # Run only unit tests
npm test -- --testNamePattern="config" # Run specific test suites
```

#### Integration Tests
Integration tests require a valid `GEMINI_API_KEY` and make real API calls:

```bash
# Set API key and run integration tests
GEMINI_API_KEY=your_api_key_here npm run test:integration

# Or set in .env file and run
npm run test:integration
```

#### Test Coverage
```bash
npm run test:coverage      # Generate coverage report
open coverage/lcov-report/index.html  # View coverage report (macOS)
```

### Test Structure

#### Unit Tests (`tests/unit/`)
- **Configuration Tests**: Environment variable validation, config loading
- **Validation Tests**: Input validation, schema validation, sanitization
- **Error Handling Tests**: Custom error classes, error recovery, retry logic
- **Utility Tests**: Logger, rate limiter, helper functions

#### Integration Tests (`tests/integration/`)
- **Gemini API Tests**: Real API calls to test connectivity and functionality
- **Model Testing**: Verify all supported models work correctly
- **Feature Testing**: JSON mode, grounding, embeddings, token counting

### Writing Tests

#### Test File Structure
```typescript
// tests/unit/example.test.ts
import { describe, it, expect, beforeEach, afterEach } from '@jest/globals';
import { YourModule } from '../../src/your-module.js';

describe('YourModule', () => {
  beforeEach(() => {
    // Setup before each test
  });

  afterEach(() => {
    // Cleanup after each test
  });

  it('should do something', () => {
    // Test implementation
    expect(result).toBe(expected);
  });
});
```

#### Custom Matchers
The test suite includes custom Jest matchers:
```typescript
expect(response).toBeValidMCPResponse(); // Validates MCP response format
```

### Test Configuration
Tests are configured in `jest.config.js` with:
- **TypeScript Support**: Full ES modules and TypeScript compilation
- **Coverage Thresholds**: Minimum 70% coverage required
- **Test Timeout**: 30 seconds for integration tests
- **Setup Files**: Automatic test environment setup

## 🐳 Docker Deployment

### Using Docker

#### Build and Run
```bash
# Build the Docker image
docker build -t mcp-server-gemini .

# Run the container
docker run -d \
  --name mcp-server-gemini \
  -e GEMINI_API_KEY=your_api_key_here \
  -e LOG_LEVEL=info \
  mcp-server-gemini
```

#### Using Docker Compose
```bash
# Create .env file with your API key
echo "GEMINI_API_KEY=your_api_key_here" > .env

# Start the service
docker-compose up -d

# View logs
docker-compose logs -f

# Stop the service
docker-compose down
```

#### Development with Docker
```bash
# Start development environment
docker-compose --profile dev up

# This mounts source code for live reloading
```

### Environment-Specific Deployments

#### Production Deployment
```bash
# Production build
docker build --target production -t mcp-server-gemini:prod .

# Run with production settings
docker run -d \
  --name mcp-server-gemini-prod \
  --restart unless-stopped \
  -e GEMINI_API_KEY=your_api_key_here \
  -e NODE_ENV=production \
  -e LOG_LEVEL=warn \
  -e RATE_LIMIT_ENABLED=true \
  -e ENABLE_METRICS=true \
  mcp-server-gemini:prod
```

#### Health Checks
```bash
# Check container health
docker ps
docker logs mcp-server-gemini

# Manual health check
docker exec mcp-server-gemini node -e "console.log('Health check passed')"
```

## 🚀 Deployment Options

### 1. npm Global Installation
```bash
# Install globally
npm install -g mcp-server-gemini

# Run directly
GEMINI_API_KEY=your_key mcp-server-gemini
```

### 2. Local Installation
```bash
# Clone and build
git clone https://github.com/gurr-i/mcp-server-gemini-pro.git
cd mcp-server-gemini-pro
npm install
npm run build

# Run locally
GEMINI_API_KEY=your_key npm start
```

### 3. Docker Deployment
```bash
# Using Docker Hub (when published)
docker run -e GEMINI_API_KEY=your_key mcp-server-gemini-pro:latest

# Using local build
docker build -t mcp-server-gemini-pro .
docker run -e GEMINI_API_KEY=your_key mcp-server-gemini-pro
```

### 4. Process Manager (PM2)
```bash
# Install PM2
npm install -g pm2

# Create ecosystem file
cat > ecosystem.config.js << EOF
module.exports = {
  apps: [{
    name: 'mcp-server-gemini',
    script: './dist/enhanced-stdio-server.js',
    env: {
      NODE_ENV: 'production',
      GEMINI_API_KEY: 'your_api_key_here',
      LOG_LEVEL: 'info'
    }
  }]
}
EOF

# Start with PM2
pm2 start ecosystem.config.js
pm2 save
pm2 startup
```

## 🔧 Troubleshooting

### Common Issues

#### 1. Server Won't Start
```bash
# Check if API key is set
echo $GEMINI_API_KEY

# Verify .env file exists and is readable
cat .env | grep GEMINI_API_KEY

# Check file permissions
ls -la .env
chmod 600 .env
```

#### 2. API Key Issues
```bash
# Test API key manually
curl -H "Content-Type: application/json" \
     -d '{"contents":[{"parts":[{"text":"Hello"}]}]}' \
     -X POST "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key=YOUR_API_KEY"
```

#### 3. Claude Desktop Integration
```bash
# Verify config file location (macOS)
ls -la ~/Library/Application\ Support/Claude/claude_desktop_config.json

# Validate JSON syntax
cat claude_desktop_config.json | jq .

# Check server installation
which mcp-server-gemini
npm list -g mcp-server-gemini
```

#### 4. Rate Limiting
```bash
# Temporarily disable rate limiting
export RATE_LIMIT_ENABLED=false

# Increase limits
export RATE_LIMIT_REQUESTS=1000
export RATE_LIMIT_WINDOW=60000
```

### Debug Mode
```bash
# Enable debug logging
export LOG_LEVEL=debug
npm run dev

# Or for production
export LOG_LEVEL=debug
npm start
```

### Getting Help
- 🐛 [Report Issues](https://github.com/gurr-i/mcp-server-gemini-pro/issues)
- 💬 [Discussions](https://github.com/gurr-i/mcp-server-gemini-pro/discussions)
- 📚 [Documentation](docs/)

## 🔒 Security

### API Key Security
- **Never commit API keys** to version control
- **Use environment variables** or secure secret management
- **Rotate keys regularly** for production use
- **Use different keys** for development and production

### Rate Limiting
- **Enable rate limiting** in production (`RATE_LIMIT_ENABLED=true`)
- **Configure appropriate limits** based on your usage patterns
- **Monitor API usage** to prevent quota exhaustion

### Input Validation
- All inputs are **automatically validated** and sanitized
- **XSS and injection protection** built-in
- **Schema validation** for all tool parameters

### Container Security
- Runs as **non-root user** in Docker
- **Read-only filesystem** with minimal privileges
- **Security scanning** in CI/CD pipeline

## 📚 Documentation

- [API Documentation](docs/api.md)
- [Configuration Guide](docs/configuration.md)
- [Troubleshooting](docs/troubleshooting.md)
- [Contributing Guide](Contribution-Guide.md)

## 🤝 Contributing

We welcome contributions! Please see our [Contributing Guide](Contribution-Guide.md) for details.

### Development Workflow
1. Fork the repository
2. Create a feature branch
3. Make your changes
4. Add tests
5. Run `npm run validate`
6. Submit a pull request

## 📄 License

MIT License - see [LICENSE](LICENSE) file for details.

## 🙏 Acknowledgments

- Google AI for the Gemini API
- Anthropic for the Model Context Protocol
- The open-source community for inspiration and feedback

## 📞 Support

- 🐛 [Report Issues](https://github.com/gurr-i/mcp-server-gemini-pro/issues)
- 💬 [Discussions](https://github.com/gurr-i/mcp-server-gemini-pro/discussions)
- 📧 [Email Support](mailto:[email protected])

---

<div align="center">
  <strong>Made with ❤️ By Gurveer for the AI development community</strong>
</div>

```

--------------------------------------------------------------------------------
/tsconfig.json:
--------------------------------------------------------------------------------

```json
{
  "compilerOptions": {
    "target": "ES2022",
    "module": "NodeNext",
    "moduleResolution": "NodeNext",
    "outDir": "./dist",
    "rootDir": "./src",
    "strict": true,
    "esModuleInterop": true,
    "skipLibCheck": true,
    "forceConsistentCasingInFileNames": true,
    "sourceMap": true
  },
  "ts-node": {
    "esm": true
  },
  "include": ["src/**/*"],
  "exclude": ["node_modules"]
}
```

--------------------------------------------------------------------------------
/scripts/build.sh:
--------------------------------------------------------------------------------

```bash
#!/bin/bash

# Production build for MCP Server Gemini:
# clean dist/, type-check, compile, then stage package metadata next to the
# compiled output. Any failing step aborts the build (set -e).
set -e

echo "🔧 Building MCP Server Gemini..."

echo "🧹 Cleaning previous build..."
rm -rf dist/

echo "🔍 Type checking..."
npx tsc --noEmit

echo "🏗️  Building TypeScript..."
npx tsc

# package.json / README / LICENSE travel with the compiled output so dist/
# is publishable on its own.
echo "📋 Copying additional files..."
cp package.json README.md LICENSE dist/

echo "✅ Build completed successfully!"
echo "📦 Output directory: dist/"

```

--------------------------------------------------------------------------------
/scripts/test.sh:
--------------------------------------------------------------------------------

```bash
#!/bin/bash

# Test script for MCP Server Gemini: lint, type-check, unit tests, and —
# when GEMINI_API_KEY is available — integration tests. set -e aborts on
# the first failure.
set -e

echo "🧪 Running tests for MCP Server Gemini..."

# Run linting
echo "🔍 Running ESLint..."
# BUG FIX: the glob must be quoted. Unquoted, bash (globstar is off by
# default) expands src/**/*.ts as src/*/*.ts, silently skipping top-level
# files such as src/enhanced-stdio-server.ts and src/types.ts. Quoting the
# pattern lets ESLint perform the recursive glob itself.
npx eslint "src/**/*.ts" --fix

# Run type checking
echo "📝 Type checking..."
npx tsc --noEmit

# Run unit tests
echo "🧪 Running unit tests..."
npx jest --coverage

# Run integration tests if API key is available
if [ -n "$GEMINI_API_KEY" ]; then
    echo "🔗 Running integration tests..."
    npx jest --testPathPattern=integration
else
    echo "⚠️  Skipping integration tests (GEMINI_API_KEY not set)"
fi

echo "✅ All tests completed successfully!"

```

--------------------------------------------------------------------------------
/scripts/dev.sh:
--------------------------------------------------------------------------------

```bash
#!/bin/bash

# Development script for MCP Server Gemini: verifies .env exists and holds a
# real API key, then starts the server with hot reload (nodemon + ts-node).
set -e

echo "🚀 Starting MCP Server Gemini in development mode..."

# Check if .env exists; if not, write a placeholder template and exit.
if [ ! -f .env ]; then
    echo "⚠️  .env file not found. Creating template..."
    echo "GEMINI_API_KEY=your_api_key_here" > .env
    echo "📝 Please edit .env file with your actual API key"
    exit 1
fi

# BUG FIX: also reject the placeholder value written by the template above.
# Previously "your_api_key_here" matched the non-empty pattern, so a second
# run started the server with an invalid key.
if ! grep -q "^GEMINI_API_KEY=.*[^=]$" .env || grep -q "^GEMINI_API_KEY=your_api_key_here$" .env; then
    echo "❌ GEMINI_API_KEY not set in .env file"
    echo "💡 Please add your Gemini API key to .env file"
    exit 1
fi

echo "✅ Environment configured"
echo "🔧 Starting development server with hot reload..."

# Start with ts-node and watch mode
npx nodemon --exec "node --loader ts-node/esm src/enhanced-stdio-server.ts" --ext ts --watch src/

```

--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------

```dockerfile
# Generated by https://smithery.ai. See: https://smithery.ai/docs/config#dockerfile
# Multi-stage build: compile TypeScript in a full image, ship a slim runtime.

# ---- Build stage ----
FROM node:18-alpine AS builder

# Set the working directory
WORKDIR /app

# Copy package.json and package-lock.json
COPY package.json package-lock.json ./

# Install dependencies from the lockfile for a reproducible build
# (npm ci instead of npm install — the lockfile is already copied).
RUN npm ci

# Copy the rest of the application code
COPY . .

# Build the application
RUN npm run build

# ---- Release stage: smaller Node.js image ----
FROM node:18-slim AS release

# Set the working directory
WORKDIR /app

# Copy the build from the builder stage
COPY --from=builder /app/dist /app/dist
COPY --from=builder /app/package.json /app/package-lock.json /app/

# Install production dependencies only (`--omit=dev` is the current form of
# the deprecated `--production` flag).
RUN npm ci --omit=dev

# Expose the necessary port
EXPOSE 3005

# BUG FIX: the build emits dist/enhanced-stdio-server.js (src/ has no
# top-level index.ts), so the previous entry point dist/index.js never
# existed and the container exited immediately.
CMD ["node", "dist/enhanced-stdio-server.js"]
```

--------------------------------------------------------------------------------
/docs/troubleshooting.md:
--------------------------------------------------------------------------------

```markdown
# Troubleshooting Guide

## Common Issues

### Connection Problems

1. Port Already in Use
```bash
Error: EADDRINUSE: address already in use :::3005
```
Solution:
- Check if another process is using port 3005
- Kill the existing process
- Change the port number

2. WebSocket Connection Failed
```
Error: Connection refused
```
Solution:
- Verify server is running
- Check firewall settings
- Confirm correct port

### API Issues

1. Invalid API Key
```
Error: Invalid API key provided
```
Solution:
- Check GEMINI_API_KEY environment variable
- Verify API key is valid
- Regenerate API key if needed

2. Rate Limiting
```
Error: Resource exhausted
```
Solution:
- Implement backoff strategy
- Check quota limits
- Upgrade API tier if needed

## Protocol Errors

1. Invalid Message Format
```json
Error: Parse error (-32700)
```
Solution:
- Check JSON syntax
- Verify message format
- Validate against schema

2. Method Not Found
```json
Error: Method not found (-32601)
```
Solution:
- Check method name
- Verify protocol version
- Update capabilities

## Debugging Steps

1. Enable Debug Mode
```bash
DEBUG=true npm start
```

2. Check Logs
```bash
tail -f debug.log
```

3. Monitor WebSocket Traffic
```bash
wscat -c ws://localhost:3005
```

## Getting Help

1. Check Documentation
- Review implementation notes
- Check protocol specification
- Read troubleshooting guide

2. Open Issues
- Search existing issues
- Provide error details
- Include reproduction steps

3. Community Support
- Join discussions
- Ask questions
- Share solutions

```

--------------------------------------------------------------------------------
/jest.config.js:
--------------------------------------------------------------------------------

```javascript
/** @type {import('jest').Config} */
module.exports = {
  // Use ts-jest preset for TypeScript support
  preset: 'ts-jest/presets/default-esm',

  // Test environment
  testEnvironment: 'node',

  // Enable ESM support
  extensionsToTreatAsEsm: ['.ts'],

  // Module name mapping for ESM imports
  moduleNameMapping: {
    '^(\\.{1,2}/.*)\\.js$': '$1',
  },

  // Transform configuration
  transform: {
    '^.+\\.ts$': ['ts-jest', {
      useESM: true,
      tsconfig: {
        module: 'ESNext',
        moduleResolution: 'node'
      }
    }]
  },

  // Test file patterns
  testMatch: [
    '**/tests/**/*.test.ts',
    '**/__tests__/**/*.test.ts'
  ],

  // Test roots
  roots: ['<rootDir>/src', '<rootDir>/tests'],

  // Ignore patterns
  testPathIgnorePatterns: [
    '/node_modules/',
    '/dist/',
    '/build/'
  ],

  // Coverage configuration
  collectCoverage: true,
  coverageDirectory: 'coverage',
  coverageReporters: [
    'text',
    'lcov',
    'html',
    'json-summary'
  ],

  // Coverage collection patterns
  collectCoverageFrom: [
    'src/**/*.ts',
    '!src/**/*.d.ts',
    '!src/**/*.test.ts',
    '!src/**/__tests__/**',
    '!src/types.ts'
  ],

  // Coverage thresholds
  coverageThreshold: {
    global: {
      branches: 70,
      functions: 70,
      lines: 70,
      statements: 70
    }
  },

  // Test timeout
  testTimeout: 30000,

  // Verbose output
  verbose: true,

  // Clear mocks between tests
  clearMocks: true,

  // Restore mocks after each test
  restoreMocks: true,

  // Module file extensions
  moduleFileExtensions: ['ts', 'js', 'json', 'node']
};
```

--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------

```yaml
# Release workflow: on every pushed version tag (v*), run the full
# validation suite, then publish to npm and create a GitHub release.
name: Release

on:
  push:
    tags:
      - 'v*'   # e.g. v1.2.3

jobs:
  # Gate job: publish only runs after validation + build succeed.
  test:
    runs-on: ubuntu-latest

    steps:
    - name: Checkout code
      uses: actions/checkout@v4

    - name: Use Node.js 20.x
      uses: actions/setup-node@v4
      with:
        node-version: 20.x
        cache: 'npm'

    - name: Install dependencies
      run: npm ci

    # npm run validate — lint + tests + type-check per README; the API key
    # is needed because validation may include integration tests.
    - name: Run full validation
      run: npm run validate
      env:
        GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}

    - name: Build project
      run: npm run build

  publish:
    needs: test
    runs-on: ubuntu-latest
    # NOTE(review): creating a release with GITHUB_TOKEN may require
    # `permissions: contents: write` if the repo's default token permissions
    # are read-only — confirm against repo settings.

    steps:
    - name: Checkout code
      uses: actions/checkout@v4

    # registry-url makes setup-node write an .npmrc that reads
    # NODE_AUTH_TOKEN, used by `npm publish` below.
    - name: Use Node.js 20.x
      uses: actions/setup-node@v4
      with:
        node-version: 20.x
        cache: 'npm'
        registry-url: 'https://registry.npmjs.org'

    - name: Install dependencies
      run: npm ci

    - name: Build project
      run: npm run build

    # Requires the NPM_TOKEN secret to be configured on the repository.
    - name: Publish to npm
      run: npm publish
      env:
        NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}

    # NOTE(review): actions/create-release@v1 is archived/unmaintained;
    # consider softprops/action-gh-release or `gh release create` instead.
    - name: Create GitHub Release
      uses: actions/create-release@v1
      env:
        GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      with:
        tag_name: ${{ github.ref }}
        release_name: Release ${{ github.ref }}
        draft: false
        prerelease: false
        body: |
          ## Changes
          
          See [CHANGELOG.md](CHANGELOG.md) for detailed changes.
          
          ## Installation
          
          ```bash
          npm install -g mcp-server-gemini@${{ github.ref_name }}
          ```
          
          ## Docker
          
          ```bash
          docker pull mcp-server-gemini:${{ github.ref_name }}
          ```

```

--------------------------------------------------------------------------------
/tests/setup.ts:
--------------------------------------------------------------------------------

```typescript
import 'dotenv/config';

// Force the test environment flag before any module reads NODE_ENV.
process.env.NODE_ENV = 'test';

// Save the real console methods so they can be restored after the suite.
// The suppression below uses plain assignment (not jest.spyOn), so the
// jest.restoreAllMocks() call in afterEach does NOT undo it mid-suite.
const originalConsoleError = console.error;
const originalConsoleWarn = console.warn;
const originalConsoleLog = console.log;

beforeAll(() => {
  // Suppress console output during tests unless explicitly needed
  console.error = jest.fn();
  console.warn = jest.fn();
  console.log = jest.fn();
});

afterAll(() => {
  // Restore original console methods
  console.error = originalConsoleError;
  console.warn = originalConsoleWarn;
  console.log = originalConsoleLog;
});

// Global test timeout: 30s, generous enough for the slower integration tests.
jest.setTimeout(30000);

// Clear pending timers before each test
// (a no-op unless a test has enabled jest.useFakeTimers()).
beforeEach(() => {
  jest.clearAllTimers();
});

// Clean up mock state after each test
afterEach(() => {
  jest.clearAllMocks();
  jest.restoreAllMocks();
});

// Surface unhandled promise rejections instead of letting them fail silently.
process.on('unhandledRejection', (reason, promise) => {
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
});

// Register a custom matcher that asserts the JSON-RPC 2.0 / MCP response
// envelope: jsonrpc '2.0', a defined id, and either a result or an error.
expect.extend({
  toBeValidMCPResponse(received) {
    const isObject = received != null && typeof received === 'object';
    const pass =
      isObject &&
      received.jsonrpc === '2.0' &&
      received.id !== undefined &&
      (received.result !== undefined || received.error !== undefined);

    const shown = () => JSON.stringify(received);
    return pass
      ? {
          pass: true,
          message: () => `expected ${shown()} not to be a valid MCP response`,
        }
      : {
          pass: false,
          message: () => `expected ${shown()} to be a valid MCP response`,
        };
  },
});

// Type declaration for custom matcher
declare global {
  namespace jest {
    interface Matchers<R> {
      toBeValidMCPResponse(): R;
    }
  }
}

```

--------------------------------------------------------------------------------
/src/config/index.ts:
--------------------------------------------------------------------------------

```typescript
import 'dotenv/config';
import { z } from 'zod';

// Configuration schema validation.
// Each field is populated from an environment variable in loadConfig():
//   geminiApiKey      <- GEMINI_API_KEY
//   logLevel          <- LOG_LEVEL
//   enableMetrics     <- ENABLE_METRICS ('true' to enable)
//   rateLimitEnabled  <- RATE_LIMIT_ENABLED ('false' to disable)
//   rateLimitRequests <- RATE_LIMIT_REQUESTS
//   rateLimitWindow   <- RATE_LIMIT_WINDOW
//   requestTimeout    <- REQUEST_TIMEOUT
//   isDevelopment     <- NODE_ENV === 'development'
const ConfigSchema = z.object({
  // API Configuration
  geminiApiKey: z.string().min(1, 'GEMINI_API_KEY is required'),

  // Server Configuration
  logLevel: z.enum(['error', 'warn', 'info', 'debug']).default('info'),
  enableMetrics: z.boolean().default(false),

  // Rate Limiting
  rateLimitEnabled: z.boolean().default(true),
  rateLimitRequests: z.number().default(100),
  rateLimitWindow: z.number().default(60000), // 1 minute

  // Timeouts
  requestTimeout: z.number().default(30000), // 30 seconds

  // Development
  isDevelopment: z.boolean().default(false)
});

// Validated configuration shape derived from the schema above.
export type Config = z.infer<typeof ConfigSchema>;

/**
 * Load and validate configuration from environment variables.
 *
 * @returns the validated {@link Config}
 * @throws Error with a per-field summary when validation fails
 *         (e.g. when GEMINI_API_KEY is missing or a numeric var is invalid)
 */
export function loadConfig(): Config {
  // Parse an optional integer env var. Returning undefined (for unset or
  // empty values) lets the schema's .default() apply; a non-numeric value
  // becomes NaN, which z.number() rejects during validation.
  const optionalInt = (value: string | undefined): number | undefined =>
    value ? parseInt(value, 10) : undefined;

  const rawConfig = {
    geminiApiKey: process.env.GEMINI_API_KEY,
    logLevel: process.env.LOG_LEVEL,
    enableMetrics: process.env.ENABLE_METRICS === 'true',
    // Rate limiting is on unless explicitly disabled.
    rateLimitEnabled: process.env.RATE_LIMIT_ENABLED !== 'false',
    rateLimitRequests: optionalInt(process.env.RATE_LIMIT_REQUESTS),
    rateLimitWindow: optionalInt(process.env.RATE_LIMIT_WINDOW),
    requestTimeout: optionalInt(process.env.REQUEST_TIMEOUT),
    isDevelopment: process.env.NODE_ENV === 'development'
  };

  try {
    return ConfigSchema.parse(rawConfig);
  } catch (error) {
    if (error instanceof z.ZodError) {
      // Surface every failed field on its own line for a readable startup error.
      const issues = error.issues
        .map(issue => `${issue.path.join('.')}: ${issue.message}`)
        .join('\n');
      throw new Error(`Configuration validation failed:\n${issues}`);
    }
    throw error;
  }
}

// Export singleton config instance (evaluated once, at module load).
export const config = loadConfig();

```

--------------------------------------------------------------------------------
/src/utils/logger.ts:
--------------------------------------------------------------------------------

```typescript
import { config } from '../config/index.js';

// Numeric severity levels; lower value = higher severity. A message is
// emitted when its level is <= the configured level.
export enum LogLevel {
  ERROR = 0,
  WARN = 1,
  INFO = 2,
  DEBUG = 3
}

// Maps the config.logLevel string ('error' | 'warn' | 'info' | 'debug')
// to its numeric LogLevel.
const LOG_LEVEL_MAP: Record<string, LogLevel> = {
  error: LogLevel.ERROR,
  warn: LogLevel.WARN,
  info: LogLevel.INFO,
  debug: LogLevel.DEBUG
};

/**
 * Minimal leveled logger. All output goes to stderr because stdout carries
 * the MCP JSON-RPC protocol stream and must stay clean.
 */
class Logger {
  private currentLevel: LogLevel;

  constructor() {
    // Unknown or missing config values fall back to INFO.
    this.currentLevel = LOG_LEVEL_MAP[config.logLevel] ?? LogLevel.INFO;
  }

  /** Format a line as "[ISO timestamp] LEVEL message {meta as JSON}". */
  private formatMessage(level: string, message: string, meta?: any): string {
    const timestamp = new Date().toISOString();
    const metaStr = meta ? ` ${JSON.stringify(meta)}` : '';
    return `[${timestamp}] ${level.padEnd(5)} ${message}${metaStr}`;
  }

  private log(level: LogLevel, levelName: string, message: string, meta?: any): void {
    if (level <= this.currentLevel) {
      // Always write to stderr to avoid interfering with the MCP protocol
      // on stdout. (The previous if/else here had two identical branches,
      // both calling console.error — collapsed into one call.)
      console.error(this.formatMessage(levelName, message, meta));
    }
  }

  error(message: string, meta?: any): void {
    this.log(LogLevel.ERROR, '❌ ERROR', message, meta);
  }

  warn(message: string, meta?: any): void {
    this.log(LogLevel.WARN, '⚠️  WARN', message, meta);
  }

  info(message: string, meta?: any): void {
    this.log(LogLevel.INFO, 'ℹ️  INFO', message, meta);
  }

  debug(message: string, meta?: any): void {
    this.log(LogLevel.DEBUG, '🐛 DEBUG', message, meta);
  }

  // Convenience methods with emojis for better UX
  startup(message: string, meta?: any): void {
    this.info(`🚀 ${message}`, meta);
  }

  success(message: string, meta?: any): void {
    this.info(`✅ ${message}`, meta);
  }

  request(message: string, meta?: any): void {
    this.debug(`📨 ${message}`, meta);
  }

  response(message: string, meta?: any): void {
    this.debug(`📤 ${message}`, meta);
  }

  api(message: string, meta?: any): void {
    this.debug(`🤖 ${message}`, meta);
  }

  security(message: string, meta?: any): void {
    this.warn(`🔒 ${message}`, meta);
  }
}

export const logger = new Logger();

```

--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------

```yaml
# NOTE(review): the top-level 'version' key is obsolete and ignored by
# Compose v2 — safe to drop once Compose v1 support is no longer needed.
version: '3.8'

services:
  mcp-server-gemini:
    build:
      context: .
      dockerfile: Dockerfile
      target: production
    image: mcp-server-gemini:latest
    container_name: mcp-server-gemini
    restart: unless-stopped
    
    # Environment configuration
    environment:
      - NODE_ENV=production
      - LOG_LEVEL=info
      - RATE_LIMIT_ENABLED=true
      - RATE_LIMIT_REQUESTS=100
      - RATE_LIMIT_WINDOW=60000
      - REQUEST_TIMEOUT=30000
    
    # Load environment variables from file
    # (presumably supplies GEMINI_API_KEY — confirm .env exists at deploy time)
    env_file:
      - .env
    
    # Resource limits
    deploy:
      resources:
        limits:
          memory: 512M
          cpus: '0.5'
        reservations:
          memory: 256M
          cpus: '0.25'
    
    # Health check
    # NOTE(review): this check only verifies the node binary can run; it
    # always passes and does not probe actual server health.
    healthcheck:
      test: ["CMD", "node", "-e", "console.log('Health check passed')"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    
    # Logging configuration
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"
    
    # Security options
    security_opt:
      - no-new-privileges:true
    
    # Read-only root filesystem (except for tmp)
    read_only: true
    tmpfs:
      - /tmp:noexec,nosuid,size=100m
    
    # Drop all capabilities and add only necessary ones
    cap_drop:
      - ALL
    cap_add:
      - SETGID
      - SETUID
    
    # Use non-root user
    user: "1001:1001"

  # Development service (enabled via `docker compose --profile dev`)
  mcp-server-gemini-dev:
    build:
      context: .
      dockerfile: Dockerfile
      target: builder
    image: mcp-server-gemini:dev
    container_name: mcp-server-gemini-dev
    restart: "no"
    
    environment:
      - NODE_ENV=development
      - LOG_LEVEL=debug
      - RATE_LIMIT_ENABLED=false
      - REQUEST_TIMEOUT=60000
    
    env_file:
      - .env
    
    # Mount source code for development (read-only)
    volumes:
      - ./src:/app/src:ro
      - ./package.json:/app/package.json:ro
      - ./tsconfig.json:/app/tsconfig.json:ro
    
    # Override command for development
    command: ["npm", "run", "dev"]
    
    profiles:
      - dev

# Networks
networks:
  default:
    name: mcp-server-gemini-network
    driver: bridge

# Volumes for persistent data (if needed)
volumes:
  logs:
    driver: local

```

--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------

```yaml
name: CI

on:
  push:
    branches: [ main, develop ]
  pull_request:
    branches: [ main, develop ]

jobs:
  test:
    runs-on: ubuntu-latest
    
    strategy:
      matrix:
        # NOTE(review): Node 16 is end-of-life — confirm @google/genai still
        # supports it before keeping 16.x in the matrix.
        node-version: [16.x, 18.x, 20.x]
    
    steps:
    - name: Checkout code
      uses: actions/checkout@v4
    
    - name: Use Node.js ${{ matrix.node-version }}
      uses: actions/setup-node@v4
      with:
        node-version: ${{ matrix.node-version }}
        cache: 'npm'
    
    - name: Install dependencies
      run: npm ci
    
    - name: Run type checking
      run: npm run type-check
    
    - name: Run linting
      run: npm run lint
    
    - name: Run formatting check
      run: npm run format:check
    
    # --coverage is required here: the Codecov upload step below expects
    # ./coverage/lcov.info, which Jest only writes when coverage is enabled.
    # Without this flag the upload step had nothing to upload.
    - name: Run unit tests
      run: npm test -- --coverage --testPathIgnorePatterns=integration
    
    - name: Run integration tests
      if: matrix.node-version == '20.x'
      env:
        GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
      run: npm run test:integration
    
    - name: Build project
      run: npm run build
    
    - name: Upload coverage to Codecov
      if: matrix.node-version == '20.x'
      # NOTE(review): codecov-action@v3 is deprecated; v4 requires a
      # CODECOV_TOKEN secret — migrate once the token is provisioned.
      uses: codecov/codecov-action@v3
      with:
        file: ./coverage/lcov.info
        flags: unittests
        name: codecov-umbrella

  security:
    runs-on: ubuntu-latest
    
    steps:
    - name: Checkout code
      uses: actions/checkout@v4
    
    - name: Use Node.js 20.x
      uses: actions/setup-node@v4
      with:
        node-version: 20.x
        cache: 'npm'
    
    - name: Install dependencies
      run: npm ci
    
    - name: Run security audit
      run: npm audit --audit-level=moderate
    
    - name: Check for vulnerabilities
      run: npx audit-ci --moderate

  build-and-test:
    runs-on: ${{ matrix.os }}
    
    strategy:
      matrix:
        os: [ubuntu-latest, windows-latest, macos-latest]
        node-version: [20.x]
    
    steps:
    - name: Checkout code
      uses: actions/checkout@v4
    
    - name: Use Node.js ${{ matrix.node-version }}
      uses: actions/setup-node@v4
      with:
        node-version: ${{ matrix.node-version }}
        cache: 'npm'
    
    - name: Install dependencies
      run: npm ci
    
    - name: Build project
      run: npm run build
    
    # Smoke test: the built server should start without crashing; it is
    # killed after 10s since a healthy stdio server runs indefinitely.
    - name: Test built server
      run: |
        timeout 10s node dist/enhanced-stdio-server.js || true
      shell: bash
      env:
        GEMINI_API_KEY: test-key
```

--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------

```json
{
  "name": "mcp-server-gemini",
  "version": "1.0.0",
  "description": "A state-of-the-art Model Context Protocol (MCP) server that provides seamless integration with Google's Gemini AI models. This server enables Claude Desktop and other MCP-compatible clients to leverage the full power of Gemini's advanced AI capabilities.",
  "main": "dist/enhanced-stdio-server.js",
  "bin": {
    "mcp-server-gemini": "./dist/enhanced-stdio-server.js"
  },
  "engines": {
    "node": ">=16.0.0",
    "npm": ">=7.0.0"
  },
  "scripts": {
    "build": "npm run clean && tsc",
    "build:watch": "tsc --watch",
    "clean": "rimraf dist",
    "prepare": "npm run build",
    "start": "node dist/enhanced-stdio-server.js",
    "dev": "node --loader ts-node/esm src/enhanced-stdio-server.ts",
    "dev:watch": "nodemon --exec \"node --loader ts-node/esm src/enhanced-stdio-server.ts\" --ext ts --watch src/",
    "test": "jest",
    "test:watch": "jest --watch",
    "test:coverage": "jest --coverage",
    "test:integration": "jest --testPathPattern=integration",
    "lint": "eslint src/**/*.ts",
    "lint:fix": "eslint src/**/*.ts --fix",
    "format": "prettier --write src/**/*.ts",
    "format:check": "prettier --check src/**/*.ts",
    "type-check": "tsc --noEmit",
    "validate": "npm run type-check && npm run lint && npm run test",
    "prepack": "npm run validate && npm run build",
    "release": "npm run validate && npm run build && npm publish"
  },
  "type": "module",
  "dependencies": {
    "@google/genai": "^1.8.0",
    "dotenv": "^16.4.5",
    "zod": "^3.22.4"
  },
  "devDependencies": {
    "@types/jest": "^29.5.0",
    "@types/node": "^20.10.5",
    "@typescript-eslint/eslint-plugin": "^6.21.0",
    "@typescript-eslint/parser": "^6.21.0",
    "eslint": "^8.0.0",
    "eslint-config-prettier": "^10.1.8",
    "jest": "^29.5.0",
    "nodemon": "^3.0.2",
    "prettier": "^3.0.0",
    "rimraf": "^5.0.5",
    "ts-jest": "^29.1.0",
    "ts-node": "^10.9.2",
    "typescript": "^5.3.3"
  },
  "keywords": [
    "mcp",
    "model-context-protocol",
    "gemini",
    "google-gemini",
    "ai",
    "llm",
    "claude-desktop",
    "cursor",
    "windsurf",
    "typescript"
  ],
  "author": "Gurveer",
  "license": "MIT",
  "repository": {
    "type": "git",
    "url": "git+https://github.com/gurr-i/mcp-server-gemini-pro.git"
  },
  "bugs": {
    "url": "https://github.com/gurr-i/mcp-server-gemini-pro/issues"
  },
  "homepage": "https://github.com/gurr-i/mcp-server-gemini-pro#readme",
  "files": [
    "dist",
    "README.md",
    "LICENSE",
    "CHANGELOG.md"
  ]
}

```

--------------------------------------------------------------------------------
/Contribution-Guide.md:
--------------------------------------------------------------------------------

```markdown
# Contributing to Gemini MCP Server

Thank you for your interest in contributing to the Gemini MCP Server! This document provides guidelines for contributing to the project.

## Code of Conduct

This project follows a standard code of conduct. Please be respectful and constructive in all interactions.

## How to Contribute

### Reporting Issues

1. Check if the issue already exists in the [issue tracker](https://github.com/gurr-i/mcp-server-gemini/issues)
2. If not, create a new issue with:
   - Clear description of the problem
   - Steps to reproduce
   - Expected behavior
   - Actual behavior
   - Your environment (OS, MCP client, Node.js version)
   - Relevant logs or error messages

### Suggesting Enhancements

1. Check if the enhancement has already been suggested
2. Create a new issue with the `enhancement` label
3. Describe the feature and why it would be useful
4. Provide examples of how it would work

### Pull Requests

1. Fork the repository
2. Create a new branch: `git checkout -b feature/your-feature-name`
3. Make your changes
4. Write or update tests if applicable
5. Update documentation if needed
6. Commit your changes with clear commit messages
7. Push to your fork
8. Create a pull request

#### Pull Request Guidelines

- Keep PRs focused - one feature or fix per PR
- Follow the existing code style
- Update the README.md if you're adding new features
- Add tests for new functionality
- Make sure all tests pass: `npm test`
- Update type definitions if changing APIs

## Development Setup

```bash
# Clone your fork
git clone https://github.com/YOUR_USERNAME/mcp-server-gemini.git
cd mcp-server-gemini

# Install dependencies
npm install

# Run in development mode
npm run dev

# Run tests
npm test

# Build the project
npm run build

# Lint the code
npm run lint
```

## Code Style

- TypeScript with strict mode
- ESM modules
- Use async/await over callbacks
- Add JSDoc comments for public APIs
- Follow the existing patterns in the codebase

## Testing

- Write tests for new features
- Ensure existing tests pass
- Test with multiple MCP clients if possible
- Test error cases and edge conditions

## Documentation

- Update README.md for new features
- Add JSDoc comments for new functions
- Update USAGE_GUIDE.md if adding new tools
- Update PARAMETERS_REFERENCE.md for new parameters

## Release Process

Maintainers will:
1. Review and merge PRs
2. Update version in package.json
3. Update CHANGELOG.md
4. Create a new release on GitHub
5. Publish to npm if applicable

## Questions?

Feel free to open an issue for any questions about contributing!
```

--------------------------------------------------------------------------------
/tests/unit/config.test.ts:
--------------------------------------------------------------------------------

```typescript
// NOTE(review): importing from src/config/index.js also evaluates the
// module-level `export const config = loadConfig()` singleton, which throws
// at import time when GEMINI_API_KEY is unset — presumably tests/setup.ts
// (dotenv) supplies it; confirm before running this suite in a bare env.
import { loadConfig } from '../../src/config/index.js';

describe('Configuration', () => {
  // Snapshot of the process environment, restored after the suite.
  const originalEnv = process.env;

  beforeEach(() => {
    // resetModules clears the module registry; each test calls loadConfig()
    // directly, so the stale `config` singleton is never consulted.
    jest.resetModules();
    process.env = { ...originalEnv };
  });

  afterAll(() => {
    process.env = originalEnv;
  });

  describe('loadConfig', () => {
    it('should load valid configuration', () => {
      process.env.GEMINI_API_KEY = 'test-api-key';
      
      const config = loadConfig();
      
      // Defaults from the schema apply when env vars are absent.
      expect(config.geminiApiKey).toBe('test-api-key');
      expect(config.logLevel).toBe('info');
      expect(config.rateLimitEnabled).toBe(true);
    });

    it('should throw error for missing API key', () => {
      delete process.env.GEMINI_API_KEY;
      
      expect(() => loadConfig()).toThrow('GEMINI_API_KEY is required');
    });

    it('should use custom log level', () => {
      process.env.GEMINI_API_KEY = 'test-api-key';
      process.env.LOG_LEVEL = 'debug';
      
      const config = loadConfig();
      
      expect(config.logLevel).toBe('debug');
    });

    it('should parse numeric values correctly', () => {
      process.env.GEMINI_API_KEY = 'test-api-key';
      process.env.RATE_LIMIT_REQUESTS = '200';
      process.env.RATE_LIMIT_WINDOW = '120000';
      process.env.REQUEST_TIMEOUT = '60000';
      
      const config = loadConfig();
      
      expect(config.rateLimitRequests).toBe(200);
      expect(config.rateLimitWindow).toBe(120000);
      expect(config.requestTimeout).toBe(60000);
    });

    it('should parse boolean values correctly', () => {
      process.env.GEMINI_API_KEY = 'test-api-key';
      process.env.ENABLE_METRICS = 'true';
      process.env.RATE_LIMIT_ENABLED = 'false';
      
      const config = loadConfig();
      
      expect(config.enableMetrics).toBe(true);
      expect(config.rateLimitEnabled).toBe(false);
    });

    it('should detect development environment', () => {
      process.env.GEMINI_API_KEY = 'test-api-key';
      process.env.NODE_ENV = 'development';
      
      const config = loadConfig();
      
      expect(config.isDevelopment).toBe(true);
    });

    it('should throw error for invalid log level', () => {
      process.env.GEMINI_API_KEY = 'test-api-key';
      process.env.LOG_LEVEL = 'invalid';
      
      expect(() => loadConfig()).toThrow('Configuration validation failed');
    });

    // Non-numeric input parses to NaN, which the zod schema rejects.
    it('should throw error for invalid numeric values', () => {
      process.env.GEMINI_API_KEY = 'test-api-key';
      process.env.RATE_LIMIT_REQUESTS = 'not-a-number';
      
      expect(() => loadConfig()).toThrow('Configuration validation failed');
    });
  });
});

```

--------------------------------------------------------------------------------
/src/utils/rateLimiter.ts:
--------------------------------------------------------------------------------

```typescript
import { config } from '../config/index.js';
import { RateLimitError } from './errors.js';
import { logger } from './logger.js';

interface RateLimitEntry {
  count: number;
  resetTime: number;
}

/**
 * Simple in-memory rate limiter using a fixed window per identifier.
 *
 * Note: this is a fixed window (the count resets when the window expires),
 * not a true sliding window — short bursts straddling a window boundary can
 * briefly exceed the nominal rate.
 */
export class RateLimiter {
  private requests = new Map<string, RateLimitEntry>();
  private cleanupInterval: NodeJS.Timeout;

  constructor(
    private maxRequests: number = config.rateLimitRequests,
    private windowMs: number = config.rateLimitWindow
  ) {
    // Clean up expired entries every minute
    this.cleanupInterval = setInterval(() => {
      this.cleanup();
    }, 60000);
    // Don't let the housekeeping timer keep the Node.js process alive —
    // without unref() the process hangs on shutdown unless destroy() is
    // explicitly called.
    this.cleanupInterval.unref?.();
  }

  /**
   * Check if a request is allowed for the given identifier.
   * Records the request on success; throws RateLimitError when the
   * window's limit has been reached.
   */
  checkLimit(identifier: string = 'default'): void {
    if (!config.rateLimitEnabled) {
      return;
    }

    const now = Date.now();
    const entry = this.requests.get(identifier);

    if (!entry) {
      // First request for this identifier: open a new window.
      this.requests.set(identifier, {
        count: 1,
        resetTime: now + this.windowMs
      });
      return;
    }

    if (now >= entry.resetTime) {
      // Window has expired: reset the count and start a new window.
      entry.count = 1;
      entry.resetTime = now + this.windowMs;
      return;
    }

    if (entry.count >= this.maxRequests) {
      const resetIn = Math.ceil((entry.resetTime - now) / 1000);
      logger.security(`Rate limit exceeded for ${identifier}`, {
        count: entry.count,
        limit: this.maxRequests,
        resetIn
      });

      throw new RateLimitError(`Rate limit exceeded. Try again in ${resetIn} seconds.`);
    }

    entry.count++;
  }

  /**
   * Get current usage (count, limit, window reset time) for an identifier.
   */
  getUsage(identifier: string = 'default'): { count: number; limit: number; resetTime: number } {
    const entry = this.requests.get(identifier);
    const now = Date.now();

    if (!entry || now >= entry.resetTime) {
      return {
        count: 0,
        limit: this.maxRequests,
        resetTime: now + this.windowMs
      };
    }

    return {
      count: entry.count,
      limit: this.maxRequests,
      resetTime: entry.resetTime
    };
  }

  /**
   * Remove entries whose window has already expired.
   */
  private cleanup(): void {
    const now = Date.now();
    let cleaned = 0;

    for (const [identifier, entry] of this.requests.entries()) {
      if (now >= entry.resetTime) {
        this.requests.delete(identifier);
        cleaned++;
      }
    }

    if (cleaned > 0) {
      logger.debug(`Cleaned up ${cleaned} expired rate limit entries`);
    }
  }

  /**
   * Reset rate limit state for an identifier.
   */
  reset(identifier: string = 'default'): void {
    this.requests.delete(identifier);
    logger.debug(`Reset rate limit for ${identifier}`);
  }

  /**
   * Destroy the rate limiter and release the cleanup timer.
   */
  destroy(): void {
    if (this.cleanupInterval) {
      clearInterval(this.cleanupInterval);
    }
    this.requests.clear();
  }
}

// Export singleton instance
export const rateLimiter = new RateLimiter();

```

--------------------------------------------------------------------------------
/src/utils/errors.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Custom error classes for the MCP Server
 */

/**
 * Base error type for MCP server failures. Carries a JSON-RPC error code
 * (defaults to -32603, "internal error") and optional structured data.
 */
export class MCPError extends Error {
  constructor(
    message: string,
    public code: number = -32603,
    public data?: any
  ) {
    super(message);
    this.name = 'MCPError';
  }

  /** Serialize into a JSON-RPC 2.0 error response for the given request id. */
  toMCPResponse(id: any) {
    const error: { code: number; message: string; data?: any } = {
      code: this.code,
      message: this.message
    };
    // Truthy check mirrors the original spread-conditional: falsy data is omitted.
    if (this.data) {
      error.data = this.data;
    }
    return {
      jsonrpc: '2.0',
      id,
      error
    };
  }
}

/** Invalid request parameters (JSON-RPC code -32602). */
export class ValidationError extends MCPError {
  constructor(message: string, data?: any) {
    super(message, -32602, data);
    this.name = 'ValidationError';
  }
}

/** API-key / authentication failure (server-defined code -32001). */
export class AuthenticationError extends MCPError {
  constructor(message: string = 'Invalid API key') {
    super(message, -32001);
    this.name = 'AuthenticationError';
  }
}

/** Too many requests in the current window (server-defined code -32002). */
export class RateLimitError extends MCPError {
  constructor(message: string = 'Rate limit exceeded') {
    super(message, -32002);
    this.name = 'RateLimitError';
  }
}

/** Operation exceeded its deadline (server-defined code -32003). */
export class TimeoutError extends MCPError {
  constructor(message: string = 'Request timeout') {
    super(message, -32003);
    this.name = 'TimeoutError';
  }
}

/** Failure reported by (or while calling) the Gemini API (code -32603). */
export class GeminiAPIError extends MCPError {
  constructor(
    message: string,
    public originalError?: any
  ) {
    super(message, -32603);
    this.name = 'GeminiAPIError';
    // Mirror the raw error into `data` so toMCPResponse() includes it.
    this.data = originalError;
  }
}

/**
 * Utility helpers for classifying and translating Gemini API errors.
 */
export class ErrorHandler {
  /** Wrap a raw Gemini API failure into a GeminiAPIError with a readable message. */
  static handleGeminiError(error: any): GeminiAPIError {
    const apiError = error?.error;
    if (!apiError) {
      return new GeminiAPIError('Unknown Gemini API error', error);
    }

    let message = 'Gemini API error';
    if (apiError.message) {
      message = apiError.message;
    } else if (apiError.status) {
      message = `Gemini API error: ${apiError.status}`;
    }
    return new GeminiAPIError(message, apiError);
  }

  /** True for transient failures (server errors / rate limits) worth retrying. */
  static isRetryableError(error: any): boolean {
    if (!(error instanceof GeminiAPIError)) {
      return false;
    }
    const status = error.originalError?.status;
    const code = error.originalError?.code;
    return (
      ['UNAVAILABLE', 'RESOURCE_EXHAUSTED', 'INTERNAL'].includes(status) ||
      code === 503 ||
      code === 429
    );
  }

  /** Exponential backoff delay: 1s, 2s, 4s, 8s, then capped at 16s. */
  static getRetryDelay(attempt: number): number {
    return Math.min(1000 * Math.pow(2, attempt), 16000);
  }
}

/**
 * Async retry utility with exponential backoff.
 *
 * Retries `operation` up to `maxAttempts` times total, but only for errors
 * that ErrorHandler.isRetryableError classifies as transient. The delay
 * doubles each attempt (baseDelay, 2x, 4x, ...) and is now capped at 16s,
 * consistent with ErrorHandler.getRetryDelay.
 *
 * @param operation   async thunk to execute
 * @param maxAttempts total attempts including the first (default 3)
 * @param baseDelay   initial backoff in milliseconds (default 1000)
 */
export async function withRetry<T>(
  operation: () => Promise<T>,
  maxAttempts: number = 3,
  baseDelay: number = 1000
): Promise<T> {
  let lastError: any;

  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    try {
      return await operation();
    } catch (error) {
      lastError = error;

      // Give up immediately on the last attempt or for non-retryable errors.
      if (attempt === maxAttempts - 1 || !ErrorHandler.isRetryableError(error)) {
        throw error;
      }

      // Exponential backoff, capped at 16s to match getRetryDelay.
      const delay = Math.min(baseDelay * Math.pow(2, attempt), 16000);
      await new Promise(resolve => setTimeout(resolve, delay));
    }
  }

  // Unreachable in practice (the loop either returns or throws); kept as a safeguard.
  throw lastError;
}

```

--------------------------------------------------------------------------------
/src/types.ts:
--------------------------------------------------------------------------------

```typescript
// Base MCP Protocol Types (JSON-RPC 2.0 envelope)

// A JSON-RPC 2.0 request as received from the MCP client.
export interface MCPRequest {
  jsonrpc: '2.0';
  id: string | number;
  method: string;
  params?: any;
}

// A JSON-RPC 2.0 response; exactly one of result/error should be populated.
export interface MCPResponse {
  jsonrpc: '2.0';
  id: string | number;
  result?: any;
  error?: {
    code: number;
    message: string;
    data?: any;
  };
}

// JSON-RPC error object shape.
// NOTE(review): this interface shares its name with the MCPError class in
// src/utils/errors.ts — importing both into one module requires aliasing.
export interface MCPError {
  code: number;
  message: string;
  data?: any;
}

// Connection Management
export interface ConnectionState {
  connectedAt: Date;
  lastMessageAt: Date;
  initialized: boolean;
  activeRequests: Set<string | number>;
  ip: string;
}

// Notification Types
export interface NotificationMessage {
  jsonrpc: '2.0';
  method: string;
  params?: any;
}

export interface ErrorNotification extends NotificationMessage {
  method: 'notifications/error';
  params: {
    code: number;
    message: string;
    data?: any;
  };
}

export interface ProgressParams {
  progressToken: string | number;
  progress: number;
  total?: number;
}

export interface ProgressNotification extends NotificationMessage {
  method: 'notifications/progress';
  params: ProgressParams;
}

// Request Types
export interface GenerateRequest extends MCPRequest {
  method: 'generate';
  params: {
    prompt: string;
    temperature?: number;
    maxTokens?: number;
    stopSequences?: string[];
  };
}

export interface GenerateResponse extends MCPResponse {
  result: {
    type: 'completion';
    content: string;
    metadata: {
      model: string;
      provider: string;
      temperature?: number;
      maxTokens?: number;
      stopSequences?: string[];
    };
  };
}

export interface StreamRequest extends MCPRequest {
  method: 'stream';
  params: {
    prompt: string;
    temperature?: number;
    maxTokens?: number;
    stopSequences?: string[];
  };
}

export interface StreamResponse extends MCPResponse {
  result: {
    type: 'stream';
    content: string;
    done: boolean;
  };
}

export interface CancelRequest extends MCPRequest {
  method: 'cancel';
  params: {
    requestId: string | number;
  };
}

export interface ConfigureRequest extends MCPRequest {
  method: 'configure';
  params: {
    configuration: {
      model?: string;
      temperature?: number;
      maxTokens?: number;
      stopSequences?: string[];
      timeout?: number;
    };
  };
}

// Server Configuration
export interface ServerInfo {
  name: string;
  version: string;
}

export interface ServerCapabilities {
  experimental?: Record<string, any>;
  prompts?: {
    listChanged?: boolean;
  };
  resources?: {
    subscribe?: boolean;
    listChanged?: boolean;
  };
  tools?: {
    listChanged?: boolean;
  };
  logging?: Record<string, any>;
}

export interface InitializeResult {
  protocolVersion: string;
  serverInfo: ServerInfo;
  capabilities: ServerCapabilities;
}

// Lifecycle Types
export interface ShutdownRequest extends MCPRequest {
  method: 'shutdown';
}

export interface ExitNotification extends NotificationMessage {
  method: 'exit';
}

// Resource and Prompt References (for interfaces)
export interface ResourceReference {
  type: 'resource';
  uri: string;
}

export interface PromptReference {
  type: 'prompt';
  name: string;
}

export interface CompletionArgument {
  name: string;
  value: string;
}

export interface Completion {
  values: Array<{
    value: string;
    description?: string;
  }>;
  total?: number;
  hasMore?: boolean;
}

```

--------------------------------------------------------------------------------
/src/utils/validation.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from 'zod';
import { ValidationError } from './errors.js';

/**
 * Common validation schemas shared by the tool schemas below.
 */
export const CommonSchemas = {
  // Gemini model names.
  // NOTE(review): this allow-list is a point-in-time snapshot of model ids —
  // keep in sync with Google's published Gemini model list.
  geminiModel: z.enum([
    'gemini-2.5-pro',
    'gemini-2.5-flash',
    'gemini-2.5-flash-lite',
    'gemini-2.0-flash',
    'gemini-2.0-flash-lite',
    'gemini-2.0-pro-experimental',
    'gemini-1.5-pro',
    'gemini-1.5-flash'
  ]),

  // Sampling temperature range (0–2)
  temperature: z.number().min(0).max(2),

  // Maximum output tokens (1–8192)
  maxTokens: z.number().min(1).max(8192),

  // Top-k and top-p sampling parameters
  topK: z.number().min(1).max(100),
  topP: z.number().min(0).max(1),

  // Conversation ID (non-empty, at most 100 chars)
  conversationId: z.string().min(1).max(100),

  // JSON schema string: must parse as JSON (its structure is not validated)
  jsonSchema: z.string().refine(val => {
    try {
      JSON.parse(val);
      return true;
    } catch {
      return false;
    }
  }, 'Must be valid JSON'),

  // Safety settings string: must parse as a JSON array
  safetySettings: z.string().refine(val => {
    try {
      const parsed = JSON.parse(val);
      return Array.isArray(parsed);
    } catch {
      return false;
    }
  }, 'Must be valid JSON array'),

  // Base64 image data.
  // NOTE(review): only the data-URL prefix is checked; the base64 payload
  // itself is not validated.
  base64Image: z
    .string()
    .regex(/^data:image\/(png|jpeg|jpg|gif|webp);base64,/, 'Must be valid base64 image data'),

  // URL validation
  imageUrl: z.string().url('Must be a valid URL')
};

/**
 * Tool parameter validation schemas
 */
/**
 * Tool parameter validation schemas — one schema per MCP tool exposed by the server.
 */
export const ToolSchemas = {
  // generate_text: text generation with optional JSON mode, grounding, and context
  generateText: z.object({
    prompt: z.string().min(1, 'Prompt is required'),
    model: CommonSchemas.geminiModel.optional(),
    systemInstruction: z.string().optional(),
    temperature: CommonSchemas.temperature.optional(),
    maxTokens: CommonSchemas.maxTokens.optional(),
    topK: CommonSchemas.topK.optional(),
    topP: CommonSchemas.topP.optional(),
    jsonMode: z.boolean().optional(),
    jsonSchema: CommonSchemas.jsonSchema.optional(),
    grounding: z.boolean().optional(),
    safetySettings: CommonSchemas.safetySettings.optional(),
    conversationId: CommonSchemas.conversationId.optional()
  }),

  // analyze_image: exactly one image source (URL or base64) must be supplied
  analyzeImage: z
    .object({
      prompt: z.string().min(1, 'Prompt is required'),
      imageUrl: CommonSchemas.imageUrl.optional(),
      imageBase64: CommonSchemas.base64Image.optional(),
      model: z.enum(['gemini-2.5-pro', 'gemini-2.5-flash', 'gemini-2.0-flash']).optional()
    })
    .refine(
      data => data.imageUrl || data.imageBase64,
      'Either imageUrl or imageBase64 must be provided'
    ),

  // count_tokens
  countTokens: z.object({
    text: z.string().min(1, 'Text is required'),
    model: CommonSchemas.geminiModel.optional()
  }),

  // list_models: optional capability filter
  listModels: z.object({
    filter: z.enum(['all', 'thinking', 'vision', 'grounding', 'json_mode']).optional()
  }),

  // embed_text: embedding models are distinct from the generation models
  embedText: z.object({
    text: z.string().min(1, 'Text is required'),
    model: z.enum(['text-embedding-004', 'text-multilingual-embedding-002']).optional()
  }),

  // get_help
  getHelp: z.object({
    topic: z
      .enum(['overview', 'tools', 'models', 'parameters', 'examples', 'quick-start'])
      .optional()
  })
};

/**
 * Validation utility class
 */
/**
 * Validation utility class: schema validation, input sanitization, and
 * structural checks for incoming MCP requests.
 */
export class Validator {
  /**
   * Validate tool parameters against a Zod schema.
   *
   * @param schema - Zod schema describing the expected parameter shape
   * @param params - Raw, untrusted parameters from the client
   * @returns The parsed, typed parameters
   * @throws ValidationError listing every failed field when parsing fails;
   *         non-Zod errors are rethrown unchanged
   */
  static validateToolParams<T>(schema: z.ZodSchema<T>, params: unknown): T {
    try {
      return schema.parse(params);
    } catch (error) {
      if (error instanceof z.ZodError) {
        const issues = error.issues
          .map(issue => `${issue.path.join('.')}: ${issue.message}`)
          .join(', ');
        throw new ValidationError(`Invalid parameters: ${issues}`);
      }
      throw error;
    }
  }

  /**
   * Sanitize string input: enforce a length cap and strip control characters.
   *
   * @param input - The string to sanitize
   * @param maxLength - Maximum allowed length (default 10000)
   * @throws ValidationError when the input is not a string or exceeds maxLength
   */
  static sanitizeString(input: string, maxLength: number = 10000): string {
    if (typeof input !== 'string') {
      throw new ValidationError('Input must be a string');
    }

    if (input.length > maxLength) {
      throw new ValidationError(`Input too long (max ${maxLength} characters)`);
    }

    // Remove null bytes and other control characters except newlines and tabs
    // eslint-disable-next-line no-control-regex
    return input.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, '');
  }

  /**
   * Parse a JSON string, converting syntax errors into a ValidationError.
   *
   * @returns The parsed value (callers are expected to narrow it themselves)
   * @throws ValidationError when the string is not valid JSON
   */
  static validateJSON(jsonString: string): any {
    try {
      return JSON.parse(jsonString);
    } catch {
      // The original parse error carries position details we don't expose to clients
      throw new ValidationError('Invalid JSON format');
    }
  }

  /**
   * Validate the structural shape of an incoming MCP (JSON-RPC 2.0) request.
   *
   * Accepts unknown input and narrows it step by step; notifications
   * (requests without an id) are deliberately rejected here.
   *
   * @throws ValidationError on the first structural problem found
   */
  static validateMCPRequest(request: unknown): void {
    if (!request || typeof request !== 'object') {
      throw new ValidationError('Request must be an object');
    }

    const req = request as Record<string, unknown>;

    if (req.jsonrpc !== '2.0') {
      throw new ValidationError('Invalid JSON-RPC version');
    }

    if (typeof req.method !== 'string') {
      throw new ValidationError('Method must be a string');
    }

    if (req.id === undefined || req.id === null) {
      throw new ValidationError('Request ID is required');
    }
  }
}

```

--------------------------------------------------------------------------------
/docs/configuration.md:
--------------------------------------------------------------------------------

```markdown
# Configuration Guide

## Environment Variables

The MCP Server Gemini can be configured using environment variables or a `.env` file.

### Required Configuration

| Variable | Required | Description | Example |
|----------|----------|-------------|---------|
| `GEMINI_API_KEY` | ✅ | Your Google AI Studio API key | `AIzaSy...` (placeholder — use your own key; never commit a real one) |

### Optional Configuration

| Variable | Default | Description | Example |
|----------|---------|-------------|---------|
| `LOG_LEVEL` | `info` | Logging level (error, warn, info, debug) | `debug` |
| `ENABLE_METRICS` | `false` | Enable performance metrics | `true` |
| `RATE_LIMIT_ENABLED` | `true` | Enable rate limiting | `false` |
| `RATE_LIMIT_REQUESTS` | `100` | Max requests per window | `200` |
| `RATE_LIMIT_WINDOW` | `60000` | Rate limit window in ms | `120000` |
| `REQUEST_TIMEOUT` | `30000` | Request timeout in ms | `60000` |
| `NODE_ENV` | `production` | Environment mode | `development` |

### Example .env File

```bash
# Required
GEMINI_API_KEY=your_api_key_here

# Optional - Logging
LOG_LEVEL=info
ENABLE_METRICS=false

# Optional - Rate Limiting
RATE_LIMIT_ENABLED=true
RATE_LIMIT_REQUESTS=100
RATE_LIMIT_WINDOW=60000

# Optional - Timeouts
REQUEST_TIMEOUT=30000

# Optional - Development
NODE_ENV=production
```

## MCP Client Configuration

### Claude Desktop

#### Configuration File Locations

| OS | Path |
|----|------|
| **macOS** | `~/Library/Application Support/Claude/claude_desktop_config.json` |
| **Windows** | `%APPDATA%\Claude\claude_desktop_config.json` |
| **Linux** | `~/.config/Claude/claude_desktop_config.json` |

#### Basic Configuration

```json
{
  "mcpServers": {
    "gemini": {
      "command": "mcp-server-gemini",
      "env": {
        "GEMINI_API_KEY": "your_api_key_here"
      }
    }
  }
}
```

#### Advanced Configuration

```json
{
  "mcpServers": {
    "gemini": {
      "command": "mcp-server-gemini",
      "env": {
        "GEMINI_API_KEY": "your_api_key_here",
        "LOG_LEVEL": "debug",
        "RATE_LIMIT_REQUESTS": "200",
        "REQUEST_TIMEOUT": "60000"
      }
    }
  }
}
```

#### Local Development Configuration

```json
{
  "mcpServers": {
    "gemini": {
      "command": "node",
      "args": ["/path/to/mcp-server-gemini/dist/enhanced-stdio-server.js"],
      "cwd": "/path/to/mcp-server-gemini",
      "env": {
        "GEMINI_API_KEY": "your_api_key_here",
        "NODE_ENV": "development",
        "LOG_LEVEL": "debug"
      }
    }
  }
}
```

### Cursor IDE

Add to your Cursor MCP configuration:

```json
{
  "mcpServers": {
    "gemini": {
      "type": "stdio",
      "command": "mcp-server-gemini",
      "env": {
        "GEMINI_API_KEY": "your_api_key_here"
      }
    }
  }
}
```

### Windsurf

Configure in Windsurf settings:

```json
{
  "mcp": {
    "servers": {
      "gemini": {
        "command": "mcp-server-gemini",
        "env": {
          "GEMINI_API_KEY": "your_api_key_here"
        }
      }
    }
  }
}
```

## Security Configuration

### API Key Management

#### Best Practices
1. **Never commit API keys** to version control
2. **Use environment variables** or secure secret management
3. **Rotate keys regularly** for production use
4. **Use different keys** for development and production

#### Secure Storage Options

**Option 1: Environment Variables**
```bash
export GEMINI_API_KEY="your_api_key_here"
```

**Option 2: .env File (Development)**
```bash
# .env
GEMINI_API_KEY=your_api_key_here
```

**Option 3: System Keychain (macOS)**
```bash
security add-generic-password -a "mcp-gemini" -s "gemini-api-key" -w "your_api_key_here"
```

**Option 4: Docker Secrets**
```yaml
# docker-compose.yml
services:
  mcp-server:
    image: mcp-server-gemini
    secrets:
      - gemini_api_key
secrets:
  gemini_api_key:
    external: true
```

### Rate Limiting Configuration

Configure rate limiting to protect your API quota:

```bash
# Conservative settings
RATE_LIMIT_ENABLED=true
RATE_LIMIT_REQUESTS=50
RATE_LIMIT_WINDOW=60000

# High-throughput settings
RATE_LIMIT_ENABLED=true
RATE_LIMIT_REQUESTS=500
RATE_LIMIT_WINDOW=60000

# Disable for development
RATE_LIMIT_ENABLED=false
```

## Performance Configuration

### Timeout Settings

```bash
# Conservative (stable connections)
REQUEST_TIMEOUT=30000

# Aggressive (fast networks)
REQUEST_TIMEOUT=10000

# Patient (complex requests)
REQUEST_TIMEOUT=120000
```

### Logging Configuration

```bash
# Production
LOG_LEVEL=warn
ENABLE_METRICS=true

# Development
LOG_LEVEL=debug
ENABLE_METRICS=false

# Debugging
LOG_LEVEL=debug
ENABLE_METRICS=true
```

## Troubleshooting Configuration

### Common Issues

#### 1. API Key Not Found
```bash
# Check if environment variable is set
echo $GEMINI_API_KEY

# Verify .env file exists and is readable
cat .env | grep GEMINI_API_KEY
```

#### 2. Permission Errors
```bash
# Check file permissions
ls -la .env

# Fix permissions
chmod 600 .env
```

#### 3. Rate Limiting Issues
```bash
# Temporarily disable rate limiting
RATE_LIMIT_ENABLED=false

# Increase limits
RATE_LIMIT_REQUESTS=1000
RATE_LIMIT_WINDOW=60000
```

### Debug Configuration

Enable debug mode for troubleshooting:

```json
{
  "mcpServers": {
    "gemini": {
      "command": "mcp-server-gemini",
      "env": {
        "GEMINI_API_KEY": "your_api_key_here",
        "LOG_LEVEL": "debug",
        "NODE_ENV": "development"
      }
    }
  }
}
```

## Validation

The server validates all configuration on startup. Invalid configuration will result in clear error messages:

```
Configuration validation failed:
geminiApiKey: GEMINI_API_KEY is required
rateLimitRequests: Expected number, received string
```

## Configuration Schema

The server uses Zod for configuration validation. See `src/config/index.ts` for the complete schema definition.

## Environment-Specific Configurations

### Development
```bash
NODE_ENV=development
LOG_LEVEL=debug
RATE_LIMIT_ENABLED=false
REQUEST_TIMEOUT=60000
```

### Production
```bash
NODE_ENV=production
LOG_LEVEL=warn
RATE_LIMIT_ENABLED=true
RATE_LIMIT_REQUESTS=100
REQUEST_TIMEOUT=30000
ENABLE_METRICS=true
```

### Testing
```bash
NODE_ENV=test
LOG_LEVEL=error
RATE_LIMIT_ENABLED=false
REQUEST_TIMEOUT=10000
```

```

--------------------------------------------------------------------------------
/tests/integration/gemini-api.test.ts:
--------------------------------------------------------------------------------

```typescript
import { GoogleGenAI } from '@google/genai';
import { config } from '../../src/config/index.js';

// Run the suite only when an API key is configured; otherwise register it as skipped.
const describeIf = (condition: boolean) => {
  return condition ? describe : describe.skip;
};

// Live integration tests against the real Gemini API — requires network access
// and GEMINI_API_KEY; the whole suite is skipped when the key is absent.
describeIf(!!process.env.GEMINI_API_KEY)('Gemini API Integration', () => {
  let genAI: GoogleGenAI;

  beforeAll(() => {
    // Use the validated config value rather than reading the env var directly
    genAI = new GoogleGenAI({ apiKey: config.geminiApiKey });
  });

  describe('Text Generation', () => {
    // Generous 30s timeouts below: each test performs a live model call
    it('should generate text with gemini-2.5-flash', async () => {
      const result = await genAI.models.generateContent({
        model: 'gemini-2.5-flash',
        contents: [{
          parts: [{ text: 'Say hello in exactly 3 words.' }],
          role: 'user'
        }]
      });

      expect(result).toBeDefined();
      expect(result.candidates).toBeDefined();
      expect(result.candidates!.length).toBeGreaterThan(0);
      expect(result.candidates![0].content?.parts?.[0]?.text).toBeDefined();
    }, 30000);

    it('should generate text with system instruction', async () => {
      const result = await genAI.models.generateContent({
        model: 'gemini-2.5-flash',
        systemInstruction: {
          parts: [{ text: 'You are a helpful assistant that always responds with exactly one word.' }]
        },
        contents: [{
          parts: [{ text: 'What is the capital of France?' }],
          role: 'user'
        }]
      });

      expect(result).toBeDefined();
      expect(result.candidates).toBeDefined();
      expect(result.candidates!.length).toBeGreaterThan(0);

      const responseText = result.candidates![0].content?.parts?.[0]?.text;
      expect(responseText).toBeDefined();
      expect(responseText!.trim().split(/\s+/).length).toBeLessThanOrEqual(2); // Allow for some flexibility
    }, 30000);

    it('should generate JSON output', async () => {
      const result = await genAI.models.generateContent({
        model: 'gemini-2.5-flash',
        generationConfig: {
          responseMimeType: 'application/json',
          responseSchema: {
            type: 'object',
            properties: {
              answer: { type: 'string' },
              confidence: { type: 'number' }
            },
            required: ['answer', 'confidence']
          }
        },
        contents: [{
          parts: [{ text: 'What is 2+2? Respond with your answer and confidence level.' }],
          role: 'user'
        }]
      });

      expect(result).toBeDefined();
      expect(result.candidates).toBeDefined();

      const responseText = result.candidates![0].content?.parts?.[0]?.text;
      expect(responseText).toBeDefined();

      // Should be valid JSON
      const jsonResponse = JSON.parse(responseText!);
      expect(jsonResponse.answer).toBeDefined();
      expect(jsonResponse.confidence).toBeDefined();
      expect(typeof jsonResponse.confidence).toBe('number');
    }, 30000);
  });

  describe('Token Counting', () => {
    it('should count tokens for text', async () => {
      const result = await genAI.models.countTokens({
        model: 'gemini-2.5-flash',
        contents: 'This is a test message for token counting.'
      });

      expect(result).toBeDefined();
      expect(result.totalTokens).toBeGreaterThan(0);
      expect(typeof result.totalTokens).toBe('number');
    }, 10000);

    it('should count tokens for longer text', async () => {
      const longText = 'This is a longer test message. '.repeat(100);
      const result = await genAI.models.countTokens({
        model: 'gemini-2.5-flash',
        contents: longText
      });

      expect(result).toBeDefined();
      expect(result.totalTokens).toBeGreaterThan(100); // Should be significantly more tokens
    }, 10000);
  });

  describe('Model Listing', () => {
    it('should list available models', async () => {
      const result = await genAI.models.list();

      expect(result).toBeDefined();
      expect(result.models).toBeDefined();
      expect(Array.isArray(result.models)).toBe(true);
      expect(result.models!.length).toBeGreaterThan(0);

      // Check that we have some expected models
      const modelNames = result.models!.map(model => model.name);
      expect(modelNames.some(name => name?.includes('gemini'))).toBe(true);
    }, 10000);
  });

  describe('Embeddings', () => {
    it('should generate embeddings', async () => {
      const result = await genAI.models.embedContent({
        model: 'text-embedding-004',
        contents: 'This is a test text for embedding generation.'
      });

      expect(result).toBeDefined();
      expect(result.embeddings).toBeDefined();
      expect(Array.isArray(result.embeddings)).toBe(true);
      expect(result.embeddings!.length).toBeGreaterThan(0);

      const embedding = result.embeddings![0];
      expect(embedding.values).toBeDefined();
      expect(Array.isArray(embedding.values)).toBe(true);
      expect(embedding.values!.length).toBeGreaterThan(0);

      // Check that values are numbers
      embedding.values!.forEach(value => {
        expect(typeof value).toBe('number');
      });
    }, 15000);
  });

  describe('Error Handling', () => {
    it('should handle invalid model name', async () => {
      await expect(
        genAI.models.generateContent({
          model: 'invalid-model-name',
          contents: [{
            parts: [{ text: 'Test' }],
            role: 'user'
          }]
        })
      ).rejects.toThrow();
    }, 10000);

    it('should handle empty content', async () => {
      await expect(
        genAI.models.generateContent({
          model: 'gemini-2.5-flash',
          contents: [{
            parts: [{ text: '' }],
            role: 'user'
          }]
        })
      ).rejects.toThrow();
    }, 10000);
  });

  describe('Rate Limiting', () => {
    it('should handle multiple concurrent requests', async () => {
      // Fire several requests at once; some may be rejected by quota limits
      const requests = Array.from({ length: 5 }, (_, i) =>
        genAI.models.generateContent({
          model: 'gemini-2.5-flash',
          contents: [{
            parts: [{ text: `Test request ${i + 1}` }],
            role: 'user'
          }]
        })
      );

      const results = await Promise.allSettled(requests);

      // At least some requests should succeed
      const successful = results.filter(result => result.status === 'fulfilled');
      expect(successful.length).toBeGreaterThan(0);

      // Check that successful results have the expected structure
      successful.forEach(result => {
        if (result.status === 'fulfilled') {
          expect(result.value.candidates).toBeDefined();
          expect(result.value.candidates!.length).toBeGreaterThan(0);
        }
      });
    }, 60000);
  });
});

```

--------------------------------------------------------------------------------
/docs/api.md:
--------------------------------------------------------------------------------

```markdown
# API Documentation

## Overview

The MCP Server Gemini provides 6 powerful tools for interacting with Google's Gemini AI models through the Model Context Protocol.

## Tools

### 1. generate_text

Generate text using Gemini models with advanced features.

#### Parameters

| Parameter | Type | Required | Default | Description |
|-----------|------|----------|---------|-------------|
| `prompt` | string | ✅ | - | The text prompt to send to Gemini |
| `model` | string | ❌ | `gemini-2.5-flash` | Gemini model to use |
| `systemInstruction` | string | ❌ | - | System instruction to guide behavior |
| `temperature` | number | ❌ | `0.7` | Creativity level (0-2) |
| `maxTokens` | number | ❌ | `2048` | Maximum tokens to generate |
| `topK` | number | ❌ | `40` | Top-k sampling parameter |
| `topP` | number | ❌ | `0.95` | Top-p (nucleus) sampling |
| `jsonMode` | boolean | ❌ | `false` | Enable structured JSON output |
| `jsonSchema` | string | ❌ | - | JSON schema for validation (when jsonMode=true) |
| `grounding` | boolean | ❌ | `false` | Enable Google Search grounding |
| `safetySettings` | string | ❌ | - | Safety settings as JSON string |
| `conversationId` | string | ❌ | - | ID for conversation context |

#### Example Usage

```javascript
// Basic text generation
{
  "prompt": "Explain quantum computing in simple terms",
  "model": "gemini-2.5-flash",
  "temperature": 0.7
}

// JSON mode with schema
{
  "prompt": "Extract key information from this text: ...",
  "jsonMode": true,
  "jsonSchema": "{\"type\":\"object\",\"properties\":{\"summary\":{\"type\":\"string\"},\"keyPoints\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}}}}"
}

// With grounding for current information
{
  "prompt": "What are the latest developments in AI?",
  "grounding": true,
  "model": "gemini-2.5-pro"
}
```

### 2. analyze_image

Analyze images using Gemini's vision capabilities.

#### Parameters

| Parameter | Type | Required | Default | Description |
|-----------|------|----------|---------|-------------|
| `prompt` | string | ✅ | - | Question or instruction about the image |
| `imageUrl` | string | ❌* | - | URL of the image to analyze |
| `imageBase64` | string | ❌* | - | Base64-encoded image data |
| `model` | string | ❌ | `gemini-2.5-flash` | Vision-capable model |

*Either `imageUrl` or `imageBase64` must be provided.

#### Example Usage

```javascript
// Analyze image from URL
{
  "prompt": "What's in this image?",
  "imageUrl": "https://example.com/image.jpg",
  "model": "gemini-2.5-pro"
}

// Analyze base64 image
{
  "prompt": "Describe the technical diagram",
  "imageBase64": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA..."
}
```

### 3. count_tokens

Count tokens for cost estimation and planning.

#### Parameters

| Parameter | Type | Required | Default | Description |
|-----------|------|----------|---------|-------------|
| `text` | string | ✅ | - | Text to count tokens for |
| `model` | string | ❌ | `gemini-2.5-flash` | Model to use for counting |

#### Example Usage

```javascript
{
  "text": "This is a sample text to count tokens for cost estimation.",
  "model": "gemini-2.5-pro"
}
```

### 4. list_models

List all available Gemini models and their capabilities.

#### Parameters

| Parameter | Type | Required | Default | Description |
|-----------|------|----------|---------|-------------|
| `filter` | string | ❌ | `all` | Filter models by capability |

#### Filter Options
- `all` - All available models
- `thinking` - Models with thinking capabilities
- `vision` - Models with vision support
- `grounding` - Models with Google Search grounding
- `json_mode` - Models supporting JSON mode

#### Example Usage

```javascript
// List all models
{
  "filter": "all"
}

// List only thinking models
{
  "filter": "thinking"
}
```

### 5. embed_text

Generate text embeddings using Gemini embedding models.

#### Parameters

| Parameter | Type | Required | Default | Description |
|-----------|------|----------|---------|-------------|
| `text` | string | ✅ | - | Text to generate embeddings for |
| `model` | string | ❌ | `text-embedding-004` | Embedding model to use |

#### Available Embedding Models
- `text-embedding-004` - Latest embedding model
- `text-multilingual-embedding-002` - Multilingual support

#### Example Usage

```javascript
{
  "text": "This is a sample text for embedding generation.",
  "model": "text-embedding-004"
}
```

### 6. get_help

Get help and usage information for the server.

#### Parameters

| Parameter | Type | Required | Default | Description |
|-----------|------|----------|---------|-------------|
| `topic` | string | ❌ | `overview` | Help topic to get information about |

#### Available Topics
- `overview` - General overview and quick start
- `tools` - Detailed tool information
- `models` - Model selection guide
- `parameters` - Parameter explanations
- `examples` - Usage examples
- `quick-start` - Quick start guide

#### Example Usage

```javascript
// Get overview
{
  "topic": "overview"
}

// Get tool details
{
  "topic": "tools"
}
```

## Response Format

All tools return responses in the standard MCP format:

```javascript
{
  "jsonrpc": "2.0",
  "id": "request-id",
  "result": {
    "content": [
      {
        "type": "text",
        "text": "Response content here"
      }
    ],
    "metadata": {
      // Additional metadata
    }
  }
}
```

## Error Handling

Errors are returned in standard MCP error format:

```javascript
{
  "jsonrpc": "2.0",
  "id": "request-id",
  "error": {
    "code": -32603,
    "message": "Error description",
    "data": {
      // Additional error details
    }
  }
}
```

### Common Error Codes

| Code | Description |
|------|-------------|
| `-32602` | Invalid parameters |
| `-32603` | Internal error |
| `-32001` | Authentication error |
| `-32002` | Rate limit exceeded |
| `-32003` | Request timeout |

## Rate Limiting

The server implements rate limiting to protect against abuse:

- **Default**: 100 requests per minute
- **Configurable**: Set via environment variables
- **Per-client**: Rate limits are applied per client connection

## Best Practices

### Model Selection
- Use `gemini-2.5-flash` for general purposes
- Use `gemini-2.5-pro` for complex reasoning
- Use `gemini-2.5-flash-lite` for high-throughput tasks

### Parameter Optimization
- Lower temperature (0.1-0.3) for factual content
- Higher temperature (0.8-1.2) for creative content
- Use `maxTokens` to control response length and costs

### Error Handling
- Implement retry logic for transient errors
- Handle rate limiting gracefully
- Validate parameters before sending requests

### Performance
- Use conversation IDs to maintain context
- Cache embeddings when possible
- Monitor token usage for cost optimization

```

--------------------------------------------------------------------------------
/tests/unit/errors.test.ts:
--------------------------------------------------------------------------------

```typescript
import {
  MCPError,
  ValidationError,
  AuthenticationError,
  RateLimitError,
  TimeoutError,
  GeminiAPIError,
  ErrorHandler,
  withRetry
} from '../../src/utils/errors.js';

// Unit tests for the custom MCP error hierarchy: each subclass carries a
// JSON-RPC error code and a name matching its class.
describe('Error Classes', () => {
  describe('MCPError', () => {
    it('should create error with default code', () => {
      const err = new MCPError('Test error');

      expect(err.name).toBe('MCPError');
      expect(err.message).toBe('Test error');
      expect(err.code).toBe(-32603);
    });

    it('should create error with custom code and data', () => {
      const payload = { field: 'value' };
      const err = new MCPError('Test error', -32602, payload);

      expect(err.code).toBe(-32602);
      expect(err.data).toEqual(payload);
    });

    it('should convert to MCP response format', () => {
      const response = new MCPError('Test error', -32602, { field: 'value' }).toMCPResponse('test-id');

      expect(response).toEqual({
        jsonrpc: '2.0',
        id: 'test-id',
        error: {
          code: -32602,
          message: 'Test error',
          data: { field: 'value' }
        }
      });
    });
  });

  describe('ValidationError', () => {
    it('should create validation error with correct code', () => {
      const err = new ValidationError('Invalid parameter');

      expect(err.name).toBe('ValidationError');
      expect(err.message).toBe('Invalid parameter');
      expect(err.code).toBe(-32602);
    });
  });

  describe('AuthenticationError', () => {
    it('should create authentication error with default message', () => {
      const err = new AuthenticationError();

      expect(err.name).toBe('AuthenticationError');
      expect(err.message).toBe('Invalid API key');
      expect(err.code).toBe(-32001);
    });

    it('should create authentication error with custom message', () => {
      expect(new AuthenticationError('Custom auth error').message).toBe('Custom auth error');
    });
  });

  describe('RateLimitError', () => {
    it('should create rate limit error with default message', () => {
      const err = new RateLimitError();

      expect(err.name).toBe('RateLimitError');
      expect(err.message).toBe('Rate limit exceeded');
      expect(err.code).toBe(-32002);
    });
  });

  describe('TimeoutError', () => {
    it('should create timeout error with default message', () => {
      const err = new TimeoutError();

      expect(err.name).toBe('TimeoutError');
      expect(err.message).toBe('Request timeout');
      expect(err.code).toBe(-32003);
    });
  });

  describe('GeminiAPIError', () => {
    it('should create Gemini API error', () => {
      // The wrapped upstream error is exposed both as originalError and as data
      const cause = { status: 'INVALID_ARGUMENT' };
      const err = new GeminiAPIError('API error', cause);

      expect(err.name).toBe('GeminiAPIError');
      expect(err.message).toBe('API error');
      expect(err.code).toBe(-32603);
      expect(err.originalError).toBe(cause);
      expect(err.data).toBe(cause);
    });
  });
});

// Unit tests for ErrorHandler: mapping Gemini API errors, retryability
// classification, and exponential backoff delay calculation.
describe('ErrorHandler', () => {
  describe('handleGeminiError', () => {
    it('should handle error with message', () => {
      const geminiError = { error: { message: 'API key not valid' } };

      const mapped = ErrorHandler.handleGeminiError(geminiError);

      expect(mapped).toBeInstanceOf(GeminiAPIError);
      expect(mapped.message).toBe('API key not valid');
      expect(mapped.originalError).toBe(geminiError.error);
    });

    it('should handle error with status', () => {
      const mapped = ErrorHandler.handleGeminiError({ error: { status: 'UNAVAILABLE' } });

      expect(mapped.message).toBe('Gemini API error: UNAVAILABLE');
    });

    it('should handle unknown error format', () => {
      const geminiError = { unknown: 'format' };

      const mapped = ErrorHandler.handleGeminiError(geminiError);

      expect(mapped.message).toBe('Unknown Gemini API error');
      expect(mapped.originalError).toBe(geminiError);
    });
  });

  describe('isRetryableError', () => {
    it('should identify retryable Gemini API errors', () => {
      // Transient statuses and HTTP 503/429 should all be retried
      const retryable = [
        new GeminiAPIError('Error', { status: 'UNAVAILABLE' }),
        new GeminiAPIError('Error', { status: 'RESOURCE_EXHAUSTED' }),
        new GeminiAPIError('Error', { status: 'INTERNAL' }),
        new GeminiAPIError('Error', { code: 503 }),
        new GeminiAPIError('Error', { code: 429 })
      ];

      for (const err of retryable) {
        expect(ErrorHandler.isRetryableError(err)).toBe(true);
      }
    });

    it('should identify non-retryable errors', () => {
      // Client-side mistakes and generic errors must not be retried
      const nonRetryable = [
        new GeminiAPIError('Error', { status: 'INVALID_ARGUMENT' }),
        new GeminiAPIError('Error', { code: 400 }),
        new ValidationError('Invalid input'),
        new Error('Generic error')
      ];

      for (const err of nonRetryable) {
        expect(ErrorHandler.isRetryableError(err)).toBe(false);
      }
    });
  });

  describe('getRetryDelay', () => {
    it('should calculate exponential backoff delays', () => {
      // 1s base, doubling per attempt
      const expectedDelays = [1000, 2000, 4000, 8000, 16000];
      expectedDelays.forEach((delay, attempt) => {
        expect(ErrorHandler.getRetryDelay(attempt)).toBe(delay);
      });
    });

    it('should cap delay at maximum', () => {
      expect(ErrorHandler.getRetryDelay(10)).toBe(16000);
    });
  });
});

// Tests for the withRetry helper. These depend on the exact sequencing of
// jest mock resolutions/rejections and on wall-clock timing, so the call
// order below is significant.
describe('withRetry', () => {
  beforeEach(() => {
    jest.clearAllMocks();
  });

  it('should succeed on first attempt', async () => {
    const operation = jest.fn().mockResolvedValue('success');

    const result = await withRetry(operation, 3);

    expect(result).toBe('success');
    expect(operation).toHaveBeenCalledTimes(1);
  });

  it('should retry on retryable error', async () => {
    // Fails twice with a retryable status, then succeeds on the third call
    const retryableError = new GeminiAPIError('Error', { status: 'UNAVAILABLE' });
    const operation = jest.fn()
      .mockRejectedValueOnce(retryableError)
      .mockRejectedValueOnce(retryableError)
      .mockResolvedValue('success');

    const result = await withRetry(operation, 3);

    expect(result).toBe('success');
    expect(operation).toHaveBeenCalledTimes(3);
  });

  it('should not retry on non-retryable error', async () => {
    // Non-retryable errors must surface immediately after a single attempt
    const nonRetryableError = new ValidationError('Invalid input');
    const operation = jest.fn().mockRejectedValue(nonRetryableError);

    await expect(withRetry(operation, 3)).rejects.toThrow(ValidationError);
    expect(operation).toHaveBeenCalledTimes(1);
  });

  it('should throw last error after max attempts', async () => {
    const retryableError = new GeminiAPIError('Error', { status: 'UNAVAILABLE' });
    const operation = jest.fn().mockRejectedValue(retryableError);

    await expect(withRetry(operation, 3)).rejects.toThrow(GeminiAPIError);
    expect(operation).toHaveBeenCalledTimes(3);
  });

  it('should wait between retries', async () => {
    const retryableError = new GeminiAPIError('Error', { status: 'UNAVAILABLE' });
    const operation = jest.fn()
      .mockRejectedValueOnce(retryableError)
      .mockResolvedValue('success');

    // Pass an explicit 100ms base delay and assert at least that much elapsed
    const startTime = Date.now();
    await withRetry(operation, 3, 100);
    const endTime = Date.now();

    expect(endTime - startTime).toBeGreaterThanOrEqual(100);
    expect(operation).toHaveBeenCalledTimes(2);
  });
});

```

--------------------------------------------------------------------------------
/tests/unit/validation.test.ts:
--------------------------------------------------------------------------------

```typescript
import { Validator, ToolSchemas, CommonSchemas } from '../../src/utils/validation.js';
import { ValidationError } from '../../src/utils/errors.js';

describe('Validation', () => {
  describe('CommonSchemas', () => {
    describe('geminiModel', () => {
      it('should accept valid Gemini models', () => {
        const acceptedModels = [
          'gemini-2.5-pro',
          'gemini-2.5-flash',
          'gemini-2.5-flash-lite',
          'gemini-2.0-flash',
          'gemini-1.5-pro'
        ];

        for (const name of acceptedModels) {
          expect(() => CommonSchemas.geminiModel.parse(name)).not.toThrow();
        }
      });

      it('should reject invalid models', () => {
        for (const name of ['gpt-4', 'claude-3', 'invalid-model']) {
          expect(() => CommonSchemas.geminiModel.parse(name)).toThrow();
        }
      });
    });

    describe('temperature', () => {
      it('should accept valid temperature values', () => {
        for (const value of [0, 0.5, 1.0, 1.5, 2.0]) {
          expect(() => CommonSchemas.temperature.parse(value)).not.toThrow();
        }
      });

      it('should reject invalid temperature values', () => {
        for (const value of [-0.1, 2.1, 'not-a-number']) {
          expect(() => CommonSchemas.temperature.parse(value)).toThrow();
        }
      });
    });

    describe('jsonSchema', () => {
      it('should accept valid JSON strings', () => {
        const wellFormed = [
          '{"type": "object"}',
          '{"type": "string", "enum": ["a", "b"]}',
          '[]'
        ];

        for (const candidate of wellFormed) {
          expect(() => CommonSchemas.jsonSchema.parse(candidate)).not.toThrow();
        }
      });

      it('should reject invalid JSON strings', () => {
        const malformed = [
          '{invalid json}',
          'not json at all',
          '{"unclosed": '
        ];

        for (const candidate of malformed) {
          expect(() => CommonSchemas.jsonSchema.parse(candidate)).toThrow();
        }
      });
    });
  });

  describe('ToolSchemas', () => {
    describe('generateText', () => {
      it('should accept valid parameters', () => {
        const attempt = () =>
          ToolSchemas.generateText.parse({
            prompt: 'Test prompt',
            model: 'gemini-2.5-flash',
            temperature: 0.7,
            maxTokens: 1000
          });

        expect(attempt).not.toThrow();
      });

      it('should require prompt', () => {
        const attempt = () => ToolSchemas.generateText.parse({ model: 'gemini-2.5-flash' });

        expect(attempt).toThrow();
      });

      it('should reject empty prompt', () => {
        expect(() => ToolSchemas.generateText.parse({ prompt: '' })).toThrow();
      });
    });

    describe('analyzeImage', () => {
      it('should accept valid parameters with imageUrl', () => {
        const input = {
          prompt: 'What is in this image?',
          imageUrl: 'https://example.com/image.jpg'
        };

        expect(() => ToolSchemas.analyzeImage.parse(input)).not.toThrow();
      });

      it('should accept valid parameters with imageBase64', () => {
        const input = {
          prompt: 'What is in this image?',
          imageBase64: 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8/5+hHgAHggJ/PchI7wAAAABJRU5ErkJggg=='
        };

        expect(() => ToolSchemas.analyzeImage.parse(input)).not.toThrow();
      });

      it('should require either imageUrl or imageBase64', () => {
        const input = {
          prompt: 'What is in this image?'
        };

        expect(() => ToolSchemas.analyzeImage.parse(input)).toThrow();
      });

      it('should reject invalid imageUrl', () => {
        const input = {
          prompt: 'What is in this image?',
          imageUrl: 'not-a-url'
        };

        expect(() => ToolSchemas.analyzeImage.parse(input)).toThrow();
      });
    });
  });

  describe('Validator', () => {
    describe('validateToolParams', () => {
      it('should validate and return parsed parameters', () => {
        const parsed = Validator.validateToolParams(ToolSchemas.generateText, {
          prompt: 'Test prompt',
          temperature: 0.7
        });

        expect(parsed.prompt).toBe('Test prompt');
        expect(parsed.temperature).toBe(0.7);
      });

      it('should throw ValidationError for invalid parameters', () => {
        const attempt = () =>
          Validator.validateToolParams(ToolSchemas.generateText, {
            prompt: '',
            temperature: 3.0
          });

        expect(attempt).toThrow(ValidationError);
      });
    });

    describe('sanitizeString', () => {
      it('should return clean string unchanged', () => {
        const clean = 'This is a clean string with\nnewlines and\ttabs.';

        expect(Validator.sanitizeString(clean)).toBe(clean);
      });

      it('should remove control characters', () => {
        const dirty = 'String with\x00null\x01control\x1fcharacters';

        expect(Validator.sanitizeString(dirty)).toBe('String withnullcontrolcharacters');
      });

      it('should enforce maximum length', () => {
        const tooLong = 'a'.repeat(100);

        expect(() => Validator.sanitizeString(tooLong, 50)).toThrow(ValidationError);
      });

      it('should throw error for non-string input', () => {
        expect(() => Validator.sanitizeString(123 as any)).toThrow(ValidationError);
      });
    });

    describe('validateJSON', () => {
      it('should parse valid JSON', () => {
        const parsed = Validator.validateJSON('{"key": "value", "number": 42}');

        expect(parsed).toEqual({ key: 'value', number: 42 });
      });

      it('should throw ValidationError for invalid JSON', () => {
        expect(() => Validator.validateJSON('{invalid json}')).toThrow(ValidationError);
      });
    });

    describe('validateMCPRequest', () => {
      it('should accept valid MCP request', () => {
        const wellFormed = {
          jsonrpc: '2.0',
          id: 1,
          method: 'tools/list'
        };

        expect(() => Validator.validateMCPRequest(wellFormed)).not.toThrow();
      });

      it('should reject request without jsonrpc', () => {
        const missingVersion = { id: 1, method: 'tools/list' };

        expect(() => Validator.validateMCPRequest(missingVersion)).toThrow(ValidationError);
      });

      it('should reject request with wrong jsonrpc version', () => {
        const wrongVersion = { jsonrpc: '1.0', id: 1, method: 'tools/list' };

        expect(() => Validator.validateMCPRequest(wrongVersion)).toThrow(ValidationError);
      });

      it('should reject request without method', () => {
        const missingMethod = { jsonrpc: '2.0', id: 1 };

        expect(() => Validator.validateMCPRequest(missingMethod)).toThrow(ValidationError);
      });

      it('should reject request without id', () => {
        const missingId = { jsonrpc: '2.0', method: 'tools/list' };

        expect(() => Validator.validateMCPRequest(missingId)).toThrow(ValidationError);
      });
    });
  });
});

```

--------------------------------------------------------------------------------
/src/enhanced-stdio-server.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env node
import { GoogleGenAI } from '@google/genai';
import { createInterface } from 'readline';
import { MCPRequest, MCPResponse } from './types.js';
import { config } from './config/index.js';
import { logger } from './utils/logger.js';
import { rateLimiter } from './utils/rateLimiter.js';
import { MCPError, ValidationError } from './utils/errors.js';
import { Validator, ToolSchemas } from './utils/validation.js';

// Ensure stdin delivers UTF-8 strings (not Buffers) so large base64 image
// payloads arrive as text; the guard covers environments where setEncoding
// is unavailable. (This does not change any buffer size limit.)
if (process.stdin.setEncoding) {
  process.stdin.setEncoding('utf8');
}

// Available Gemini models as of July 2025
// Model catalog keyed by API model id.
// `features` feeds capability checks (e.g. the grounding check in
// generate_text), and `Object.keys(GEMINI_MODELS)` backs the `model` enums
// in the tool schemas — so both the keys and their order are user-visible.
const GEMINI_MODELS = {
  // Thinking models (2.5 series) - latest and most capable
  'gemini-2.5-pro': {
    description: 'Most capable thinking model, best for complex reasoning and coding',
    features: ['thinking', 'function_calling', 'json_mode', 'grounding', 'system_instructions'],
    contextWindow: 2000000, // 2M tokens
    thinking: true
  },
  'gemini-2.5-flash': {
    description: 'Fast thinking model with best price/performance ratio',
    features: ['thinking', 'function_calling', 'json_mode', 'grounding', 'system_instructions'],
    contextWindow: 1000000, // 1M tokens
    thinking: true
  },
  'gemini-2.5-flash-lite': {
    description: 'Ultra-fast, cost-efficient thinking model for high-throughput tasks',
    features: ['thinking', 'function_calling', 'json_mode', 'system_instructions'],
    contextWindow: 1000000,
    thinking: true
  },

  // 2.0 series
  'gemini-2.0-flash': {
    description: 'Fast, efficient model with 1M context window',
    features: ['function_calling', 'json_mode', 'grounding', 'system_instructions'],
    contextWindow: 1000000
  },
  'gemini-2.0-flash-lite': {
    description: 'Most cost-efficient model for simple tasks',
    features: ['function_calling', 'json_mode', 'system_instructions'],
    contextWindow: 1000000
  },
  'gemini-2.0-pro-experimental': {
    description: 'Experimental model with 2M context, excellent for coding',
    features: ['function_calling', 'json_mode', 'grounding', 'system_instructions'],
    contextWindow: 2000000
  },

  // Legacy models (for compatibility)
  'gemini-1.5-pro': {
    description: 'Previous generation pro model',
    features: ['function_calling', 'json_mode', 'system_instructions'],
    contextWindow: 2000000
  },
  'gemini-1.5-flash': {
    description: 'Previous generation fast model',
    features: ['function_calling', 'json_mode', 'system_instructions'],
    contextWindow: 1000000
  }
};

class EnhancedStdioMCPServer {
  private genAI: GoogleGenAI;
  private conversations: Map<string, any[]> = new Map();

  /**
   * Create the server: initialize the Gemini API client, then start
   * consuming JSON-RPC messages from stdin.
   *
   * @param apiKey Google AI API key used to authenticate all Gemini calls.
   */
  constructor(apiKey: string) {
    logger.startup('Connecting to Google Gemini API...');
    this.genAI = new GoogleGenAI({ apiKey });
    logger.success('Gemini API client initialized');
    // Stdio wiring comes last so no line can arrive before the client exists.
    this.setupStdioInterface();
    logger.success('Stdio interface configured');
  }

  /**
   * Wire the JSON-RPC message loop to stdin/stdout.
   *
   * Each non-empty line is parsed as an MCP request, structurally validated,
   * rate-limited, and dispatched. Parse/validation failures are answered with
   * an MCP error response whenever a request id can be recovered from the
   * raw line; otherwise they are only logged.
   */
  private setupStdioInterface() {
    const rl = createInterface({
      input: process.stdin,
      output: process.stdout,
      terminal: false,
      // Increase max line length for large image data
      crlfDelay: Infinity
    });

    rl.on('line', line => {
      if (line.trim()) {
        try {
          const request: MCPRequest = JSON.parse(line);

          // Validate MCP request structure
          Validator.validateMCPRequest(request);

          // Apply rate limiting
          rateLimiter.checkLimit();

          logger.request(`Received request: ${request.method} (ID: ${request.id})`);
          this.handleRequest(request);
        } catch (error) {
          logger.error('Failed to parse or validate message:', error);

          // Send an error response if we can determine the request ID.
          // JSON-RPC allows id to be 0, so check for presence rather than
          // truthiness (a truthy check would silently drop id: 0).
          try {
            const partialRequest = JSON.parse(line);
            if (partialRequest.id !== undefined && partialRequest.id !== null) {
              const errorResponse =
                error instanceof MCPError
                  ? error.toMCPResponse(partialRequest.id)
                  : new MCPError('Invalid request format').toMCPResponse(partialRequest.id);
              this.sendResponse(errorResponse);
            }
          } catch {
            // Raw line was not JSON at all; already logged above.
          }
        }
      }
    });

    process.stdin.on('error', err => {
      // Route through the shared logger for consistency with the rest of
      // the message loop (previously logged via bare console.error).
      logger.error('stdin error:', err);
    });
  }

  /**
   * Dispatch a validated MCP request to the matching handler and write the
   * response to stdout.
   *
   * Notifications (messages without an id) are logged but never answered,
   * per JSON-RPC 2.0. Handler failures become JSON-RPC error responses;
   * MCPError instances keep their specific error codes, everything else is
   * reported as -32603 (internal error).
   */
  private async handleRequest(request: MCPRequest) {
    try {
      let response: MCPResponse;

      switch (request.method) {
        case 'initialize':
          response = {
            jsonrpc: '2.0',
            id: request.id,
            result: {
              protocolVersion: '2024-11-05',
              serverInfo: {
                name: 'mcp-server-gemini-enhanced',
                version: '4.1.0'
              },
              capabilities: {
                tools: {},
                resources: {},
                prompts: {}
              }
            }
          };
          break;

        case 'tools/list':
          response = {
            jsonrpc: '2.0',
            id: request.id,
            result: {
              tools: this.getAvailableTools()
            }
          };
          break;

        case 'tools/call':
          response = await this.handleToolCall(request);
          break;

        case 'resources/list':
          response = {
            jsonrpc: '2.0',
            id: request.id,
            result: {
              resources: this.getAvailableResources()
            }
          };
          break;

        case 'resources/read':
          response = await this.handleResourceRead(request);
          break;

        case 'prompts/list':
          response = {
            jsonrpc: '2.0',
            id: request.id,
            result: {
              prompts: this.getAvailablePrompts()
            }
          };
          break;

        default:
          // A message with no id is a notification; it must not be answered.
          if (!('id' in request)) {
            logger.request(`Notification received: ${(request as any).method}`);
            return;
          }

          response = {
            jsonrpc: '2.0',
            id: request.id,
            error: {
              code: -32601,
              message: 'Method not found'
            }
          };
      }

      this.sendResponse(response);
    } catch (error) {
      // Preserve specific MCP error codes when available, matching the
      // error handling in setupStdioInterface.
      const errorResponse: MCPResponse =
        error instanceof MCPError
          ? error.toMCPResponse(request.id)
          : {
              jsonrpc: '2.0',
              id: request.id,
              error: {
                code: -32603,
                message: error instanceof Error ? error.message : 'Internal error'
              }
            };
      this.sendResponse(errorResponse);
    }
  }

  /**
   * Describe the tools exposed via tools/list.
   *
   * Each entry follows the MCP tool descriptor shape: name, description and
   * a JSON Schema (`inputSchema`) for its arguments. The `model` enums are
   * derived from GEMINI_MODELS so the catalog stays the single source of
   * truth for which models are offered.
   */
  private getAvailableTools() {
    return [
      // Primary text-generation tool with the full parameter surface.
      {
        name: 'generate_text',
        description: 'Generate text using Google Gemini with advanced features',
        inputSchema: {
          type: 'object',
          properties: {
            prompt: {
              type: 'string',
              description: 'The prompt to send to Gemini'
            },
            model: {
              type: 'string',
              description: 'Specific Gemini model to use',
              enum: Object.keys(GEMINI_MODELS),
              default: 'gemini-2.5-flash'
            },
            systemInstruction: {
              type: 'string',
              description: 'System instruction to guide model behavior'
            },
            temperature: {
              type: 'number',
              description: 'Temperature for generation (0-2)',
              default: 0.7,
              minimum: 0,
              maximum: 2
            },
            maxTokens: {
              type: 'number',
              description: 'Maximum tokens to generate',
              default: 2048
            },
            topK: {
              type: 'number',
              description: 'Top-k sampling parameter',
              default: 40
            },
            topP: {
              type: 'number',
              description: 'Top-p (nucleus) sampling parameter',
              default: 0.95
            },
            jsonMode: {
              type: 'boolean',
              description: 'Enable JSON mode for structured output',
              default: false
            },
            jsonSchema: {
              type: 'string',
              description: 'JSON schema as a string for structured output (when jsonMode is true)'
            },
            grounding: {
              type: 'boolean',
              description: 'Enable Google Search grounding for up-to-date information',
              default: false
            },
            safetySettings: {
              type: 'string',
              description: 'Safety settings as JSON string for content filtering'
            },
            conversationId: {
              type: 'string',
              description: 'ID for maintaining conversation context'
            }
          },
          required: ['prompt']
        }
      },
      // Vision tool: image comes either as a URL or inline base64 data.
      {
        name: 'analyze_image',
        description: 'Analyze images using Gemini vision capabilities',
        inputSchema: {
          type: 'object',
          properties: {
            prompt: {
              type: 'string',
              description: 'Question or instruction about the image'
            },
            imageUrl: {
              type: 'string',
              description: 'URL of the image to analyze'
            },
            imageBase64: {
              type: 'string',
              description: 'Base64-encoded image data (alternative to URL)'
            },
            model: {
              type: 'string',
              description: 'Vision-capable Gemini model',
              enum: ['gemini-2.5-pro', 'gemini-2.5-flash', 'gemini-2.0-flash'],
              default: 'gemini-2.5-flash'
            }
          },
          required: ['prompt']
        }
      },
      // Tokenizer utility; counting is model-specific.
      {
        name: 'count_tokens',
        description: 'Count tokens for a given text with a specific model',
        inputSchema: {
          type: 'object',
          properties: {
            text: {
              type: 'string',
              description: 'Text to count tokens for'
            },
            model: {
              type: 'string',
              description: 'Model to use for token counting',
              enum: Object.keys(GEMINI_MODELS),
              default: 'gemini-2.5-flash'
            }
          },
          required: ['text']
        }
      },
      // Catalog introspection; `filter` values are handled in listModels().
      {
        name: 'list_models',
        description: 'List all available Gemini models and their capabilities',
        inputSchema: {
          type: 'object',
          properties: {
            filter: {
              type: 'string',
              description: 'Filter models by capability',
              enum: ['all', 'thinking', 'vision', 'grounding', 'json_mode']
            }
          }
        }
      },
      // Embedding models are separate from the generative catalog above.
      {
        name: 'embed_text',
        description: 'Generate embeddings for text using Gemini embedding models',
        inputSchema: {
          type: 'object',
          properties: {
            text: {
              type: 'string',
              description: 'Text to generate embeddings for'
            },
            model: {
              type: 'string',
              description: 'Embedding model to use',
              enum: ['text-embedding-004', 'text-multilingual-embedding-002'],
              default: 'text-embedding-004'
            }
          },
          required: ['text']
        }
      },
      // Self-documentation tool.
      {
        name: 'get_help',
        description: 'Get help and usage information for the Gemini MCP server',
        inputSchema: {
          type: 'object',
          properties: {
            topic: {
              type: 'string',
              description: 'Help topic to get information about',
              enum: ['overview', 'tools', 'models', 'parameters', 'examples', 'quick-start'],
              default: 'overview'
            }
          }
        }
      }
    ];
  }

  /**
   * Describe the resources exposed via resources/list.
   *
   * URIs use the custom `gemini://` scheme and are resolved by
   * handleResourceRead.
   */
  private getAvailableResources() {
    return [
      {
        uri: 'gemini://models',
        name: 'Available Gemini Models',
        description: 'List of all available Gemini models and their capabilities',
        mimeType: 'application/json'
      },
      {
        uri: 'gemini://capabilities',
        name: 'API Capabilities',
        description: 'Detailed information about Gemini API capabilities',
        mimeType: 'text/markdown'
      },
      {
        uri: 'gemini://help/usage',
        name: 'Usage Guide',
        description: 'Complete guide on using all tools and features',
        mimeType: 'text/markdown'
      },
      {
        uri: 'gemini://help/parameters',
        name: 'Parameters Reference',
        description: 'Detailed documentation of all parameters',
        mimeType: 'text/markdown'
      },
      {
        uri: 'gemini://help/examples',
        name: 'Examples',
        description: 'Example usage patterns for common tasks',
        mimeType: 'text/markdown'
      }
    ];
  }

  /**
   * Describe the reusable prompt templates exposed via prompts/list.
   * Each entry lists its named arguments and whether they are required.
   */
  private getAvailablePrompts() {
    return [
      {
        name: 'code_review',
        description: 'Comprehensive code review with Gemini 2.5 Pro',
        arguments: [
          {
            name: 'code',
            description: 'Code to review',
            required: true
          },
          {
            name: 'language',
            description: 'Programming language',
            required: false
          }
        ]
      },
      {
        name: 'explain_with_thinking',
        description: 'Deep explanation using Gemini 2.5 thinking capabilities',
        arguments: [
          {
            name: 'topic',
            description: 'Topic to explain',
            required: true
          },
          {
            name: 'level',
            description: 'Explanation level (beginner/intermediate/expert)',
            required: false
          }
        ]
      },
      {
        name: 'creative_writing',
        description: 'Creative writing with style control',
        arguments: [
          {
            name: 'prompt',
            description: 'Writing prompt',
            required: true
          },
          {
            name: 'style',
            description: 'Writing style',
            required: false
          },
          {
            name: 'length',
            description: 'Desired length',
            required: false
          }
        ]
      }
    ];
  }

  /**
   * Route a tools/call request to the matching tool implementation.
   * Unknown tool names are answered with a JSON-RPC -32601 error.
   */
  private async handleToolCall(request: MCPRequest): Promise<MCPResponse> {
    const params = request.params || {};
    const toolName = params.name;
    const toolArgs = params.arguments;

    if (toolName === 'generate_text') {
      return this.generateText(request.id, toolArgs);
    }
    if (toolName === 'analyze_image') {
      return this.analyzeImage(request.id, toolArgs);
    }
    if (toolName === 'count_tokens') {
      return this.countTokens(request.id, toolArgs);
    }
    if (toolName === 'list_models') {
      return this.listModels(request.id, toolArgs);
    }
    if (toolName === 'embed_text') {
      return this.embedText(request.id, toolArgs);
    }
    if (toolName === 'get_help') {
      return this.getHelp(request.id, toolArgs);
    }

    return {
      jsonrpc: '2.0',
      id: request.id,
      error: {
        code: -32601,
        message: `Unknown tool: ${toolName}`
      }
    };
  }

  /**
   * Handle the generate_text tool.
   *
   * Supports model selection, sampling controls, system instructions, JSON
   * mode (with optional response schema), safety settings, Google Search
   * grounding (model-dependent), and per-conversation history keyed by
   * conversationId.
   */
  private async generateText(id: any, args: any): Promise<MCPResponse> {
    try {
      // Validate parameters
      const validatedArgs = Validator.validateToolParams(ToolSchemas.generateText, args);

      const model = validatedArgs.model || 'gemini-2.5-flash';
      logger.api(`Generating text with model: ${model}`);
      const modelInfo = GEMINI_MODELS[model as keyof typeof GEMINI_MODELS];

      if (!modelInfo) {
        throw new Error(`Unknown model: ${model}`);
      }

      // Build generation config. Use ?? rather than || so that explicit
      // zero values (e.g. temperature: 0, a valid deterministic setting
      // accepted by the schema) are honored instead of being replaced by
      // the defaults.
      const generationConfig: any = {
        temperature: validatedArgs.temperature ?? 0.7,
        maxOutputTokens: validatedArgs.maxTokens ?? 2048,
        topK: validatedArgs.topK ?? 40,
        topP: validatedArgs.topP ?? 0.95
      };

      // Add JSON mode if requested
      if (validatedArgs.jsonMode) {
        generationConfig.responseMimeType = 'application/json';
        if (validatedArgs.jsonSchema) {
          try {
            generationConfig.responseSchema = Validator.validateJSON(validatedArgs.jsonSchema);
          } catch (error) {
            logger.error('Invalid JSON schema provided:', error);
            throw new ValidationError('Invalid JSON schema format');
          }
        }
      }

      // The new user turn, reused below when persisting conversation history.
      const userContent = {
        parts: [
          {
            text: Validator.sanitizeString(validatedArgs.prompt)
          }
        ],
        role: 'user'
      };

      // Build the request
      const requestBody: any = {
        model,
        contents: [userContent],
        generationConfig
      };

      // Add system instruction if provided
      if (validatedArgs.systemInstruction) {
        requestBody.systemInstruction = {
          parts: [
            {
              text: Validator.sanitizeString(validatedArgs.systemInstruction)
            }
          ]
        };
      }

      // Add safety settings if provided.
      // NOTE(review): safetySettings/grounding/conversationId are read from
      // raw args — confirm whether ToolSchemas.generateText includes them so
      // they could come from validatedArgs instead.
      if (args.safetySettings) {
        try {
          requestBody.safetySettings =
            typeof args.safetySettings === 'string'
              ? JSON.parse(args.safetySettings)
              : args.safetySettings;
        } catch (error) {
          logger.error('Invalid safety settings JSON provided:', error);
        }
      }

      // Add grounding if requested and supported
      if (args.grounding && modelInfo.features.includes('grounding')) {
        requestBody.tools = [
          {
            googleSearch: {}
          }
        ];
      }

      // Prepend conversation context so the model sees prior turns.
      if (args.conversationId) {
        const history = this.conversations.get(args.conversationId) || [];
        if (history.length > 0) {
          requestBody.contents = [...history, ...requestBody.contents];
        }
      }

      // Call the API (requestBody already carries the model name, so no
      // duplicate spread is needed).
      const result = await this.genAI.models.generateContent(requestBody);
      const text = result.text || '';

      // Persist only the NEW turns. Pushing requestBody.contents here would
      // re-append the prior history (it was merged in above), duplicating
      // the stored conversation on every call.
      if (args.conversationId) {
        const history = this.conversations.get(args.conversationId) || [];
        history.push(userContent);
        history.push({
          parts: [
            {
              text
            }
          ],
          role: 'model'
        });
        this.conversations.set(args.conversationId, history);
      }

      return {
        jsonrpc: '2.0',
        id,
        result: {
          content: [
            {
              type: 'text',
              text
            }
          ],
          metadata: {
            model,
            tokensUsed: result.usageMetadata?.totalTokenCount,
            candidatesCount: result.candidates?.length || 1,
            finishReason: result.candidates?.[0]?.finishReason
          }
        }
      };
    } catch (error) {
      logger.error('Error in generateText:', error);
      return {
        jsonrpc: '2.0',
        id,
        error: {
          code: -32603,
          message: error instanceof Error ? error.message : 'Internal error'
        }
      };
    }
  }

  /**
   * Handle the analyze_image tool using Gemini vision models.
   *
   * Accepts either an image URL (currently passed through as instruction
   * text, not fetched) or base64 image data, with or without a data-URI
   * prefix; raw base64 without a prefix is assumed to be JPEG.
   */
  private async analyzeImage(id: any, args: any): Promise<MCPResponse> {
    try {
      const model = args.model || 'gemini-2.5-flash';

      // Validate inputs
      if (!args.imageUrl && !args.imageBase64) {
        throw new Error('Either imageUrl or imageBase64 must be provided');
      }

      // Prepare image part
      let imagePart: any;
      if (args.imageUrl) {
        // For URL, we'd need to fetch and convert to base64.
        // For now, we just pass the URL along as instruction text.
        imagePart = {
          text: `[Image URL: ${args.imageUrl}]`
        };
      } else if (args.imageBase64) {
        // Diagnostics go through the shared logger for consistency with the
        // rest of the server (previously bare console.error).
        logger.api(`Image base64 length: ${args.imageBase64.length}`);

        // Extract MIME type and data from a data URI if present.
        const matches = args.imageBase64.match(/^data:(.+);base64,(.+)$/);
        if (matches) {
          logger.api(`MIME type: ${matches[1]}, Data length: ${matches[2].length}`);
          imagePart = {
            inlineData: {
              mimeType: matches[1],
              data: matches[2]
            }
          };
        } else {
          // No data-URI prefix: assume raw base64 JPEG data.
          logger.api('Raw base64 data detected');
          imagePart = {
            inlineData: {
              mimeType: 'image/jpeg',
              data: args.imageBase64
            }
          };
        }
      }

      const result = await this.genAI.models.generateContent({
        model,
        contents: [
          {
            parts: [{ text: args.prompt }, imagePart],
            role: 'user'
          }
        ]
      });

      const text = result.text || '';

      return {
        jsonrpc: '2.0',
        id,
        result: {
          content: [
            {
              type: 'text',
              text
            }
          ]
        }
      };
    } catch (error) {
      logger.error('Error in analyzeImage:', error);
      return {
        jsonrpc: '2.0',
        id,
        error: {
          code: -32603,
          message: `Image analysis failed: ${error instanceof Error ? error.message : 'Unknown error'}`
        }
      };
    }
  }

  /**
   * Handle the count_tokens tool: report how many tokens a piece of text
   * consumes under the selected model's tokenizer.
   */
  private async countTokens(id: any, args: any): Promise<MCPResponse> {
    try {
      const chosenModel = args.model || 'gemini-2.5-flash';

      const counted = await this.genAI.models.countTokens({
        model: chosenModel,
        contents: [{ parts: [{ text: args.text }], role: 'user' }]
      });

      return {
        jsonrpc: '2.0',
        id,
        result: {
          content: [
            {
              type: 'text',
              text: `Token count: ${counted.totalTokens}`
            }
          ],
          metadata: {
            tokenCount: counted.totalTokens,
            model: chosenModel
          }
        }
      };
    } catch (error) {
      const message = error instanceof Error ? error.message : 'Internal error';
      return {
        jsonrpc: '2.0',
        id,
        error: { code: -32603, message }
      };
    }
  }

  /**
   * Handle the list_models tool: return the model catalog as JSON text,
   * optionally filtered by capability.
   */
  private listModels(id: any, args: any): MCPResponse {
    const filter = args?.filter || 'all';
    let models = Object.entries(GEMINI_MODELS);

    if (filter !== 'all') {
      models = models.filter(([_, info]) => {
        switch (filter) {
          case 'thinking':
            return 'thinking' in info && info.thinking === true;
          case 'vision':
            // Every model in the current catalog is vision-capable, so this
            // filter keeps everything. (It previously tested for the
            // unrelated 'function_calling' feature as a proxy, which was
            // misleading; revisit if a non-vision model is ever added.)
            return true;
          case 'grounding':
            return info.features.includes('grounding');
          case 'json_mode':
            return info.features.includes('json_mode');
          default:
            return true;
        }
      });
    }

    const modelList = models.map(([name, info]) => ({
      name,
      ...info
    }));

    return {
      jsonrpc: '2.0',
      id,
      result: {
        content: [
          {
            type: 'text',
            text: JSON.stringify(modelList, null, 2)
          }
        ],
        metadata: {
          count: modelList.length,
          filter
        }
      }
    };
  }

  /**
   * Handle the embed_text tool: produce an embedding vector for the given
   * text with the selected embedding model.
   */
  private async embedText(id: any, args: any): Promise<MCPResponse> {
    try {
      const embeddingModel = args.model || 'text-embedding-004';

      const response = await this.genAI.models.embedContent({
        model: embeddingModel,
        contents: args.text
      });

      const vector = response.embeddings?.[0]?.values || [];

      return {
        jsonrpc: '2.0',
        id,
        result: {
          content: [
            {
              type: 'text',
              text: JSON.stringify({ embedding: vector, model: embeddingModel })
            }
          ],
          metadata: {
            model: embeddingModel,
            dimensions: vector.length || 0
          }
        }
      };
    } catch (error) {
      const message = error instanceof Error ? error.message : 'Internal error';
      return {
        jsonrpc: '2.0',
        id,
        error: { code: -32603, message }
      };
    }
  }

  private async handleResourceRead(request: MCPRequest): Promise<MCPResponse> {
    const uri = request.params?.uri;

    if (!uri) {
      return {
        jsonrpc: '2.0',
        id: request.id,
        error: {
          code: -32602,
          message: 'Missing required parameter: uri'
        }
      };
    }

    let content = '';
    let mimeType = 'text/plain';

    switch (uri) {
      case 'gemini://models':
        content = JSON.stringify(GEMINI_MODELS, null, 2);
        mimeType = 'application/json';
        break;

      case 'gemini://capabilities':
        content = `# Gemini API Capabilities

## Text Generation
- All models support advanced text generation
- System instructions for behavior control
- Temperature, topK, topP for output control
- Token limits vary by model (1M-2M)

## Thinking Models (2.5 Series)
- Step-by-step reasoning before responding
- Better accuracy for complex problems
- Ideal for coding, analysis, and problem-solving

## JSON Mode
- Structured output with schema validation
- Available on all models
- Ensures consistent response format

## Google Search Grounding
- Real-time web search integration
- Available on select models
- Perfect for current events and facts

## Vision Capabilities
- Image analysis and understanding
- Available on most models
- Supports URLs and base64 images

## Embeddings
- Semantic text embeddings
- Multiple models available
- Multilingual support

## Safety Settings
- Granular content filtering
- Customizable thresholds
- Per-category control

## Conversation Memory
- Context retention across messages
- Session-based conversations
- Ideal for multi-turn interactions`;
        mimeType = 'text/markdown';
        break;

      case 'gemini://help/usage':
        content = `${this.getHelpContent('overview')}\n\n${this.getHelpContent('tools')}`;
        mimeType = 'text/markdown';
        break;

      case 'gemini://help/parameters':
        content = this.getHelpContent('parameters');
        mimeType = 'text/markdown';
        break;

      case 'gemini://help/examples':
        content = this.getHelpContent('examples');
        mimeType = 'text/markdown';
        break;

      default:
        return {
          jsonrpc: '2.0',
          id: request.id,
          error: {
            code: -32602,
            message: `Unknown resource: ${uri}`
          }
        };
    }

    return {
      jsonrpc: '2.0',
      id: request.id,
      result: {
        contents: [
          {
            uri,
            mimeType,
            text: content
          }
        ]
      }
    };
  }

  private getHelpContent(topic: string): string {
    // Canonical help text, shared between the get_help tool and the
    // gemini://help/* resources. Keys are the supported help topics;
    // unknown topics get a generic fallback message.
    const helpTopics: Record<string, string> = {
      overview: `# Gemini MCP Server Help

Welcome to the Gemini MCP Server v4.1.0! This server provides access to Google's Gemini AI models through Claude Desktop.

## Available Tools
1. **generate_text** - Generate text with advanced features
2. **analyze_image** - Analyze images using vision models
3. **count_tokens** - Count tokens for cost estimation
4. **list_models** - List all available models
5. **embed_text** - Generate text embeddings
6. **get_help** - Get help on using this server

## Quick Start
- "Use Gemini to explain [topic]"
- "Analyze this image with Gemini"
- "List all Gemini models"
- "Get help on parameters"

## Key Features
- Latest Gemini 2.5 models with thinking capabilities
- JSON mode for structured output
- Google Search grounding for current information
- System instructions for behavior control
- Conversation memory for context
- Safety settings customization

Use "get help on tools" for detailed tool information.`,

      tools: `# Available Tools

## 1. generate_text
Generate text using Gemini models with advanced features.

**Parameters:**
- prompt (required): Your text prompt
- model: Choose from gemini-2.5-pro, gemini-2.5-flash, etc.
- temperature: 0-2 (default 0.7)
- maxTokens: Max output tokens (default 2048)
- systemInstruction: Guide model behavior
- jsonMode: Enable JSON output
- grounding: Enable Google Search
- conversationId: Maintain conversation context

**Example:** "Use Gemini 2.5 Pro to explain quantum computing"

## 2. analyze_image
Analyze images using vision-capable models.

**Parameters:**
- prompt (required): Question about the image
- imageUrl OR imageBase64 (required): Image source
- model: Vision-capable model (default gemini-2.5-flash)

**Example:** "Analyze this architecture diagram"

## 3. count_tokens
Count tokens for text with a specific model.

**Parameters:**
- text (required): Text to count
- model: Model for counting (default gemini-2.5-flash)

**Example:** "Count tokens for this paragraph"

## 4. list_models
List available models with optional filtering.

**Parameters:**
- filter: all, thinking, vision, grounding, json_mode

**Example:** "List models with thinking capability"

## 5. embed_text
Generate embeddings for semantic search.

**Parameters:**
- text (required): Text to embed
- model: text-embedding-004 or text-multilingual-embedding-002

**Example:** "Generate embeddings for similarity search"

## 6. get_help
Get help on using this server.

**Parameters:**
- topic: overview, tools, models, parameters, examples, quick-start

**Example:** "Get help on parameters"`,

      parameters: `# Parameter Reference

## generate_text Parameters

**Required:**
- prompt (string): Your text prompt

**Optional:**
- model (string): Model to use (default: gemini-2.5-flash)
- systemInstruction (string): System prompt for behavior
- temperature (0-2): Creativity level (default: 0.7)
- maxTokens (number): Max output tokens (default: 2048)
- topK (number): Top-k sampling (default: 40)
- topP (number): Nucleus sampling (default: 0.95)
- jsonMode (boolean): Enable JSON output
- jsonSchema (object): JSON schema for validation
- grounding (boolean): Enable Google Search
- conversationId (string): Conversation identifier
- safetySettings (array): Content filtering settings

## Temperature Guide
- 0.1-0.3: Precise, factual
- 0.5-0.8: Balanced (default 0.7)
- 1.0-1.5: Creative
- 1.5-2.0: Very creative

## JSON Mode Example
Enable jsonMode and provide jsonSchema:
{
  "type": "object",
  "properties": {
    "sentiment": {"type": "string"},
    "score": {"type": "number"}
  }
}

## Safety Settings
Categories: HARASSMENT, HATE_SPEECH, SEXUALLY_EXPLICIT, DANGEROUS_CONTENT
Thresholds: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE`,

      examples: `# Usage Examples

## Basic Text Generation
"Use Gemini to explain machine learning"

## With Specific Model
"Use Gemini 2.5 Pro to write a Python sorting function"

## With Temperature
"Use Gemini with temperature 1.5 to write a creative story"

## JSON Mode
"Use Gemini in JSON mode to analyze sentiment and return {sentiment, confidence, keywords}"

## With Grounding
"Use Gemini with grounding to research latest AI developments"

## System Instructions
"Use Gemini as a Python tutor to explain decorators"

## Conversation Context
"Start conversation 'chat-001' about web development"
"Continue chat-001 and ask about React hooks"

## Image Analysis
"Analyze this screenshot and describe the UI elements"

## Token Counting
"Count tokens for this document using gemini-2.5-pro"

## Complex Example
"Use Gemini 2.5 Pro to review this code with:
- System instruction: 'You are a security expert'
- Temperature: 0.3
- JSON mode with schema for findings
- Grounding for latest security practices"`
    };

    return helpTopics[topic] ?? 'Unknown help topic.';
  }

  private getHelp(id: any, args: any): MCPResponse {
    // Returns help text for the requested topic (defaults to 'overview'),
    // wrapped in a JSON-RPC result payload.
    const topic = args?.topic || 'overview';

    // These topics share their text with the gemini://help/* resources
    // via getHelpContent; 'models' and 'quick-start' are defined inline.
    const sharedTopics = ['overview', 'tools', 'parameters', 'examples'];

    let helpContent: string;
    if (sharedTopics.includes(topic)) {
      helpContent = this.getHelpContent(topic);
    } else if (topic === 'models') {
      helpContent = `# Available Gemini Models

## Thinking Models (Latest - 2.5 Series)
**gemini-2.5-pro**
- Most capable, best for complex reasoning
- 2M token context window
- Features: thinking, JSON mode, grounding, system instructions

**gemini-2.5-flash** ⭐ Recommended
- Best balance of speed and capability
- 1M token context window
- Features: thinking, JSON mode, grounding, system instructions

**gemini-2.5-flash-lite**
- Ultra-fast, cost-efficient
- 1M token context window
- Features: thinking, JSON mode, system instructions

## Standard Models (2.0 Series)
**gemini-2.0-flash**
- Fast and efficient
- 1M token context window
- Features: JSON mode, grounding, system instructions

**gemini-2.0-flash-lite**
- Most cost-efficient
- 1M token context window
- Features: JSON mode, system instructions

**gemini-2.0-pro-experimental**
- Excellent for coding
- 2M token context window
- Features: JSON mode, grounding, system instructions

## Model Selection Guide
- Complex reasoning: gemini-2.5-pro
- General use: gemini-2.5-flash
- Fast responses: gemini-2.5-flash-lite
- Cost-sensitive: gemini-2.0-flash-lite
- Coding tasks: gemini-2.0-pro-experimental`;
    } else if (topic === 'quick-start') {
      helpContent = `# Quick Start Guide

## 1. Basic Usage
Just ask naturally:
- "Use Gemini to [your request]"
- "Ask Gemini about [topic]"

## 2. Common Tasks

**Text Generation:**
"Use Gemini to write a function that sorts arrays"

**Image Analysis:**
"What's in this image?" [attach image]

**Model Info:**
"List all Gemini models"

**Token Counting:**
"Count tokens for my prompt"

## 3. Advanced Features

**JSON Output:**
"Use Gemini in JSON mode to extract key points"

**Current Information:**
"Use Gemini with grounding to get latest news"

**Conversations:**
"Start a chat with Gemini about Python"

## 4. Tips
- Use gemini-2.5-flash for most tasks
- Lower temperature for facts, higher for creativity
- Enable grounding for current information
- Use conversation IDs to maintain context

## Need More Help?
- "Get help on tools" - Detailed tool information
- "Get help on parameters" - All parameters explained
- "Get help on models" - Model selection guide`;
    } else {
      helpContent =
        'Unknown help topic. Available topics: overview, tools, models, parameters, examples, quick-start';
    }

    return {
      jsonrpc: '2.0',
      id,
      result: {
        content: [
          {
            type: 'text',
            text: helpContent
          }
        ]
      }
    };
  }

  private sendResponse(response: MCPResponse | any) {
    // Emits a single JSON-RPC message on stdout, newline-delimited as
    // required by the MCP stdio transport framing.
    const status = response.error ? '(ERROR)' : '(SUCCESS)';
    logger.response(`Sending response for ID: ${response.id} ${status}`);
    process.stdout.write(JSON.stringify(response) + '\n');
  }
}

// Start the server
try {
  logger.startup('Starting Gemini MCP Server...');
  logger.info('Environment configuration loaded');

  // Never log the full API key: show first 8 + last 4 characters, and
  // fully mask keys too short for that scheme to be safe.
  const key = config.geminiApiKey;
  const maskedKey =
    key.length > 12 ? `${key.substring(0, 8)}...${key.slice(-4)}` : '***masked***';

  logger.success(`API Key loaded: ${maskedKey}`);
  logger.info(`Log level: ${config.logLevel}`);
  logger.info(`Rate limiting: ${config.rateLimitEnabled ? 'enabled' : 'disabled'}`);

  if (config.rateLimitEnabled) {
    logger.info(`Rate limit: ${config.rateLimitRequests} requests per ${config.rateLimitWindow}ms`);
  }

  logger.startup('Initializing Gemini API connection...');

  // The server registers its stdio handlers in the constructor; the
  // instance itself is not needed afterwards.
  new EnhancedStdioMCPServer(config.geminiApiKey);

  logger.success('Gemini MCP Server started successfully!');
  logger.info('Server is ready to receive MCP requests');
  logger.info('Listening on stdio interface...');
  logger.success('You can now use the server with Claude Desktop or other MCP clients');

  // Graceful shutdown handling — shared between both POSIX signals.
  const shutdown = (signal: string) => {
    logger.info(`Received ${signal}, shutting down gracefully...`);
    rateLimiter.destroy();
    process.exit(0);
  };

  process.on('SIGINT', () => shutdown('SIGINT'));
  process.on('SIGTERM', () => shutdown('SIGTERM'));
} catch (error) {
  logger.error('Failed to start server:', error);
  process.exit(1);
}
