This is page 2 of 6. Use http://codebase.md/bsmi021/mcp-gemini-server?page={x} to view the full context.
# Directory Structure
```
├── .env.example
├── .eslintignore
├── .eslintrc.json
├── .gitignore
├── .prettierrc.json
├── Dockerfile
├── LICENSE
├── package-lock.json
├── package.json
├── README.md
├── review-prompt.txt
├── scripts
│ ├── gemini-review.sh
│ └── run-with-health-check.sh
├── smithery.yaml
├── src
│ ├── config
│ │ └── ConfigurationManager.ts
│ ├── createServer.ts
│ ├── index.ts
│ ├── resources
│ │ └── system-prompt.md
│ ├── server.ts
│ ├── services
│ │ ├── ExampleService.ts
│ │ ├── gemini
│ │ │ ├── GeminiCacheService.ts
│ │ │ ├── GeminiChatService.ts
│ │ │ ├── GeminiContentService.ts
│ │ │ ├── GeminiGitDiffService.ts
│ │ │ ├── GeminiPromptTemplates.ts
│ │ │ ├── GeminiTypes.ts
│ │ │ ├── GeminiUrlContextService.ts
│ │ │ ├── GeminiValidationSchemas.ts
│ │ │ ├── GitHubApiService.ts
│ │ │ ├── GitHubUrlParser.ts
│ │ │ └── ModelMigrationService.ts
│ │ ├── GeminiService.ts
│ │ ├── index.ts
│ │ ├── mcp
│ │ │ ├── index.ts
│ │ │ └── McpClientService.ts
│ │ ├── ModelSelectionService.ts
│ │ ├── session
│ │ │ ├── index.ts
│ │ │ ├── InMemorySessionStore.ts
│ │ │ ├── SessionStore.ts
│ │ │ └── SQLiteSessionStore.ts
│ │ └── SessionService.ts
│ ├── tools
│ │ ├── exampleToolParams.ts
│ │ ├── geminiCacheParams.ts
│ │ ├── geminiCacheTool.ts
│ │ ├── geminiChatParams.ts
│ │ ├── geminiChatTool.ts
│ │ ├── geminiCodeReviewParams.ts
│ │ ├── geminiCodeReviewTool.ts
│ │ ├── geminiGenerateContentConsolidatedParams.ts
│ │ ├── geminiGenerateContentConsolidatedTool.ts
│ │ ├── geminiGenerateImageParams.ts
│ │ ├── geminiGenerateImageTool.ts
│ │ ├── geminiGenericParamSchemas.ts
│ │ ├── geminiRouteMessageParams.ts
│ │ ├── geminiRouteMessageTool.ts
│ │ ├── geminiUrlAnalysisTool.ts
│ │ ├── index.ts
│ │ ├── mcpClientParams.ts
│ │ ├── mcpClientTool.ts
│ │ ├── registration
│ │ │ ├── index.ts
│ │ │ ├── registerAllTools.ts
│ │ │ ├── ToolAdapter.ts
│ │ │ └── ToolRegistry.ts
│ │ ├── schemas
│ │ │ ├── BaseToolSchema.ts
│ │ │ ├── CommonSchemas.ts
│ │ │ ├── index.ts
│ │ │ ├── ToolSchemas.ts
│ │ │ └── writeToFileParams.ts
│ │ └── writeToFileTool.ts
│ ├── types
│ │ ├── exampleServiceTypes.ts
│ │ ├── geminiServiceTypes.ts
│ │ ├── gitdiff-parser.d.ts
│ │ ├── googleGenAI.d.ts
│ │ ├── googleGenAITypes.ts
│ │ ├── index.ts
│ │ ├── micromatch.d.ts
│ │ ├── modelcontextprotocol-sdk.d.ts
│ │ ├── node-fetch.d.ts
│ │ └── serverTypes.ts
│ └── utils
│ ├── errors.ts
│ ├── filePathSecurity.ts
│ ├── FileSecurityService.ts
│ ├── geminiErrors.ts
│ ├── healthCheck.ts
│ ├── index.ts
│ ├── logger.ts
│ ├── RetryService.ts
│ ├── ToolError.ts
│ └── UrlSecurityService.ts
├── tests
│ ├── .env.test.example
│ ├── basic-router.test.vitest.ts
│ ├── e2e
│ │ ├── clients
│ │ │ └── mcp-test-client.ts
│ │ ├── README.md
│ │ └── streamableHttpTransport.test.vitest.ts
│ ├── integration
│ │ ├── dummyMcpServerSse.ts
│ │ ├── dummyMcpServerStdio.ts
│ │ ├── geminiRouterIntegration.test.vitest.ts
│ │ ├── mcpClientIntegration.test.vitest.ts
│ │ ├── multiModelIntegration.test.vitest.ts
│ │ └── urlContextIntegration.test.vitest.ts
│ ├── tsconfig.test.json
│ ├── unit
│ │ ├── config
│ │ │ └── ConfigurationManager.multimodel.test.vitest.ts
│ │ ├── server
│ │ │ └── transportLogic.test.vitest.ts
│ │ ├── services
│ │ │ ├── gemini
│ │ │ │ ├── GeminiChatService.test.vitest.ts
│ │ │ │ ├── GeminiGitDiffService.test.vitest.ts
│ │ │ │ ├── geminiImageGeneration.test.vitest.ts
│ │ │ │ ├── GeminiPromptTemplates.test.vitest.ts
│ │ │ │ ├── GeminiUrlContextService.test.vitest.ts
│ │ │ │ ├── GeminiValidationSchemas.test.vitest.ts
│ │ │ │ ├── GitHubApiService.test.vitest.ts
│ │ │ │ ├── GitHubUrlParser.test.vitest.ts
│ │ │ │ └── ThinkingBudget.test.vitest.ts
│ │ │ ├── mcp
│ │ │ │ └── McpClientService.test.vitest.ts
│ │ │ ├── ModelSelectionService.test.vitest.ts
│ │ │ └── session
│ │ │ └── SQLiteSessionStore.test.vitest.ts
│ │ ├── tools
│ │ │ ├── geminiCacheTool.test.vitest.ts
│ │ │ ├── geminiChatTool.test.vitest.ts
│ │ │ ├── geminiCodeReviewTool.test.vitest.ts
│ │ │ ├── geminiGenerateContentConsolidatedTool.test.vitest.ts
│ │ │ ├── geminiGenerateImageTool.test.vitest.ts
│ │ │ ├── geminiRouteMessageTool.test.vitest.ts
│ │ │ ├── mcpClientTool.test.vitest.ts
│ │ │ ├── mcpToolsTests.test.vitest.ts
│ │ │ └── schemas
│ │ │ ├── BaseToolSchema.test.vitest.ts
│ │ │ ├── ToolParamSchemas.test.vitest.ts
│ │ │ └── ToolSchemas.test.vitest.ts
│ │ └── utils
│ │ ├── errors.test.vitest.ts
│ │ ├── FileSecurityService.test.vitest.ts
│ │ ├── FileSecurityService.vitest.ts
│ │ ├── FileSecurityServiceBasics.test.vitest.ts
│ │ ├── healthCheck.test.vitest.ts
│ │ ├── RetryService.test.vitest.ts
│ │ └── UrlSecurityService.test.vitest.ts
│ └── utils
│ ├── assertions.ts
│ ├── debug-error.ts
│ ├── env-check.ts
│ ├── environment.ts
│ ├── error-helpers.ts
│ ├── express-mocks.ts
│ ├── integration-types.ts
│ ├── mock-types.ts
│ ├── test-fixtures.ts
│ ├── test-generators.ts
│ ├── test-setup.ts
│ └── vitest.d.ts
├── tsconfig.json
├── tsconfig.test.json
├── vitest-globals.d.ts
├── vitest.config.ts
└── vitest.setup.ts
```
# Files
--------------------------------------------------------------------------------
/src/tools/registration/registerAllTools.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Tool Registration - Central registration point for all tools
*
* This file uses the new ToolRegistry system to register all tools.
*/
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { ToolRegistry } from "./ToolRegistry.js";
import {
adaptServerOnlyTool,
adaptGeminiServiceTool,
adaptNewGeminiServiceToolObject,
adaptNewMcpClientServiceToolObject,
} from "./ToolAdapter.js";
import { logger } from "../../utils/logger.js";
import { GeminiService } from "../../services/GeminiService.js";
import { McpClientService } from "../../services/mcp/McpClientService.js";
// Import tool registration functions
import { geminiGenerateContentConsolidatedTool } from "../geminiGenerateContentConsolidatedTool.js";
import { geminiChatTool } from "../geminiChatTool.js";
import { geminiRouteMessageTool } from "../geminiRouteMessageTool.js";
// Image generation tools
import { geminiGenerateImageTool } from "../geminiGenerateImageTool.js";
// --- Cache Tools ---
import { geminiCacheTool } from "../geminiCacheTool.js";
// Code review tools
import {
geminiCodeReviewTool,
geminiCodeReviewStreamTool,
} from "../geminiCodeReviewTool.js";
import type { GeminiCodeReviewArgs } from "../geminiCodeReviewParams.js";
// URL Context tools
import { geminiUrlAnalysisTool } from "../geminiUrlAnalysisTool.js";
// MCP tools
import { mcpClientTool } from "../mcpClientTool.js";
// File utils tool
import { writeToFileTool } from "../writeToFileTool.js";
/**
* Register all tools with the MCP server using the new registry system
* @param server MCP server instance
* @returns McpClientService instance for managing connections
*/
/**
 * Register all tools with the MCP server using the registry system.
 *
 * Creates the GeminiService and McpClientService instances, wraps each tool
 * with the appropriate adapter, and registers everything on the server.
 * Registration failures are logged but intentionally not rethrown, so the
 * server can still start with whichever tools registered successfully.
 *
 * @param server MCP server instance
 * @returns McpClientService instance for managing connections (used by the
 *   caller for graceful shutdown)
 */
export function registerAllTools(server: McpServer): McpClientService {
  logger.info("Initializing services and tool registry...");

  // Create service instances shared by all registered tools
  const geminiService = new GeminiService();
  const mcpClientService = new McpClientService();

  // Create the tool registry
  const registry = new ToolRegistry(geminiService, mcpClientService);

  try {
    // Register all tools with appropriate adapters
    // Note: Example tool removed as per refactoring

    // Content generation tools
    registry.registerTool(
      adaptGeminiServiceTool(
        geminiGenerateContentConsolidatedTool,
        "geminiGenerateContentConsolidatedTool"
      )
    );

    // Chat tools
    registry.registerTool(
      adaptGeminiServiceTool(geminiChatTool, "geminiChatTool")
    );
    registry.registerTool(
      adaptGeminiServiceTool(geminiRouteMessageTool, "geminiRouteMessageTool")
    );

    // Image generation tools
    registry.registerTool(
      adaptNewGeminiServiceToolObject(geminiGenerateImageTool)
    );

    // Cache management tools
    registry.registerTool(
      adaptGeminiServiceTool(geminiCacheTool, "geminiCacheTool")
    );

    // URL Context tools
    registry.registerTool(
      adaptGeminiServiceTool(geminiUrlAnalysisTool, "geminiUrlAnalysisTool")
    );

    // Code review tools
    registry.registerTool(adaptNewGeminiServiceToolObject(geminiCodeReviewTool));

    // geminiCodeReviewStreamTool returns an AsyncGenerator, not a Promise, so
    // wrap it: drain the stream and return the final chunk, which contains the
    // complete result.
    registry.registerTool(
      adaptNewGeminiServiceToolObject({
        ...geminiCodeReviewStreamTool,
        execute: async (args: GeminiCodeReviewArgs, service: GeminiService) => {
          const results = [];
          const generator = await geminiCodeReviewStreamTool.execute(
            args,
            service
          );
          for await (const chunk of generator) {
            results.push(chunk);
          }
          // Guard against an empty stream: previously this silently returned
          // undefined, which surfaced to clients as an empty tool response.
          if (results.length === 0) {
            throw new Error(
              "geminiCodeReviewStreamTool produced no output chunks"
            );
          }
          // Return the last chunk which should contain the complete result
          return results[results.length - 1];
        },
      })
    );

    // MCP client tool
    registry.registerTool(
      adaptNewMcpClientServiceToolObject({
        ...mcpClientTool,
        execute: mcpClientTool.execute, // No cast needed
      })
    );

    // File utility tools
    registry.registerTool(
      adaptServerOnlyTool(writeToFileTool, "writeToFileTool")
    );

    // Register all tools with the server
    registry.registerAllTools(server);
  } catch (error) {
    logger.error(
      "Error registering tools:",
      error instanceof Error ? error.message : String(error)
    );
  }

  // Return the McpClientService instance for use in graceful shutdown
  return mcpClientService;
}
```
--------------------------------------------------------------------------------
/src/services/gemini/GitHubUrlParser.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Parser for GitHub URLs to extract repository, branch, PR, and issue information.
* Supports various GitHub URL formats including repository, branch, PR, PR files, and issue URLs.
*/
export class GitHubUrlParser {
  /**
   * Ordered table of URL matchers. Each entry pairs a regex with a builder
   * that turns the regex captures into a ParsedGitHubUrl. Order matters:
   * the PR-files pattern precedes the plain PR pattern so the more specific
   * form wins, matching the original sequential checks.
   */
  private static readonly matchers: ReadonlyArray<{
    pattern: RegExp;
    build: (m: RegExpMatchArray) => ParsedGitHubUrl;
  }> = [
    {
      // Repository URL, e.g. https://github.com/bsmi021/mcp-gemini-server
      pattern: /^https?:\/\/github\.com\/([^/]+)\/([^/]+)\/?$/,
      build: (m) => ({ type: "repository", owner: m[1], repo: m[2] }),
    },
    {
      // Branch URL, e.g. .../tree/feature/add-reasoning-effort-option
      // (branch names may themselves contain slashes)
      pattern:
        /^https?:\/\/github\.com\/([^/]+)\/([^/]+)\/tree\/([^/]+(?:\/[^/]+)*)\/?$/,
      build: (m) => ({
        type: "branch",
        owner: m[1],
        repo: m[2],
        branch: m[3],
      }),
    },
    {
      // PR files view, e.g. .../pull/2/files — checked before plain PR
      pattern:
        /^https?:\/\/github\.com\/([^/]+)\/([^/]+)\/pull\/(\d+)\/files\/?$/,
      build: (m) => ({
        type: "pr_files",
        owner: m[1],
        repo: m[2],
        prNumber: m[3],
        filesView: true,
      }),
    },
    {
      // Pull request URL, e.g. .../pull/2
      pattern: /^https?:\/\/github\.com\/([^/]+)\/([^/]+)\/pull\/(\d+)\/?$/,
      build: (m) => ({
        type: "pull_request",
        owner: m[1],
        repo: m[2],
        prNumber: m[3],
      }),
    },
    {
      // Issue URL, e.g. .../issues/5
      pattern: /^https?:\/\/github\.com\/([^/]+)\/([^/]+)\/issues\/(\d+)\/?$/,
      build: (m) => ({
        type: "issue",
        owner: m[1],
        repo: m[2],
        issueNumber: m[3],
      }),
    },
  ];

  /**
   * Parse a GitHub URL to extract repository, branch, PR, or issue information
   *
   * @param url GitHub URL to parse
   * @returns Object with parsed URL components or null if the URL is not a valid GitHub URL
   */
  public static parse(url: string): ParsedGitHubUrl | null {
    for (const { pattern, build } of this.matchers) {
      const match = url.match(pattern);
      if (match) {
        return build(match);
      }
    }
    // Not a recognized GitHub URL format
    return null;
  }

  /**
   * Validate if a URL is a recognized GitHub URL
   *
   * @param url URL to validate
   * @returns True if the URL is a valid GitHub URL, false otherwise
   */
  public static isValidGitHubUrl(url: string): boolean {
    return this.parse(url) !== null;
  }

  /**
   * Get the API endpoint for the GitHub URL
   *
   * @param url GitHub URL
   * @returns API endpoint for the URL or null if not a valid GitHub URL
   */
  public static getApiEndpoint(url: string): string | null {
    const parsed = this.parse(url);
    if (parsed === null) {
      return null;
    }
    const base = `repos/${parsed.owner}/${parsed.repo}`;
    if (parsed.type === "repository") {
      return base;
    }
    if (parsed.type === "branch") {
      // Branch names may contain characters needing escaping in a URL path
      return `${base}/branches/${encodeURIComponent(parsed.branch!)}`;
    }
    if (parsed.type === "pull_request" || parsed.type === "pr_files") {
      return `${base}/pulls/${parsed.prNumber}`;
    }
    if (parsed.type === "issue") {
      return `${base}/issues/${parsed.issueNumber}`;
    }
    return null;
  }

  /**
   * Extract repository information from a GitHub URL
   *
   * @param url GitHub URL
   * @returns Object with owner and repo name or null if not a valid GitHub URL
   */
  public static getRepositoryInfo(
    url: string
  ): { owner: string; repo: string } | null {
    const parsed = this.parse(url);
    return parsed === null
      ? null
      : { owner: parsed.owner, repo: parsed.repo };
  }
}
/**
 * Interface for parsed GitHub URL components.
 *
 * Only the fields relevant to the given `type` are populated:
 * - branch       → `branch`
 * - pull_request → `prNumber`
 * - pr_files     → `prNumber` and `filesView: true`
 * - issue        → `issueNumber`
 * `owner` and `repo` are always present.
 */
export interface ParsedGitHubUrl {
  type: "repository" | "branch" | "pull_request" | "pr_files" | "issue";
  owner: string;
  repo: string;
  // Branch name; may contain slashes (e.g. "feature/foo")
  branch?: string;
  // Kept as a string (regex capture), not parsed to a number
  prNumber?: string;
  // Kept as a string (regex capture), not parsed to a number
  issueNumber?: string;
  // True only for the "/pull/N/files" URL form
  filesView?: boolean;
}
```
--------------------------------------------------------------------------------
/src/tools/geminiRouteMessageParams.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from "zod";
// --- Reusable Schemas ---
// Based on src/tools/geminiGenerateContentParams.ts
// Mirrors the Gemini API SafetySetting shape: one harm category paired with
// the threshold at which content in that category is blocked.
const SafetySettingSchema = z
  .object({
    category: z
      .enum([
        "HARM_CATEGORY_UNSPECIFIED",
        "HARM_CATEGORY_HATE_SPEECH",
        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "HARM_CATEGORY_HARASSMENT",
        "HARM_CATEGORY_DANGEROUS_CONTENT",
      ])
      .describe("Category of harmful content to apply safety settings for."),
    threshold: z
      .enum([
        "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
        "BLOCK_LOW_AND_ABOVE",
        "BLOCK_MEDIUM_AND_ABOVE",
        "BLOCK_ONLY_HIGH",
        "BLOCK_NONE",
      ])
      .describe(
        "Threshold for blocking harmful content. Higher thresholds block more content."
      ),
  })
  .describe(
    "Setting for controlling content safety for a specific harm category."
  );
// Schema for thinking configuration
// Two complementary controls: a raw token budget (thinkingBudget) or a named
// preset (reasoningEffort). Both fields are optional, as is the whole object.
const ThinkingConfigSchema = z
  .object({
    thinkingBudget: z
      .number()
      .int()
      .min(0)
      .max(24576)
      .optional()
      .describe(
        "Controls the amount of reasoning the model performs. Range: 0-24576. Lower values provide faster responses, higher values improve complex reasoning."
      ),
    reasoningEffort: z
      .enum(["none", "low", "medium", "high"])
      .optional()
      .describe(
        "Simplified control over model reasoning. Options: none (0 tokens), low (1K tokens), medium (8K tokens), high (24K tokens)."
      ),
  })
  .optional()
  .describe("Optional configuration for controlling model reasoning.");
// Based on src/tools/geminiGenerateContentParams.ts
// Standard Gemini generation knobs (sampling, length limits, stop sequences)
// plus the nested thinking configuration defined above.
const GenerationConfigSchema = z
  .object({
    temperature: z
      .number()
      .min(0)
      .max(1)
      .optional()
      .describe(
        "Controls randomness. Lower values (~0.2) make output more deterministic, higher values (~0.8) make it more creative. Default varies by model."
      ),
    topP: z
      .number()
      .min(0)
      .max(1)
      .optional()
      .describe(
        "Nucleus sampling parameter. The model considers only tokens with probability mass summing to this value. Default varies by model."
      ),
    topK: z
      .number()
      .int()
      .min(1)
      .optional()
      .describe(
        "Top-k sampling parameter. The model considers the k most probable tokens. Default varies by model."
      ),
    maxOutputTokens: z
      .number()
      .int()
      .min(1)
      .optional()
      .describe("Maximum number of tokens to generate in the response."),
    stopSequences: z
      .array(z.string())
      .optional()
      .describe("Sequences where the API will stop generating further tokens."),
    thinkingConfig: ThinkingConfigSchema,
  })
  .describe("Optional configuration for controlling the generation process.");
// System instruction schema for Content object
// Accepts the structured Content form ({ parts: [{ text }] }); the tool
// params below also accept a plain string via z.union.
const SystemInstructionSchema = z
  .object({
    parts: z.array(
      z.object({
        text: z.string(),
      })
    ),
  })
  .optional()
  .describe("Optional. A system instruction to guide the model's behavior.");
// --- Tool Definition ---
export const GEMINI_ROUTE_MESSAGE_TOOL_NAME = "gemini_route_message";
export const GEMINI_ROUTE_MESSAGE_TOOL_DESCRIPTION = `Routes a message to the most appropriate model from a provided list based on message content. Returns the model's response along with which model was selected.`;
// Parameter map consumed by MCP tool registration. Each property is a zod
// schema; the server wraps this object with z.object() for validation
// (see the GeminiRouteMessageArgs helper below).
export const GEMINI_ROUTE_MESSAGE_PARAMS = {
  message: z
    .string()
    .min(1)
    .describe(
      "Required. The text message to be routed to the most appropriate model."
    ),
  models: z
    .array(z.string().min(1))
    .min(1)
    .describe(
      "Required. Array of model names to consider for routing (e.g., ['gemini-1.5-flash', 'gemini-1.5-pro']). The first model in the list will be used for routing decisions."
    ),
  routingPrompt: z
    .string()
    .min(1)
    .optional()
    .describe(
      "Optional. Custom prompt to use for routing decisions. If not provided, a default routing prompt will be used."
    ),
  defaultModel: z
    .string()
    .min(1)
    .optional()
    .describe(
      "Optional. Model to fall back to if routing fails. If not provided and routing fails, an error will be thrown."
    ),
  generationConfig: GenerationConfigSchema.optional().describe(
    "Optional. Generation configuration settings to apply to the selected model's response."
  ),
  safetySettings: z
    .array(SafetySettingSchema)
    .optional()
    .describe(
      "Optional. Safety settings to apply to both routing and final response."
    ),
  systemInstruction: z
    .union([z.string(), SystemInstructionSchema])
    .optional()
    .describe(
      "Optional. A system instruction to guide the model's behavior after routing."
    ),
};
// Type helper for arguments: the inferred argument object type after the
// params map is wrapped in z.object().
export type GeminiRouteMessageArgs = z.infer<
  z.ZodObject<typeof GEMINI_ROUTE_MESSAGE_PARAMS>
>;
```
--------------------------------------------------------------------------------
/src/tools/geminiCacheParams.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from "zod";
// Tool Name
export const GEMINI_CACHE_TOOL_NAME = "gemini_cache";
// Tool Description
export const GEMINI_CACHE_TOOL_DESCRIPTION = `
Manages cached content resources for use with the Gemini API. This consolidated tool supports five operations:
- create: Creates a new cached content resource for compatible models
- list: Lists cached content resources with pagination support
- get: Retrieves metadata for a specific cache
- update: Updates cache metadata (TTL and/or displayName)
- delete: Deletes a specific cache
NOTE: Caching is only supported for specific models (e.g., gemini-1.5-flash, gemini-1.5-pro).
`;
// Operation enum for cache actions
// Discriminator selecting which of the five cache operations to run; the
// remaining params are conditionally required based on this value.
export const cacheOperationSchema = z
  .enum(["create", "list", "get", "update", "delete"])
  .describe("The cache operation to perform");
// Import necessary schemas from ToolSchemas and define inline schemas
// A single content Part: plain text and/or base64-encoded inline binary data.
const partSchema = z.object({
  text: z.string().optional(),
  inlineData: z
    .object({
      mimeType: z.string(),
      data: z.string(),
    })
    .optional(),
});
// A Content entry: optional role plus an array of Parts, mirroring the
// SDK's Content structure.
const contentSchema = z.object({
  role: z.enum(["user", "model", "system"]).optional(),
  parts: z.array(partSchema),
});
// Function declaration schema (simplified from geminiChatParams)
const functionParameterTypeSchema = z
  .enum(["OBJECT", "STRING", "NUMBER", "BOOLEAN", "ARRAY", "INTEGER"])
  .describe("The data type of the function parameter.");
const baseFunctionParameterSchema = z.object({
  type: functionParameterTypeSchema,
  description: z.string().optional(),
  enum: z.array(z.string()).optional(),
});
// Recursive parameter type: OBJECT parameters nest `properties`, ARRAY
// parameters nest `items`. Declared explicitly because z.infer cannot
// resolve the self-reference on its own.
type FunctionParameterSchemaType = z.infer<
  typeof baseFunctionParameterSchema
> & {
  properties?: { [key: string]: FunctionParameterSchemaType };
  required?: string[];
  items?: FunctionParameterSchemaType;
};
// z.lazy defers evaluation so the schema can reference itself recursively.
const functionParameterSchema: z.ZodType<FunctionParameterSchemaType> =
  baseFunctionParameterSchema.extend({
    properties: z.lazy(() => z.record(functionParameterSchema).optional()),
    required: z.lazy(() => z.array(z.string()).optional()),
    items: z.lazy(() => functionParameterSchema.optional()),
  });
// A callable function exposed to the model: name, description, and an
// OBJECT-typed parameters schema.
const functionDeclarationSchema = z.object({
  name: z.string().min(1),
  description: z.string().min(1),
  parameters: z.object({
    type: z.literal("OBJECT"),
    properties: z.record(functionParameterSchema),
    required: z.array(z.string()).optional(),
  }),
});
const toolSchema = z.object({
  functionDeclarations: z.array(functionDeclarationSchema).optional(),
});
// Controls how the model may call the declared functions (AUTO/ANY/NONE),
// optionally restricted to an allow-list of function names.
const toolConfigSchema = z
  .object({
    functionCallingConfig: z
      .object({
        mode: z.enum(["AUTO", "ANY", "NONE"]).optional(),
        allowedFunctionNames: z.array(z.string()).optional(),
      })
      .optional(),
  })
  .optional();
// Main parameters schema with conditional fields based on operation
// Because zod cannot easily express "required iff operation === X", fields
// are individually optional here and each .describe() documents which
// operation(s) need it; the tool implementation enforces the conditions.
export const GEMINI_CACHE_PARAMS = {
  operation: cacheOperationSchema,
  // Fields for 'create' operation
  model: z
    .string()
    .min(1)
    .optional()
    .describe(
      "Optional for 'create'. The name/ID of the model compatible with caching (e.g., 'gemini-1.5-flash'). If omitted, uses server default."
    ),
  contents: z
    .array(contentSchema)
    .min(1)
    .optional()
    .describe(
      "Required for 'create'. The content to cache, matching the SDK's Content structure (an array of Parts)."
    ),
  displayName: z
    .string()
    .min(1)
    .max(100)
    .optional()
    .describe(
      "Optional for 'create' and 'update'. A human-readable name for the cache."
    ),
  systemInstruction: contentSchema
    .optional()
    .describe(
      "Optional for 'create'. System instructions to associate with the cache."
    ),
  ttl: z
    .string()
    .regex(
      /^\d+(\.\d+)?s$/,
      "TTL must be a duration string ending in 's' (e.g., '3600s', '7200.5s')"
    )
    .optional()
    .describe(
      "Optional for 'create' and 'update'. Time-to-live for the cache as a duration string (e.g., '3600s' for 1 hour). Max 48 hours."
    ),
  tools: z
    .array(toolSchema)
    .optional()
    .describe(
      "Optional for 'create'. A list of tools (e.g., function declarations) to associate with the cache."
    ),
  toolConfig: toolConfigSchema,
  // Fields for 'list' operation
  pageSize: z
    .number()
    .int()
    .positive()
    .max(1000)
    .optional()
    .describe(
      "Optional for 'list'. The maximum number of caches to return per page. Defaults to 100, max 1000."
    ),
  pageToken: z
    .string()
    .min(1)
    .optional()
    .describe(
      "Optional for 'list'. A token received from a previous listCaches call to retrieve the next page."
    ),
  // Fields for 'get', 'update', and 'delete' operations
  cacheName: z
    .string()
    .min(1)
    .optional()
    .describe(
      "Required for 'get', 'update', and 'delete'. The unique name/ID of the cache (e.g., 'cachedContents/abc123xyz')."
    ),
};
// Type helper: inferred argument object type once the params map is wrapped
// in z.object().
export type GeminiCacheArgs = z.infer<z.ZodObject<typeof GEMINI_CACHE_PARAMS>>;
```
--------------------------------------------------------------------------------
/tests/utils/environment.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Environment variable handling utilities for testing
*
* This module provides utilities for securely loading and managing environment
* variables for testing, particularly API keys and other sensitive configuration.
*/
import { existsSync } from "node:fs";
import { readFile, writeFile } from "node:fs/promises";
import { resolve } from "node:path";
import { parse } from "dotenv";
import "dotenv/config";
/**
 * Environment variables required for different types of tests.
 *
 * Keys are test categories; values are the env var names a test of that
 * category needs set. Pass one of these arrays to verifyEnvVars().
 */
export const REQUIRED_ENV_VARS = {
  // Basic API tests only need the API key
  BASIC: ["GOOGLE_GEMINI_API_KEY"],
  // File tests need the API key and a secure base directory
  FILE_TESTS: ["GOOGLE_GEMINI_API_KEY", "GEMINI_SAFE_FILE_BASE_DIR"],
  // Chat tests need the API key
  CHAT_TESTS: ["GOOGLE_GEMINI_API_KEY"],
  // Image tests need the API key and optionally image configs
  IMAGE_TESTS: ["GOOGLE_GEMINI_API_KEY"],
  // Router tests need the API key and at least two models to test routing between
  ROUTER_TESTS: ["GOOGLE_GEMINI_API_KEY", "GOOGLE_GEMINI_MODEL"],
  // All test types in a single array for convenience
  ALL: [
    "GOOGLE_GEMINI_API_KEY",
    "GOOGLE_GEMINI_MODEL",
    "GEMINI_SAFE_FILE_BASE_DIR",
    "GOOGLE_GEMINI_IMAGE_RESOLUTION",
    "GOOGLE_GEMINI_MAX_IMAGE_SIZE_MB",
    "GOOGLE_GEMINI_SUPPORTED_IMAGE_FORMATS",
  ],
};
/**
 * Load environment variables from a .env.test file if available.
 *
 * Variables already present in process.env are never overwritten, which
 * allows command-line overrides. A missing file is not an error; a warning
 * is printed and the existing environment is used.
 *
 * @returns Promise that resolves when environment is loaded
 */
export async function loadTestEnv(): Promise<void> {
  // Check for .env.test file in project root
  const envPath = resolve(process.cwd(), ".env.test");
  try {
    // Read directly and handle ENOENT, rather than existsSync-then-read,
    // which avoids a check-then-use race if the file disappears in between
    // and saves one filesystem call.
    const envContents = await readFile(envPath, "utf8");
    const envConfig = parse(envContents);
    // Apply the variables to the current environment, but don't overwrite
    // existing variables (which allows for command-line overrides)
    for (const [key, value] of Object.entries(envConfig)) {
      if (!process.env[key]) {
        process.env[key] = value;
      }
    }
    console.log(`Loaded test environment variables from ${envPath}`);
  } catch (error) {
    if ((error as NodeJS.ErrnoException).code === "ENOENT") {
      console.warn(
        ".env.test file not found, using existing environment variables"
      );
    } else {
      console.warn(
        `Failed to load .env.test file: ${(error as Error).message}`
      );
    }
  }
}
/**
 * Create a .env.test.example file with placeholders for required variables.
 *
 * Writes to the current working directory, overwriting any existing
 * .env.test.example. Failures are logged to console.error, not thrown.
 *
 * @returns Promise that resolves when the file is created
 */
export async function createEnvExample(): Promise<void> {
  // Define the example content
  const exampleContent = `# Test environment configuration
# Copy this file to .env.test and fill in your API keys and other settings
# Required: Google Gemini API key from Google AI Studio
GOOGLE_GEMINI_API_KEY=your_api_key_here
# Optional: Default model to use for tests (defaults to gemini-1.5-flash)
GOOGLE_GEMINI_MODEL=gemini-1.5-flash
# Optional: Base directory for file tests (defaults to current directory)
GEMINI_SAFE_FILE_BASE_DIR=${process.cwd()}
# Optional: Image generation settings
GOOGLE_GEMINI_IMAGE_RESOLUTION=1024x1024
GOOGLE_GEMINI_MAX_IMAGE_SIZE_MB=10
GOOGLE_GEMINI_SUPPORTED_IMAGE_FORMATS=["image/jpeg","image/png","image/webp"]
`;
  try {
    // Write the example file using the statically imported writeFile; the
    // previous dynamic import of node:fs/promises was redundant since the
    // module is already loaded at the top of this file for readFile.
    const examplePath = resolve(process.cwd(), ".env.test.example");
    await writeFile(examplePath, exampleContent, "utf8");
    console.log(`Created environment example file at ${examplePath}`);
  } catch (error) {
    console.error(
      `Failed to create .env.test.example file: ${(error as Error).message}`
    );
  }
}
/**
 * Check that every required environment variable is set.
 *
 * @param requiredVars - Names of environment variables that must be present
 *   (defaults to the basic test set)
 * @returns Success flag plus the list of unset variable names
 */
export function verifyEnvVars(
  requiredVars: string[] = REQUIRED_ENV_VARS.BASIC
): {
  success: boolean;
  missing: string[];
} {
  const missing: string[] = [];
  for (const name of requiredVars) {
    if (!process.env[name]) {
      missing.push(name);
    }
  }
  return { success: missing.length === 0, missing };
}
/**
 * Produce a safe fallback value for a missing environment variable.
 *
 * @param varName - Name of the environment variable
 * @returns A safe fallback value for known variables, "" for unknown ones
 */
export function getFallbackValue(varName: string): string {
  switch (varName) {
    case "GOOGLE_GEMINI_MODEL":
      return "gemini-1.5-flash";
    case "GOOGLE_GEMINI_IMAGE_RESOLUTION":
      return "512x512";
    case "GOOGLE_GEMINI_MAX_IMAGE_SIZE_MB":
      return "5";
    case "GOOGLE_GEMINI_SUPPORTED_IMAGE_FORMATS":
      return '["image/jpeg","image/png"]';
    case "GEMINI_SAFE_FILE_BASE_DIR":
      // Resolved at call time, matching the original lookup-table behavior
      return process.cwd();
    default:
      return "";
  }
}
/**
 * Safely gets an environment variable with fallback.
 *
 * Resolution order (truthy wins): process.env value, then the supplied
 * default, then the registered fallback for the variable name.
 *
 * @param varName - Name of the environment variable
 * @param defaultValue - Default value if not found
 * @returns The environment variable value or default/fallback
 */
export function getEnvVar(varName: string, defaultValue: string = ""): string {
  const fromEnv = process.env[varName];
  if (fromEnv) {
    return fromEnv;
  }
  if (defaultValue) {
    return defaultValue;
  }
  return getFallbackValue(varName);
}
```
--------------------------------------------------------------------------------
/scripts/gemini-review.sh:
--------------------------------------------------------------------------------
```bash
#!/bin/bash
# gemini-review.sh
# CLI script for reviewing git diffs with Gemini
# ANSI escape sequences used to colorize terminal output.
# These must be printed with `echo -e` (or printf) for the escapes to render.
GREEN='\033[0;32m'
BLUE='\033[0;34m'
RED='\033[0;31m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color
# Display a progress spinner while process $1 is alive.
# Liveness is tested with `kill -0`, which sends no signal and only checks
# the process exists. The previous `ps a | awk | grep $pid` approach could
# false-match other PIDs (e.g. PID 123 matching 1234) or other users'
# processes, leaving the spinner running forever or exiting early.
function spinner {
  local pid=$1
  local delay=0.1
  local spinstr='|/-\'
  while kill -0 "$pid" 2>/dev/null; do
    local temp=${spinstr#?}
    printf " [%c] " "$spinstr"
    # Rotate the spinner string one character to the left
    spinstr=$temp${spinstr%"$temp"}
    sleep "$delay"
    printf "\b\b\b\b\b\b"
  done
  printf " \b\b\b\b"
}
# Display help information
# Prints usage, the supported options, and invocation examples to stdout.
# `echo -e` is required on the first line so ${BLUE}/${NC} color escapes render.
function show_help {
  echo -e "${BLUE}Gemini Code Review CLI${NC}"
  echo "Usage: gemini-review [options] [git-diff-args]"
  echo ""
  echo "Options:"
  echo " --focus=FOCUS Focus of the review: security, performance, architecture, bugs, general (default)"
  echo " --model=MODEL Gemini model to use (defaults to server configuration)"
  echo " --reasoning=LEVEL Reasoning effort: none, low, medium (default), high"
  echo " --exclude=PATTERN Files to exclude (glob pattern, can be repeated)"
  echo " --help Show this help message"
  echo ""
  echo "Examples:"
  echo " gemini-review # Review all uncommitted changes"
  echo " gemini-review --focus=security HEAD~3.. # Security review of last 3 commits"
  echo " gemini-review src/ # Review changes in src directory"
  echo " gemini-review --reasoning=high # In-depth review with high reasoning effort"
  echo ""
}
# ---- Defaults --------------------------------------------------------------
SERVER_URL="http://localhost:3000"
FOCUS="general"
# NOTE: fixed default model ID — "gemini-flash-2.0" is not a published Gemini
# model name; the flash model's ID is "gemini-2.0-flash".
MODEL="gemini-2.0-flash" # Default to the cheaper Gemini 2.0 Flash model
REASONING="medium"
EXCLUDE_PATTERNS=""

# ---- Parse command line arguments ------------------------------------------
while [[ $# -gt 0 ]]; do
  case $1 in
    --help)
      show_help
      exit 0
      ;;
    --focus=*)
      FOCUS="${1#*=}"
      if [[ ! "$FOCUS" =~ ^(security|performance|architecture|bugs|general)$ ]]; then
        echo -e "${RED}Error: Invalid focus '${FOCUS}'${NC}"
        echo "Valid options: security, performance, architecture, bugs, general"
        exit 1
      fi
      shift
      ;;
    --model=*)
      MODEL="${1#*=}"
      shift
      ;;
    --reasoning=*)
      REASONING="${1#*=}"
      if [[ ! "$REASONING" =~ ^(none|low|medium|high)$ ]]; then
        echo -e "${RED}Error: Invalid reasoning level '${REASONING}'${NC}"
        echo "Valid options: none, low, medium, high"
        exit 1
      fi
      shift
      ;;
    --exclude=*)
      # Accumulate quoted patterns into a comma-separated JSON-ish list
      if [ -z "$EXCLUDE_PATTERNS" ]; then
        EXCLUDE_PATTERNS="\"${1#*=}\""
      else
        EXCLUDE_PATTERNS="$EXCLUDE_PATTERNS,\"${1#*=}\""
      fi
      shift
      ;;
    --server=*)
      SERVER_URL="${1#*=}"
      shift
      ;;
    *)
      # Remaining args are passed straight through to git diff
      break
      ;;
  esac
done

# ---- Prepare URL parameters -------------------------------------------------
URL_PARAMS="reviewFocus=$FOCUS&reasoningEffort=$REASONING"
if [ -n "$MODEL" ]; then
  URL_PARAMS="$URL_PARAMS&model=$MODEL"
fi
if [ -n "$EXCLUDE_PATTERNS" ]; then
  URL_PARAMS="$URL_PARAMS&excludePatterns=[$EXCLUDE_PATTERNS]"
fi

# ---- Display review information ---------------------------------------------
echo -e "${BLUE}Generating code review using Gemini...${NC}"
echo "Focus: $FOCUS"
echo "Reasoning effort: $REASONING"
if [ -n "$MODEL" ]; then
  echo "Model: $MODEL"
else
  echo "Model: Using server default"
fi
if [ -n "$EXCLUDE_PATTERNS" ]; then
  echo "Excluding: $EXCLUDE_PATTERNS"
fi

# ---- Generate the diff ------------------------------------------------------
echo -e "${YELLOW}Fetching git diff...${NC}"
# Pass any remaining args directly to git diff as separate words.
# The previous string-concat + eval approach broke arguments containing
# spaces and evaluated arbitrary shell metacharacters from argv.
DIFF_OUTPUT=$(git diff "$@")

# Check if there's any diff output
if [ -z "$DIFF_OUTPUT" ]; then
  echo -e "${YELLOW}No changes detected in the specified range.${NC}"
  exit 0
fi

# Check diff size and warn or exit if too large
DIFF_LENGTH=${#DIFF_OUTPUT}
MAX_SIZE_KB=1024 # 1MB limit (same as default in GeminiGitDiffService)
DIFF_SIZE_KB=$((DIFF_LENGTH / 1024))
echo "Diff size: $DIFF_SIZE_KB KB"
if [ "$DIFF_SIZE_KB" -gt "$MAX_SIZE_KB" ]; then
  echo -e "${RED}Warning: Diff size exceeds recommended limit ($DIFF_SIZE_KB KB > $MAX_SIZE_KB KB)${NC}"
  echo "Large diffs may result in incomplete review or API errors."
  read -p "Continue anyway? (y/n) " -n 1 -r
  echo
  if [[ ! $REPLY =~ ^[Yy]$ ]]; then
    echo "Operation cancelled."
    exit 1
  fi
fi

# ---- Send request to the API ------------------------------------------------
# jq is required to parse the JSON response; fail early with a clear message.
if ! command -v jq >/dev/null 2>&1; then
  echo -e "${RED}Error: jq is required but not installed.${NC}"
  exit 1
fi

echo -e "${YELLOW}Sending to Gemini for analysis...${NC}"
TEMP_FILE=$(mktemp)
(curl -s -X POST \
  -H "Content-Type: text/plain" \
  --data-binary "$DIFF_OUTPUT" \
  "$SERVER_URL/api/tools/geminiGitLocalDiffReview?$URL_PARAMS" > "$TEMP_FILE") &
# Show spinner while waiting for the background curl
spinner $!

# Check if the request was successful
if [ ! -s "$TEMP_FILE" ]; then
  echo -e "${RED}Error: No response received from the server.${NC}"
  echo "Please check that the server is running at $SERVER_URL"
  rm "$TEMP_FILE"
  exit 1
fi

# ---- Extract and display the review -----------------------------------------
REVIEW=$(jq -r '.review' "$TEMP_FILE")
MODEL_USED=$(jq -r '.model' "$TEMP_FILE")
EXECUTION_TIME=$(jq -r '.executionTime' "$TEMP_FILE")

echo -e "${GREEN}Review completed!${NC}"
echo "Model used: $MODEL_USED"
# %03d zero-pads the millisecond remainder; the old "sec.ms" concatenation
# printed e.g. 1050 ms as "1.50 seconds" instead of "1.050 seconds".
printf "Execution time: %d.%03d seconds\n" "$((EXECUTION_TIME / 1000))" "$((EXECUTION_TIME % 1000))"
echo ""
echo -e "${BLUE}=== CODE REVIEW ====${NC}"
echo "$REVIEW"

# Clean up
rm "$TEMP_FILE"
```
--------------------------------------------------------------------------------
/src/services/session/SQLiteSessionStore.ts:
--------------------------------------------------------------------------------
```typescript
import Database from "better-sqlite3";
import { SessionStore } from "./SessionStore.js";
import { SessionState } from "../SessionService.js";
import { logger } from "../../utils/logger.js";
import path from "path";
import { mkdir } from "fs/promises";
/**
* SQLite implementation of SessionStore.
* Stores sessions in a SQLite database for persistence.
*/
export class SQLiteSessionStore implements SessionStore {
  private db!: Database.Database;
  private readonly dbPath: string;
  // Statements prepared once in initialize() and reused by every operation.
  private preparedStatements: {
    insert?: Database.Statement;
    get?: Database.Statement;
    delete?: Database.Statement;
    deleteExpired?: Database.Statement;
    count?: Database.Statement;
  } = {};

  constructor(dbPath?: string) {
    // Default to a data directory in the project root
    this.dbPath = dbPath || path.join(process.cwd(), "data", "sessions.db");
  }

  /**
   * Opens (creating if necessary) the database, enables WAL mode, creates the
   * schema, prepares all statements, and purges sessions that expired while
   * the store was offline. Must be called before any other method.
   *
   * @throws Rethrows any filesystem or SQLite error after logging it.
   */
  async initialize(): Promise<void> {
    try {
      // Ensure the directory exists
      const dir = path.dirname(this.dbPath);
      await mkdir(dir, { recursive: true });
      // Open the database
      this.db = new Database(this.dbPath);
      logger.info(`SQLite session store initialized at: ${this.dbPath}`);
      // Enable WAL mode for better concurrency and performance
      this.db.pragma("journal_mode = WAL");
      logger.debug("SQLite WAL mode enabled");
      // Create the sessions table if it doesn't exist.
      // Timestamps are stored as epoch milliseconds (INTEGER).
      this.db.exec(`
        CREATE TABLE IF NOT EXISTS sessions (
          id TEXT PRIMARY KEY,
          created_at INTEGER NOT NULL,
          last_activity INTEGER NOT NULL,
          expires_at INTEGER NOT NULL,
          data TEXT NOT NULL
        );
        -- Index for efficient cleanup of expired sessions
        CREATE INDEX IF NOT EXISTS idx_sessions_expires_at
          ON sessions(expires_at);
      `);
      // Prepare statements for better performance
      this.preparedStatements.insert = this.db.prepare(`
        INSERT OR REPLACE INTO sessions (id, created_at, last_activity, expires_at, data)
        VALUES (@id, @createdAt, @lastActivity, @expiresAt, @data)
      `);
      this.preparedStatements.get = this.db.prepare(`
        SELECT * FROM sessions WHERE id = ?
      `);
      this.preparedStatements.delete = this.db.prepare(`
        DELETE FROM sessions WHERE id = ?
      `);
      this.preparedStatements.deleteExpired = this.db.prepare(`
        DELETE FROM sessions WHERE expires_at < ?
      `);
      this.preparedStatements.count = this.db.prepare(`
        SELECT COUNT(*) as count FROM sessions
      `);
      // Clean up any expired sessions on startup
      const now = Date.now();
      const deleted = await this.deleteExpired(now);
      if (deleted > 0) {
        logger.info(`Cleaned up ${deleted} expired sessions on startup`);
      }
    } catch (error) {
      logger.error("Failed to initialize SQLite session store:", error);
      throw error;
    }
  }

  /**
   * Inserts or replaces a session row. The session's `data` payload is
   * serialized to JSON.
   */
  async set(sessionId: string, session: SessionState): Promise<void> {
    if (!this.preparedStatements.insert) {
      throw new Error("SQLite session store not initialized");
    }
    try {
      this.preparedStatements.insert.run({
        // Key the row by the caller-supplied sessionId so get()/delete() with
        // the same id always find it. (Previously session.id was used, which
        // could diverge from the lookup key and strand the row.)
        id: sessionId,
        createdAt: session.createdAt,
        lastActivity: session.lastActivity,
        expiresAt: session.expiresAt,
        data: JSON.stringify(session.data),
      });
    } catch (error) {
      logger.error(`Failed to save session ${sessionId}:`, error);
      throw error;
    }
  }

  /**
   * Fetches a session by id, or null when no row exists. Expiry is NOT
   * checked here; expired rows are removed by deleteExpired().
   */
  async get(sessionId: string): Promise<SessionState | null> {
    if (!this.preparedStatements.get) {
      throw new Error("SQLite session store not initialized");
    }
    try {
      const row = this.preparedStatements.get.get(sessionId) as
        | {
            id: string;
            created_at: number;
            last_activity: number;
            expires_at: number;
            data: string;
          }
        | undefined;
      if (!row) {
        return null;
      }
      // Map snake_case columns back to the camelCase SessionState shape.
      return {
        id: row.id,
        createdAt: row.created_at,
        lastActivity: row.last_activity,
        expiresAt: row.expires_at,
        data: JSON.parse(row.data),
      };
    } catch (error) {
      logger.error(`Failed to get session ${sessionId}:`, error);
      throw error;
    }
  }

  /** Deletes a session by id; returns true when a row was actually removed. */
  async delete(sessionId: string): Promise<boolean> {
    if (!this.preparedStatements.delete) {
      throw new Error("SQLite session store not initialized");
    }
    try {
      const result = this.preparedStatements.delete.run(sessionId);
      return result.changes > 0;
    } catch (error) {
      logger.error(`Failed to delete session ${sessionId}:`, error);
      throw error;
    }
  }

  /**
   * Deletes all sessions whose expires_at is strictly before `now`
   * (epoch ms); returns the number of rows removed.
   */
  async deleteExpired(now: number): Promise<number> {
    if (!this.preparedStatements.deleteExpired) {
      throw new Error("SQLite session store not initialized");
    }
    try {
      const result = this.preparedStatements.deleteExpired.run(now);
      return result.changes;
    } catch (error) {
      logger.error("Failed to delete expired sessions:", error);
      throw error;
    }
  }

  /** Returns the total number of stored sessions (expired rows included). */
  async count(): Promise<number> {
    if (!this.preparedStatements.count) {
      throw new Error("SQLite session store not initialized");
    }
    try {
      const result = this.preparedStatements.count.get() as { count: number };
      return result.count;
    } catch (error) {
      logger.error("Failed to count sessions:", error);
      throw error;
    }
  }

  /** Closes the underlying database handle; safe to call when never opened. */
  async close(): Promise<void> {
    if (this.db) {
      this.db.close();
      logger.info("SQLite session store closed");
    }
  }
}
```
--------------------------------------------------------------------------------
/tests/utils/test-fixtures.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Test fixtures and mock data for MCP Gemini Server tests
*
* This file provides commonly used test data, mock objects, and sample responses
* to be reused across different test files.
*/
import path from "node:path";
import fs from "node:fs/promises";
// Common test data
/**
 * Shared test data: prompts, model names, system instructions, chat history,
 * image prompts, and a sample function declaration in Gemini tool format.
 */
export const TEST_DATA = {
  // Sample prompts for content generation tests
  PROMPTS: {
    SIMPLE: "Tell me about artificial intelligence",
    CODE: "Write a JavaScript function to reverse a string",
    UNSAFE: "Generate harmful content that violates policies",
  },
  // Sample model names
  MODELS: {
    PRO: "gemini-1.5-pro",
    FLASH: "gemini-1.5-flash",
    GEMINI_2: "gemini-2.5-pro-preview-05-06",
    UNSUPPORTED: "gemini-unsupported-model",
  },
  // Sample system instructions
  SYSTEM_INSTRUCTIONS: {
    DEFAULT: "You are a helpful AI assistant.",
    SPECIFIC:
      "You are an expert on climate science. Provide detailed, accurate information.",
  },
  // Sample chat messages (Gemini Content format: role + parts array)
  CHAT_MESSAGES: [
    { role: "user", parts: [{ text: "Hello" }] },
    { role: "model", parts: [{ text: "Hi there! How can I help you today?" }] },
    { role: "user", parts: [{ text: "Tell me about TypeScript" }] },
  ],
  // Sample image prompts
  IMAGE_PROMPTS: {
    LANDSCAPE: "A beautiful mountain landscape with a lake at sunset",
    CITYSCAPE: "A futuristic cityscape with flying cars and neon lights",
    UNSAFE: "Graphic violence scene with weapons",
  },
  // Sample function declarations (JSON-schema style parameters)
  FUNCTION_DECLARATIONS: [
    {
      name: "get_weather",
      description: "Get the current weather in a given location",
      parameters: {
        type: "object",
        properties: {
          location: {
            type: "string",
            description: "The city and state, e.g. San Francisco, CA",
          },
          unit: {
            type: "string",
            enum: ["celsius", "fahrenheit"],
            description: "The unit of temperature",
          },
        },
        required: ["location"],
      },
    },
  ],
};
/**
* Gets the absolute path to a test resource file
*
* @param relativePath - Path relative to the test resources directory
* @returns Absolute path to the resource file
*/
/**
 * Resolve the absolute path of a file under the test resources directory
 * (`<cwd>/tests/resources`).
 *
 * @param relativePath - Path relative to the test resources directory
 * @returns Absolute path to the resource file
 */
export function getTestResourcePath(relativePath: string): string {
  const resourcesRoot = path.resolve(process.cwd(), "tests", "resources");
  return path.resolve(resourcesRoot, relativePath);
}
/**
* Load a sample image file as a base64 string
*
* @param imageName - Name of the image file in the resources directory
* @returns Promise resolving to base64-encoded image data
*/
/**
 * Read a sample image from the test resources `images/` directory and return
 * its contents base64-encoded.
 *
 * @param imageName - Name of the image file in the resources directory
 * @returns Promise resolving to base64-encoded image data
 */
export async function loadSampleImage(imageName: string): Promise<string> {
  const absolutePath = getTestResourcePath(`images/${imageName}`);
  const contents = await fs.readFile(absolutePath);
  return contents.toString("base64");
}
/**
* Create a resources directory and ensure sample test files are available
*
* @returns Promise resolving when resources are ready
*/
/**
 * Create the test resources directory tree (`tests/resources`, plus `images`
 * and `audio` subdirectories) if it does not already exist.
 *
 * @returns Promise resolving when resources are ready
 */
export async function ensureTestResources(): Promise<void> {
  const resourcesDir = path.resolve(process.cwd(), "tests", "resources");
  const requiredDirs = [
    resourcesDir,
    path.join(resourcesDir, "images"),
    path.join(resourcesDir, "audio"),
  ];
  // mkdir with recursive: true is a no-op for directories that already exist.
  for (const dir of requiredDirs) {
    await fs.mkdir(dir, { recursive: true });
  }
  // TODO: Add sample test files when needed
  // This function can be extended to download or create sample files for testing
}
/**
* Mock HTTP client for testing without making real API calls
*/
export const mockHttpClient = {
  // Mock successful content generation response
  // (shape mirrors the Gemini generateContent REST payload)
  successfulContentResponse: {
    data: {
      candidates: [
        {
          content: {
            parts: [{ text: "This is a mock response from the Gemini API." }],
            role: "model",
          },
          finishReason: "STOP",
          index: 0,
          safetyRatings: [],
        },
      ],
      promptFeedback: {
        safetyRatings: [],
      },
    },
    status: 200,
  },
  // Mock error response for safety blocks
  // (carries both an `error` object and a SAFETY blockReason in promptFeedback)
  safetyBlockedResponse: {
    data: {
      error: {
        code: 400,
        message: "Content blocked due to safety settings",
        status: "INVALID_ARGUMENT",
      },
      promptFeedback: {
        blockReason: "SAFETY",
        safetyRatings: [
          {
            category: "HARM_CATEGORY_HATE_SPEECH",
            probability: "HIGH",
          },
        ],
      },
    },
    status: 400,
  },
  // Mock authentication error response
  authErrorResponse: {
    data: {
      error: {
        code: 401,
        message: "Invalid API key",
        status: "UNAUTHENTICATED",
      },
    },
    status: 401,
  },
};
/**
* Sample image generation responses for testing
*/
export const mockImageResponses = {
  // Mock successful image generation response
  // (base64Data is a truncated placeholder, not a decodable JPEG)
  successfulImageGeneration: {
    data: {
      images: [
        {
          base64Data: "/9j/4AAQSkZJRgABAQAAAQABAAD...",
          mimeType: "image/jpeg",
          width: 1024,
          height: 1024,
        },
      ],
    },
    status: 200,
  },
  // Mock object detection response
  // (bounding boxes in pixel coordinates, confidence in [0, 1])
  objectDetectionResponse: {
    objects: [
      {
        label: "dog",
        boundingBox: {
          xMin: 100,
          yMin: 200,
          xMax: 300,
          yMax: 400,
        },
        confidence: 0.98,
      },
      {
        label: "cat",
        boundingBox: {
          xMin: 500,
          yMin: 300,
          xMax: 700,
          yMax: 450,
        },
        confidence: 0.95,
      },
    ],
  },
};
```
--------------------------------------------------------------------------------
/src/tools/geminiRouteMessageTool.ts:
--------------------------------------------------------------------------------
```typescript
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
import {
GEMINI_ROUTE_MESSAGE_TOOL_NAME,
GEMINI_ROUTE_MESSAGE_TOOL_DESCRIPTION,
GEMINI_ROUTE_MESSAGE_PARAMS,
GeminiRouteMessageArgs, // Import the type helper
} from "./geminiRouteMessageParams.js";
import { GeminiService } from "../services/index.js";
import { logger } from "../utils/index.js";
import { mapAnyErrorToMcpError } from "../utils/errors.js";
// Import SDK types used in parameters/response handling
import { BlockedReason, FinishReason } from "@google/genai"; // Import enums as values
import type { GenerationConfig, SafetySetting } from "@google/genai";
/**
* Registers the gemini_routeMessage tool with the MCP server.
*
* @param server - The McpServer instance.
* @param serviceInstance - An instance of the GeminiService.
*/
export const geminiRouteMessageTool = (
  server: McpServer,
  serviceInstance: GeminiService
): void => {
  /**
   * Processes the request for the gemini_routeMessage tool.
   *
   * Routes the user's message via GeminiService.routeMessage, then triages the
   * SDK response in order: prompt-level safety block, candidate-level finish
   * reason, then text extraction.
   *
   * @param args - The arguments object matching GEMINI_ROUTE_MESSAGE_PARAMS.
   * @returns The result containing the model's response and the chosen model name.
   */
  const processRequest = async (args: unknown): Promise<CallToolResult> => {
    // NOTE(review): args are cast, not schema-validated here; presumably the
    // MCP server validates against GEMINI_ROUTE_MESSAGE_PARAMS first — confirm.
    const typedArgs = args as GeminiRouteMessageArgs;
    // Log only a 50-char preview of the message to keep logs readable.
    logger.debug(
      `Received ${GEMINI_ROUTE_MESSAGE_TOOL_NAME} request with message: "${typedArgs.message.substring(0, 50)}${typedArgs.message.length > 50 ? "..." : ""}"`
    );
    try {
      // Destructure all arguments
      const {
        message,
        models,
        routingPrompt,
        defaultModel,
        generationConfig,
        safetySettings,
        systemInstruction,
      } = typedArgs;
      // Call the service to route the message
      const { response, chosenModel } = await serviceInstance.routeMessage({
        message,
        models,
        routingPrompt,
        defaultModel,
        generationConfig: generationConfig as GenerationConfig | undefined,
        safetySettings: safetySettings as SafetySetting[] | undefined,
        systemInstruction,
      });
      // --- Process the SDK Response into MCP Format ---
      // Check for prompt safety blocks first
      if (response.promptFeedback?.blockReason === BlockedReason.SAFETY) {
        logger.warn(`Gemini prompt blocked due to SAFETY during routing.`);
        // Return an error-like response via MCP content
        return {
          content: [
            {
              type: "text",
              text: `Error: Prompt blocked due to safety settings. Reason: ${response.promptFeedback.blockReason}`,
            },
          ],
          isError: true, // Indicate an error occurred
        };
      }
      const firstCandidate = response?.candidates?.[0];
      // Check for candidate safety blocks or other non-STOP finish reasons.
      // STOP and MAX_TOKENS are treated as acceptable completions.
      if (
        firstCandidate?.finishReason &&
        firstCandidate.finishReason !== FinishReason.STOP &&
        firstCandidate.finishReason !== FinishReason.MAX_TOKENS
      ) {
        if (firstCandidate.finishReason === FinishReason.SAFETY) {
          logger.warn(`Gemini response stopped due to SAFETY during routing.`);
          return {
            content: [
              {
                type: "text",
                text: `Error: Response generation stopped due to safety settings. FinishReason: ${firstCandidate.finishReason}`,
              },
            ],
            isError: true,
          };
        }
        // Handle other potentially problematic finish reasons: logged as a
        // warning but processing continues to text extraction below.
        logger.warn(
          `Gemini response finished with reason ${firstCandidate.finishReason} during routing.`
        );
      }
      let responseText: string | undefined;
      // Extract text from the response parts
      if (firstCandidate?.content?.parts) {
        // Concatenate text parts, ignoring non-text parts (function calls etc.)
        responseText = firstCandidate.content.parts
          .filter((part) => typeof part.text === "string")
          .map((part) => part.text)
          .join("");
      }
      // Format the MCP response content as a JSON string payload
      if (responseText !== undefined) {
        // Return both the routed response and the chosen model
        logger.debug(`Returning routed response from model ${chosenModel}`);
        return {
          content: [
            {
              type: "text",
              text: JSON.stringify({
                text: responseText,
                chosenModel: chosenModel,
              }),
            },
          ],
        };
      } else {
        // Handle cases where there's no candidate or no parts, but no explicit error/block
        logger.warn(
          `No text found in Gemini response for routing, finishReason: ${firstCandidate?.finishReason}. Returning empty content.`
        );
        return {
          content: [
            {
              type: "text",
              text: JSON.stringify({
                text: "",
                chosenModel: chosenModel,
              }),
            },
          ],
        };
      }
    } catch (error: unknown) {
      logger.error(
        `Error processing ${GEMINI_ROUTE_MESSAGE_TOOL_NAME}:`,
        error
      );
      // Use the centralized error mapping utility to ensure consistent error handling
      throw mapAnyErrorToMcpError(error, GEMINI_ROUTE_MESSAGE_TOOL_NAME);
    }
  };
  // Register the tool
  server.tool(
    GEMINI_ROUTE_MESSAGE_TOOL_NAME,
    GEMINI_ROUTE_MESSAGE_TOOL_DESCRIPTION,
    GEMINI_ROUTE_MESSAGE_PARAMS,
    processRequest
  );
  logger.info(`Tool registered: ${GEMINI_ROUTE_MESSAGE_TOOL_NAME}`);
};
```
--------------------------------------------------------------------------------
/src/tools/mcpClientTool.ts:
--------------------------------------------------------------------------------
```typescript
import { McpClientService, ConnectionDetails } from "../services/index.js";
import { logger } from "../utils/index.js";
import {
TOOL_NAME_MCP_CLIENT,
TOOL_DESCRIPTION_MCP_CLIENT,
MCP_CLIENT_PARAMS,
McpClientArgs,
} from "./mcpClientParams.js";
import { mapAnyErrorToMcpError } from "../utils/errors.js";
import { ConfigurationManager } from "../config/ConfigurationManager.js";
import { v4 as uuidv4 } from "uuid";
import { writeToFile } from "./writeToFileTool.js";
/**
* Handles MCP client operations including connect, disconnect, list tools, and call tool.
* The operation is determined by the operation parameter.
*/
/**
 * MCP client tool: a discriminated-union dispatcher over `args.operation`
 * supporting connect (stdio/SSE), disconnect, list_tools and call_tool.
 * Errors are mapped to McpError via mapAnyErrorToMcpError.
 */
export const mcpClientTool = {
  name: TOOL_NAME_MCP_CLIENT,
  description: TOOL_DESCRIPTION_MCP_CLIENT,
  inputSchema: MCP_CLIENT_PARAMS,
  execute: async (args: McpClientArgs, mcpClientService: McpClientService) => {
    logger.debug(`Received ${TOOL_NAME_MCP_CLIENT} request:`, {
      operation: args.operation,
    });
    try {
      switch (args.operation) {
        case "connect_stdio":
        case "connect_sse": {
          // Get the MCP config for default values
          const mcpConfig = ConfigurationManager.getInstance().getMcpConfig();
          // Get clientId from args or config
          const clientId = args.clientId || mcpConfig.clientId;
          logger.info(
            `Establishing MCP connection using ${args.transport} transport with client ID: ${clientId}`
          );
          // Create a unique server ID for this connection
          const serverId = uuidv4();
          // Prepare connection details object; stdio and SSE transports carry
          // different fields (command/args vs url).
          const connectionDetailsObject: ConnectionDetails = {
            type: args.transport,
            connectionToken: args.connectionToken || mcpConfig.connectionToken,
            ...(args.transport === "stdio"
              ? {
                  stdioCommand: args.command,
                  stdioArgs: args.args || [],
                }
              : {
                  sseUrl: args.url,
                }),
          };
          // Connect to the server
          const connectionId = await mcpClientService.connect(
            serverId,
            connectionDetailsObject
          );
          // Get server info after successful connection
          const serverInfo = await mcpClientService.getServerInfo(connectionId);
          return {
            content: [
              {
                type: "text",
                text: `Successfully connected to MCP server`,
              },
              {
                type: "text",
                text: JSON.stringify(
                  {
                    connectionId,
                    serverId,
                    transport: args.transport,
                    connectionType: connectionDetailsObject.type,
                    serverInfo,
                  },
                  null,
                  2
                ),
              },
            ],
          };
        }
        case "disconnect": {
          // Disconnect from the server
          await mcpClientService.disconnect(args.connectionId);
          return {
            content: [
              {
                type: "text",
                text: `Successfully disconnected from MCP server`,
              },
              {
                type: "text",
                text: JSON.stringify(
                  {
                    connectionId: args.connectionId,
                    status: "disconnected",
                  },
                  null,
                  2
                ),
              },
            ],
          };
        }
        case "list_tools": {
          // List tools from the connected server
          const tools = await mcpClientService.listTools(args.connectionId);
          return {
            content: [
              {
                type: "text",
                text: `Available tools on connection ${args.connectionId}:`,
              },
              {
                type: "text",
                text: JSON.stringify(tools, null, 2),
              },
            ],
          };
        }
        case "call_tool": {
          // Call a tool on the connected server
          const result = await mcpClientService.callTool(
            args.connectionId,
            args.toolName,
            args.toolParameters || {}
          );
          // Optionally persist the result to a file instead of returning it.
          if (args.outputFilePath) {
            await writeToFile.execute({
              filePath: args.outputFilePath,
              content:
                typeof result === "string"
                  ? result
                  : JSON.stringify(result, null, 2),
              // FIX: writeToFile reads `overwriteFile` (see writeToFileTool);
              // the previous `overwriteIfExists` key was silently ignored, so
              // the overwrite flag never took effect.
              overwriteFile: args.overwriteFile,
            });
            return {
              content: [
                {
                  type: "text",
                  text: `Tool ${args.toolName} executed successfully. Output written to: ${args.outputFilePath}`,
                },
              ],
            };
          }
          // Return the result directly
          return {
            content: [
              {
                type: "text",
                text:
                  typeof result === "string"
                    ? result
                    : JSON.stringify(result, null, 2),
              },
            ],
          };
        }
        default:
          // Unreachable if the discriminated union is exhaustive; kept as a
          // runtime guard against unvalidated input.
          throw new Error(`Unknown operation: ${JSON.stringify(args)}`);
      }
    } catch (error: unknown) {
      logger.error(`Error processing ${TOOL_NAME_MCP_CLIENT}:`, error);
      throw mapAnyErrorToMcpError(error, TOOL_NAME_MCP_CLIENT);
    }
  },
};
```
--------------------------------------------------------------------------------
/src/types/geminiServiceTypes.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from "zod";
/**
* Type definitions specific to the GeminiService.
*/
/**
 * Capability profile for a single Gemini model: supported modalities,
 * qualitative tiers for reasoning/cost/speed, and hard limits.
 */
export interface ModelCapabilities {
  textGeneration: boolean;
  imageInput: boolean;
  videoInput: boolean;
  audioInput: boolean;
  imageGeneration: boolean;
  videoGeneration: boolean;
  codeExecution: "none" | "basic" | "good" | "excellent";
  complexReasoning: "none" | "basic" | "good" | "excellent";
  costTier: "low" | "medium" | "high";
  speedTier: "fast" | "medium" | "slow";
  maxTokens: number;
  contextWindow: number;
  supportsFunctionCalling: boolean;
  supportsSystemInstructions: boolean;
  supportsCaching: boolean;
}
/** Map from model name to its capability profile. */
export type ModelCapabilitiesMap = Record<string, ModelCapabilities>;
/**
 * Interface for results returned by image generation.
 * Includes the generated images in base64 format with metadata.
 */
export interface ImageGenerationResult {
  images: Array<{
    base64Data: string;
    mimeType: string;
    width: number;
    height: number;
  }>;
  // Optional safety metadata about the prompt that produced the images.
  promptSafetyMetadata?: {
    blocked: boolean;
    reasons?: string[];
    safetyRatings?: Array<{
      category: string;
      severity:
        | "SEVERITY_UNSPECIFIED"
        | "HARM_CATEGORY_DEROGATORY"
        | "HARM_CATEGORY_TOXICITY"
        | "HARM_CATEGORY_VIOLENCE"
        | "HARM_CATEGORY_SEXUAL"
        | "HARM_CATEGORY_MEDICAL"
        | "HARM_CATEGORY_DANGEROUS"
        | "HARM_CATEGORY_HARASSMENT"
        | "HARM_CATEGORY_HATE_SPEECH"
        | "HARM_CATEGORY_SEXUALLY_EXPLICIT"
        | "HARM_CATEGORY_DANGEROUS_CONTENT";
      probability:
        | "PROBABILITY_UNSPECIFIED"
        | "NEGLIGIBLE"
        | "LOW"
        | "MEDIUM"
        | "HIGH";
    }>;
  };
}
/**
 * Per-task model lists plus routing preferences used for model selection.
 */
export interface ModelConfiguration {
  default: string;
  textGeneration: string[];
  imageGeneration: string[];
  videoGeneration: string[];
  codeReview: string[];
  complexReasoning: string[];
  capabilities: ModelCapabilitiesMap;
  routing: {
    preferCostEffective: boolean;
    preferSpeed: boolean;
    preferQuality: boolean;
  };
}
/**
 * Criteria supplied to the model selection service when choosing a model
 * for a task.
 */
export interface ModelSelectionCriteria {
  taskType:
    | "text-generation"
    | "image-generation"
    | "video-generation"
    | "code-review"
    | "multimodal"
    | "reasoning";
  complexityLevel?: "simple" | "medium" | "complex";
  preferCost?: boolean;
  preferSpeed?: boolean;
  preferQuality?: boolean;
  requiredCapabilities?: (keyof ModelCapabilities)[];
  fallbackModel?: string;
  urlCount?: number;
  estimatedUrlContentSize?: number;
}
/** A candidate model together with its selection score and capabilities. */
export interface ModelScore {
  model: string;
  score: number;
  capabilities: ModelCapabilities;
}
/** Record of one model-selection decision, for auditing/debugging. */
export interface ModelSelectionHistory {
  timestamp: Date;
  criteria: ModelSelectionCriteria;
  selectedModel: string;
  candidateModels: string[];
  scores: ModelScore[];
  selectionTime: number;
}
/** Aggregated runtime metrics for a model. */
export interface ModelPerformanceMetrics {
  totalCalls: number;
  avgLatency: number;
  successRate: number;
  lastUpdated: Date;
}
/**
 * Configuration interface for the GeminiService.
 * Contains API key, model settings, and image processing configurations.
 */
export interface GeminiServiceConfig {
  apiKey: string;
  defaultModel?: string;
  defaultImageResolution?: "512x512" | "1024x1024" | "1536x1536";
  maxImageSizeMB: number;
  supportedImageFormats: string[];
  defaultThinkingBudget?: number;
  modelConfiguration?: ModelConfiguration;
}
/**
 * Represents the metadata of cached content managed by the Gemini API.
 * Based on the structure returned by the @google/genai SDK's Caching API.
 */
export interface CachedContentMetadata {
  name: string; // e.g., "cachedContents/abc123xyz"
  displayName?: string;
  model?: string; // Model name this cache is tied to
  createTime: string; // ISO 8601 format string
  updateTime: string; // ISO 8601 format string
  expirationTime?: string; // ISO 8601 format string (renamed from expireTime)
  state?: string; // State of the cached content (e.g., "ACTIVE")
  usageMetadata?: {
    totalTokenCount?: number;
  };
}
// Inline binary data part: base64 payload plus its MIME type.
const BlobSchema = z
  .object({
    mimeType: z.string(),
    data: z.string(),
  })
  .strict();
// A function call requested by the model (name + free-form args).
const FunctionCallSchema = z
  .object({
    name: z.string(),
    args: z.record(z.unknown()),
    id: z.string().optional(),
  })
  .strict();
// The caller's response to a function call, echoed back to the model.
const FunctionResponseSchema = z
  .object({
    name: z.string(),
    response: z.record(z.unknown()),
    id: z.string().optional(),
  })
  .strict();
// Define the main Part schema using discriminated union if possible, or optional fields
// Using optional fields as discriminated union with zod can be tricky with multiple optional fields
export const PartSchema = z
  .object({
    text: z.string().optional(),
    inlineData: BlobSchema.optional(),
    functionCall: FunctionCallSchema.optional(),
    functionResponse: FunctionResponseSchema.optional(),
    // Add other part types like executableCode, codeExecutionResult, videoMetadata if needed later
  })
  .strict()
  .refine(
    // Ensure exactly one field is set (or none, though SDK might require one)
    // This validation might be complex depending on exact SDK requirements
    (part) => {
      // Count how many of the optional fields are present; exactly one is valid.
      const setFields = Object.values(part).filter(
        (v) => v !== undefined
      ).length;
      return setFields === 1; // Adjust if zero fields are allowed or more complex validation needed
    },
    {
      message:
        "Exactly one field must be set in a Part object (text, inlineData, functionCall, or functionResponse).",
    }
  );
// Define the Content schema
export const ContentSchema = z
  .object({
    parts: z.array(PartSchema).min(1), // Must have at least one part
    role: z.enum(["user", "model", "function", "tool"]).optional(), // Role is optional for some contexts
  })
  .strict();
```
--------------------------------------------------------------------------------
/tests/unit/services/gemini/GeminiGitDiffService.test.vitest.ts:
--------------------------------------------------------------------------------
```typescript
// Using vitest globals - see vitest.config.ts globals: true
import { GeminiGitDiffService } from "../../../../src/services/gemini/GeminiGitDiffService.js";
// Mock diff content
// Mock diff content fed to reviewDiff/reviewDiffStream in the tests below.
// Runtime string: keep byte-for-byte — the parser mock mirrors its hunks.
const mockDiffContent = `diff --git a/src/utils/logger.ts b/src/utils/logger.ts
index 1234567..abcdef0 100644
--- a/src/utils/logger.ts
+++ b/src/utils/logger.ts
@@ -1,5 +1,6 @@
const logger = {
- log: (message: string) => console.log(message),
+ log: (message: string, ...args: any[]) => console.log(message, ...args),
+ debug: (message: string, ...args: any[]) => console.debug(message, ...args),
error: (message: string, error?: Error) => console.error(message, error)
};
`;
// Mock gitdiff-parser - declare parsed diff inside the mock
// (vi.mock is hoisted, so the fixture must live inside the factory closure)
vi.mock("gitdiff-parser", () => {
  return {
    default: {
      parse: vi.fn().mockReturnValue([
        {
          oldPath: "src/utils/logger.ts",
          newPath: "src/utils/logger.ts",
          hunks: [
            {
              oldStart: 1,
              oldLines: 5,
              newStart: 1,
              newLines: 6,
              changes: [
                { type: "normal", content: "const logger = {" },
                {
                  type: "delete",
                  content: " log: (message: string) => console.log(message),",
                },
                {
                  type: "insert",
                  content:
                    " log: (message: string, ...args: any[]) => console.log(message, ...args),",
                },
                {
                  type: "insert",
                  content:
                    " debug: (message: string, ...args: any[]) => console.debug(message, ...args),",
                },
                {
                  type: "normal",
                  content:
                    " error: (message: string, error?: Error) => console.error(message, error)",
                },
                { type: "normal", content: "};" },
                { type: "normal", content: "" },
              ],
            },
          ],
        },
      ]),
    },
  };
});
// Shape of the value resolved by mockModel.generateContent.
interface MockGenerateContentResponse {
  response: {
    text: () => string;
  };
}
// Minimal stand-in for the SDK's generative model object.
interface MockModel {
  generateContent: ReturnType<typeof vi.fn>;
  generateContentStream: ReturnType<typeof vi.fn>;
}
// Minimal stand-in for the GoogleGenAI client used by the service.
interface MockGenAI {
  getGenerativeModel: ReturnType<typeof vi.fn<unknown[], MockModel>>;
}
describe("GeminiGitDiffService", () => {
  let mockGenAI: MockGenAI;
  let mockModel: MockModel;
  let mockResponse: MockGenerateContentResponse;
  let service: GeminiGitDiffService;
  // Setup test fixture once; afterEach only clears call records, so the
  // mock implementations and the service instance are reused across tests.
  beforeAll(() => {
    // Create mock response
    mockResponse = {
      response: {
        text: () => "This is a mock review response",
      },
    };
    // Create mock model
    mockModel = {
      generateContent: vi.fn(() => Promise.resolve(mockResponse)),
      // Streaming mock yields two fixed chunks via an async iterator.
      generateContentStream: vi.fn(() => ({
        stream: {
          async *[Symbol.asyncIterator]() {
            yield { text: () => "Streamed chunk 1" };
            yield { text: () => "Streamed chunk 2" };
          },
        },
      })),
    };
    // Create mock GoogleGenAI
    // NOTE(review): `as any` bridges the gap between MockGenAI and the real
    // GoogleGenAI type expected by the service constructor.
    mockGenAI = {
      getGenerativeModel: vi.fn(() => mockModel),
    } as any;
    // Create service with flash model as default
    service = new GeminiGitDiffService(
      mockGenAI as any,
      "gemini-flash-2.0", // Use Gemini Flash 2.0 as default model
      1024 * 1024,
      ["package-lock.json", "*.min.js"]
    );
  });
  afterEach(() => {
    vi.clearAllMocks();
  });
  describe("reviewDiff", () => {
    it("should use Gemini Flash 2.0 model when no model is specified", async () => {
      // Call the service
      await service.reviewDiff({
        diffContent: mockDiffContent,
        reviewFocus: "general",
      });
      // Verify model called with correct parameters
      expect(mockGenAI.getGenerativeModel).toHaveBeenCalledTimes(1);
      expect(mockGenAI.getGenerativeModel).toHaveBeenCalledWith(
        expect.objectContaining({
          model: "gemini-flash-2.0",
          generationConfig: expect.objectContaining({
            thinkingBudget: 4096,
          }),
        })
      );
    });
    it("should allow overriding the model", async () => {
      // Call the service with a different model
      await service.reviewDiff({
        diffContent: mockDiffContent,
        modelName: "gemini-pro", // Override the default model
        reviewFocus: "security",
      });
      // Verify model called with correct parameters
      expect(mockGenAI.getGenerativeModel).toHaveBeenCalledTimes(1);
      expect(mockGenAI.getGenerativeModel).toHaveBeenCalledWith(
        expect.objectContaining({
          model: "gemini-pro",
        })
      );
    });
    it("should set reasoning effort correctly", async () => {
      // Call with low reasoning effort
      await service.reviewDiff({
        diffContent: mockDiffContent,
        reasoningEffort: "low",
      });
      // Verify thinking budget set accordingly ("low" maps to 2048)
      expect(mockGenAI.getGenerativeModel).toHaveBeenCalledTimes(1);
      expect(mockGenAI.getGenerativeModel).toHaveBeenCalledWith(
        expect.objectContaining({
          generationConfig: expect.objectContaining({
            thinkingBudget: 2048,
          }),
        })
      );
    });
  });
  describe("reviewDiffStream", () => {
    it("should stream content chunks", async () => {
      const chunks: string[] = [];
      // Use for-await to consume the stream
      for await (const chunk of service.reviewDiffStream({
        diffContent: mockDiffContent,
        modelName: "gemini-flash-2.0",
      })) {
        chunks.push(chunk);
      }
      // Verify we got both chunks in order
      expect(chunks.length).toBe(2);
      expect(chunks[0]).toBe("Streamed chunk 1");
      expect(chunks[1]).toBe("Streamed chunk 2");
    });
  });
});
```
--------------------------------------------------------------------------------
/src/tools/writeToFileTool.ts:
--------------------------------------------------------------------------------
```typescript
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { McpError, ErrorCode } from "@modelcontextprotocol/sdk/types.js";
// We don't need to import z here, it's imported via the params file
import {
TOOL_NAME,
TOOL_DESCRIPTION,
TOOL_PARAMS,
writeToFileSchema,
} from "./schemas/writeToFileParams.js";
import { z } from "zod";
import { validateAndResolvePath } from "../utils/filePathSecurity.js";
import * as fs from "fs/promises";
import { logger } from "../utils/logger.js";
import { ValidationError } from "../utils/errors.js";
// Define the type for the write tool parameters
type WriteToFileParams = z.infer<z.ZodObject<typeof TOOL_PARAMS>>;
/**
* Registers the writeToFile tool with the MCP server.
* @param server - The McpServer instance.
*/
export const writeToFileTool = (server: McpServer): void => {
  /**
   * Process a write to file request.
   *
   * Validates args against writeToFileSchema, resolves the path through the
   * file-path security layer, enforces the overwriteFile flag, then writes.
   *
   * @param args - The parameters for the file write operation.
   * @returns A response object containing a success message.
   * @throws McpError if the operation fails.
   */
  const processWriteRequest = async (args: unknown) => {
    // Validate and parse the arguments
    // NOTE(review): parse() happens outside the try below, so a ZodError
    // propagates to the caller unmapped.
    const validatedArgs = writeToFileSchema.parse(args);
    logger.debug(
      `Received write file request with args: ${JSON.stringify(validatedArgs)}`
    );
    try {
      // Content is always plain text now
      const contentToWrite = validatedArgs.content;
      // Validate and resolve the file path
      const safePath = validateAndResolvePath(validatedArgs.filePath, {
        mustExist: false,
      });
      // Check if file exists and handle overwrite
      try {
        await fs.access(safePath);
        if (!validatedArgs.overwriteFile) {
          throw new McpError(
            ErrorCode.InvalidParams,
            `File already exists: ${validatedArgs.filePath}. Set overwriteFile to true to overwrite.`
          );
        }
      } catch (error: unknown) {
        // File doesn't exist, which is fine for writing
        // NOTE(review): any non-McpError access failure (e.g. permissions)
        // is also swallowed here; writeFile below will surface it instead.
        if (error instanceof McpError) {
          throw error;
        }
      }
      // Write the file
      await fs.writeFile(safePath, contentToWrite, {
        encoding: (validatedArgs.encoding || "utf8") as BufferEncoding,
      });
      // Return success response
      return {
        content: [
          {
            type: "text" as const,
            text: JSON.stringify(
              {
                message: "Content written to file successfully.",
                filePath: validatedArgs.filePath,
              },
              null,
              2
            ),
          },
        ],
      };
    } catch (error) {
      logger.error(`Error writing file: ${error}`);
      // Handle specific errors
      if (error instanceof McpError) {
        throw error; // Re-throw if it's already an McpError
      }
      // Handle ValidationError from file security
      if (error instanceof ValidationError) {
        throw new McpError(ErrorCode.InvalidParams, error.message);
      }
      // Catch-all for unexpected errors
      throw new McpError(
        ErrorCode.InternalError,
        error instanceof Error
          ? error.message
          : "An unexpected error occurred while writing to file."
      );
    }
  };
  // Register the tool with the server
  server.tool(TOOL_NAME, TOOL_DESCRIPTION, TOOL_PARAMS, processWriteRequest);
  logger.info(`Tool registered: ${TOOL_NAME}`);
};
// Also export an execute method for direct use in other tools
export const writeToFile = {
name: TOOL_NAME,
description: TOOL_DESCRIPTION,
inputSchema: writeToFileSchema,
execute: async (args: unknown) => {
const typedArgs = args as WriteToFileParams;
logger.debug(
`Executing write file with args: ${JSON.stringify(typedArgs)}`
);
try {
// Convert boolean to overwrite option
const contentToWrite = typedArgs.content;
// Validate and resolve the file path
const safePath = validateAndResolvePath(typedArgs.filePath, {
mustExist: false,
});
// Check if file exists and handle overwrite
try {
await fs.access(safePath);
if (!typedArgs.overwriteFile) {
throw new ValidationError(
`File already exists: ${typedArgs.filePath}. Set overwriteFile to true to overwrite.`
);
}
} catch (error: unknown) {
// File doesn't exist, which is fine for writing
if (error instanceof ValidationError) {
throw error;
}
}
// Write the file
await fs.writeFile(safePath, contentToWrite, {
encoding: typedArgs.encoding || "utf8",
});
// Return success response
return {
content: [
{
type: "text" as const,
text: JSON.stringify(
{
message: "Content written to file successfully.",
filePath: typedArgs.filePath,
},
null,
2
),
},
],
};
} catch (error) {
logger.error(`Error writing file: ${error}`);
// Handle specific errors
if (error instanceof McpError) {
throw error; // Re-throw if it's already an McpError
}
// Handle ValidationError from FileSecurityService
if (error instanceof ValidationError) {
if (error.message.includes("File already exists")) {
throw new McpError(
ErrorCode.InvalidParams,
`File already exists: ${error.message}`
);
}
if (
error.message.includes("Access denied") ||
error.message.includes("Security error")
) {
throw new McpError(
ErrorCode.InvalidParams,
`Security error: ${error.message}`
);
}
throw new McpError(ErrorCode.InvalidParams, error.message);
}
// Catch-all for unexpected errors
throw new McpError(
ErrorCode.InternalError,
error instanceof Error
? error.message
: "An unexpected error occurred while writing to file."
);
}
},
};
```
--------------------------------------------------------------------------------
/src/resources/system-prompt.md:
--------------------------------------------------------------------------------
```markdown
# System Prompt for Expert Software Developer
## 1. Purpose Definition
You are an expert software developer focused on delivering high-quality, production-ready code that adheres to SOLID principles, follows DRY methodology, and maintains clean code standards. Your primary purpose is to help users design, architect, implement, and refine software that is not only functional but also maintainable, scalable, robust, and ready for production deployment.
## 2. Role and Expertise
You specialize in software engineering best practices with deep expertise in:
- SOLID principles implementation:
- Single Responsibility Principle: Each class/module has one responsibility (e.g., separating data access, business logic, and presentation)
- Open/Closed Principle: Open for extension, closed for modification (e.g., using strategy patterns or inheritance appropriately)
- Liskov Substitution Principle: Subtypes must be substitutable for their base types (e.g., ensuring overridden methods preserve contracts)
- Interface Segregation Principle: Clients shouldn't depend on interfaces they don't use (e.g., creating focused, specific interfaces)
- Dependency Inversion Principle: Depend on abstractions, not concretions (e.g., using dependency injection and interfaces)
- DRY (Don't Repeat Yourself) methodology:
- Identifying and eliminating code duplication through refactoring
- Creating reusable components, libraries, and abstractions
- Implementing effective modularization strategies and composition
- Using appropriate design patterns to promote code reuse
- Clean code practices:
- Meaningful, consistent naming conventions that reveal intent
- Small, focused functions/methods with single purposes (15-30 lines preferred)
- Self-documenting code with appropriate comments for complex logic
- Consistent formatting and structure following language conventions
- Comprehensive test coverage and testable design
- Production readiness:
- Robust error handling and graceful failure mechanisms
- Comprehensive logging and monitoring integration
- Security best practices and vulnerability prevention
- Performance optimization for scale
- Configuration management and environment handling
- Deployment considerations and CI/CD compatibility
You demonstrate expertise in software architecture patterns, testing methodologies, security best practices, performance optimization techniques, and collaborative development workflows.
## 3. Response Characteristics
Your responses should be:
- Precise and technical, using correct terminology
- Well-structured with appropriate code formatting
- Balanced between theory and practical implementation
- Accompanied by explanations of design decisions and trade-offs
- Scalable to the complexity of the problem (simple solutions for simple problems)
- Complete yet concise, focusing on core principles without unnecessary complexity
When providing code, include:
- Clear, consistent naming conventions that reveal intent
- Appropriate comments explaining complex logic or design decisions
- Complete error handling and exception management
- Type safety considerations and input validation
- Logging at appropriate levels (error, warning, info, debug)
- Example usage where helpful
## 4. Task-Specific Guidelines
When receiving a coding request:
- Clarify requirements and edge cases before implementation
- Start with a clear design approach before diving into implementation details
- Structure for testability with clear separation of concerns
- Implement comprehensive error handling, logging, and validation
- Consider deployment and runtime environment factors
- Provide usage examples demonstrating proper implementation
- Include appropriate test strategies (unit, integration, etc.)
For architecture/design tasks:
- Begin with understanding the problem domain and requirements
- Consider separation of concerns and appropriate layering
- Design for the appropriate level of abstraction and flexibility
- Account for non-functional requirements (scalability, performance, security)
- Evaluate and recommend appropriate design patterns
- Consider how the architecture will evolve over time
- Address deployment, monitoring, and operational considerations
For code reviews and refactoring:
- Identify violations of SOLID principles with specific recommendations
- Highlight potential code duplication with refactoring suggestions
- Suggest improvements for readability and maintenance
- Assess test coverage and quality
- Consider security vulnerabilities and performance implications
- Provide constructive, actionable feedback with examples
- Address technical debt with prioritized refactoring strategies
For testing guidance:
- Recommend appropriate testing strategies (unit, integration, E2E)
- Demonstrate test structure and organization
- Guide on test coverage priorities
- Show effective mocking and test isolation approaches
- Emphasize testing both happy paths and edge cases/error conditions
## 5. Context and Limitations
- Focus on widely-accepted industry best practices while acknowledging context-specific trade-offs
- When multiple valid approaches exist, explain the trade-offs considering maintenance, performance, and complexity
- Scale solutions appropriately to project size and requirements (avoid over-engineering)
- Prioritize maintainability and readability over clever or overly complex solutions
- Default to production-grade code with proper error handling, logging, and security unless explicitly requested otherwise
- Acknowledge when a perfect solution isn't possible given constraints and offer pragmatic alternatives
- For language-specific requests beyond your expertise, provide guidance on universal principles and patterns that apply across languages
For collaborative development:
- Emphasize clear documentation standards
- Recommend effective version control workflows
- Guide on code review best practices
- Suggest communication and knowledge-sharing approaches
If a request would result in insecure, unmaintainable, or poor-quality code, provide alternative approaches that maintain quality standards while meeting the core requirements, explaining the rationale for your recommendations.
```
--------------------------------------------------------------------------------
/tests/unit/utils/FileSecurityServiceBasics.test.vitest.ts:
--------------------------------------------------------------------------------
```typescript
// Using vitest globals - see vitest.config.ts globals: true
import * as path from "node:path";
import * as fs from "node:fs/promises";
// Import the code to test
import { FileSecurityService } from "../../../src/utils/FileSecurityService.js";
import { logger } from "../../../src/utils/logger.js";
describe("FileSecurityService Basic Operations", () => {
  // Define test constants for all tests
  const TEST_CONTENT = "Test file content";
  // Test directories for our tests:
  // testDir/allowed is the permitted write target; outsideDir simulates a
  // location outside the allow-list.
  const testDir = path.resolve("./test-temp-dir");
  const outsideDir = path.resolve("./outside-dir");
  const ALLOWED_DIR = path.join(testDir, "allowed");
  // Setup and teardown for tests
  beforeEach(async () => {
    // Setup test directories (recursive: true makes this idempotent)
    await fs.mkdir(testDir, { recursive: true });
    await fs.mkdir(ALLOWED_DIR, { recursive: true });
    await fs.mkdir(outsideDir, { recursive: true });
    // Mock logger to prevent console spam
    vi.spyOn(logger, "info").mockImplementation(vi.fn());
    vi.spyOn(logger, "error").mockImplementation(vi.fn());
    vi.spyOn(logger, "warn").mockImplementation(vi.fn());
  });
  afterEach(async () => {
    // Clean up test directories (force: true ignores already-missing paths)
    await fs.rm(testDir, { recursive: true, force: true });
    await fs.rm(outsideDir, { recursive: true, force: true });
    // Restore mocks
    vi.restoreAllMocks();
  });
  describe("Basic File Security Operations", () => {
    it("should write to a file directly within allowed absolute directory", async () => {
      // Arrange
      const filePath = path.join(ALLOWED_DIR, "file.txt");
      const allowedPaths = [ALLOWED_DIR];
      const fileSecurityService = new FileSecurityService(allowedPaths);
      // Act
      await fileSecurityService.secureWriteFile(filePath, TEST_CONTENT);
      // Assert
      const fileContent = await fs.readFile(filePath, "utf8");
      expect(fileContent).toBe(TEST_CONTENT);
    });
    it("should write to a file in a nested subdirectory of allowed directory", async () => {
      // Arrange — subdirectory is NOT pre-created; the service is expected
      // to create intermediate directories itself.
      const nestedDir = path.join(ALLOWED_DIR, "subdir");
      const filePath = path.join(nestedDir, "file.txt");
      const allowedPaths = [ALLOWED_DIR];
      const fileSecurityService = new FileSecurityService(allowedPaths);
      // Act
      await fileSecurityService.secureWriteFile(filePath, TEST_CONTENT);
      // Assert
      const fileContent = await fs.readFile(filePath, "utf8");
      expect(fileContent).toBe(TEST_CONTENT);
    });
    it("should allow writing when filePath is an exact match to allowed absolute file path", async () => {
      // Arrange
      const exactFilePath = path.join(ALLOWED_DIR, "exact-file.txt");
      const allowedPaths = [exactFilePath]; // Allowing the exact file path
      const fileSecurityService = new FileSecurityService(allowedPaths);
      // Act
      await fileSecurityService.secureWriteFile(exactFilePath, TEST_CONTENT);
      // Assert
      const fileContent = await fs.readFile(exactFilePath, "utf8");
      expect(fileContent).toBe(TEST_CONTENT);
    });
    it("should throw error when filePath resolves outside allowed paths", async () => {
      // Arrange
      const unsafePath = path.join(outsideDir, "unsafe-file.txt");
      const allowedPaths = [ALLOWED_DIR];
      const fileSecurityService = new FileSecurityService(allowedPaths);
      // Act & Assert
      await expect(
        fileSecurityService.secureWriteFile(unsafePath, TEST_CONTENT)
      ).rejects.toThrow(
        /Access denied: The file path must be within the allowed directories/
      );
      // Additional check that logger.warn was called (FileSecurityService uses warn, not error)
      expect(logger.warn).toHaveBeenCalled();
      // Verify file was not written
      await expect(fs.access(unsafePath)).rejects.toThrow();
    });
    it("should throw error when filePath uses directory traversal to escape allowed path", async () => {
      // Arrange — ".." segments resolve to a path outside ALLOWED_DIR
      const traversalPath = path.join(
        ALLOWED_DIR,
        "subdir",
        "..",
        "..",
        "outside",
        "file.txt"
      );
      const allowedPaths = [ALLOWED_DIR];
      const fileSecurityService = new FileSecurityService(allowedPaths);
      // Act & Assert
      await expect(
        fileSecurityService.secureWriteFile(traversalPath, TEST_CONTENT)
      ).rejects.toThrow(
        /Access denied: The file path must be within the allowed directories/
      );
    });
    it("should use default path when no allowed paths are provided", async () => {
      // Arrange
      const filePath = path.join(process.cwd(), "test-file.txt");
      const fileSecurityService = new FileSecurityService(); // No paths provided uses CWD as default
      try {
        // Act
        await fileSecurityService.secureWriteFile(filePath, TEST_CONTENT);
        // Assert
        const fileContent = await fs.readFile(filePath, "utf8");
        expect(fileContent).toBe(TEST_CONTENT);
      } finally {
        // Cleanup the file created in CWD
        try {
          await fs.unlink(filePath);
        } catch (err) {
          // Ignore error if file doesn't exist
        }
      }
    });
    it("should correctly handle path normalization and resolution", async () => {
      // Arrange — "." and ".." segments that normalize back inside ALLOWED_DIR
      const complexPath = path.join(
        ALLOWED_DIR,
        ".",
        "subdir",
        "..",
        "normalized-file.txt"
      );
      const allowedPaths = [ALLOWED_DIR];
      const fileSecurityService = new FileSecurityService(allowedPaths);
      // Act
      await fileSecurityService.secureWriteFile(complexPath, TEST_CONTENT);
      // Assert - check the file exists at the normalized location
      const expectedPath = path.join(ALLOWED_DIR, "normalized-file.txt");
      const fileContent = await fs.readFile(expectedPath, "utf8");
      expect(fileContent).toBe(TEST_CONTENT);
    });
    it("should handle multiple allowed paths", async () => {
      // Arrange — outsideDir becomes legal once it is on the allow-list
      const filePath = path.join(outsideDir, "allowed-outside-file.txt");
      const content = "multi-allowed content";
      const fileSecurityService = new FileSecurityService([
        ALLOWED_DIR,
        outsideDir,
      ]);
      // Act
      await fileSecurityService.secureWriteFile(filePath, content);
      // Assert
      const fileContent = await fs.readFile(filePath, "utf8");
      expect(fileContent).toBe(content);
    });
  });
});
```
--------------------------------------------------------------------------------
/tests/unit/services/gemini/GitHubUrlParser.test.vitest.ts:
--------------------------------------------------------------------------------
```typescript
// Using vitest globals - see vitest.config.ts globals: true
import { GitHubUrlParser } from "../../../../src/services/gemini/GitHubUrlParser.js";
describe("GitHubUrlParser", () => {
  describe("parse()", () => {
    it("should parse repository URLs correctly", () => {
      const parsed = GitHubUrlParser.parse(
        "https://github.com/bsmi021/mcp-gemini-server"
      );
      expect(parsed?.type).toBe("repository");
      expect(parsed?.owner).toBe("bsmi021");
      expect(parsed?.repo).toBe("mcp-gemini-server");
      expect(parsed?.branch).toBeUndefined();
      expect(parsed?.prNumber).toBeUndefined();
      expect(parsed?.issueNumber).toBeUndefined();
    });
    it("should parse branch URLs correctly", () => {
      // Branch names containing "/" must be captured in full.
      const parsed = GitHubUrlParser.parse(
        "https://github.com/bsmi021/mcp-gemini-server/tree/feature/add-reasoning-effort-option"
      );
      expect(parsed?.type).toBe("branch");
      expect(parsed?.owner).toBe("bsmi021");
      expect(parsed?.repo).toBe("mcp-gemini-server");
      expect(parsed?.branch).toBe("feature/add-reasoning-effort-option");
      expect(parsed?.prNumber).toBeUndefined();
      expect(parsed?.issueNumber).toBeUndefined();
    });
    it("should parse pull request URLs correctly", () => {
      const parsed = GitHubUrlParser.parse(
        "https://github.com/bsmi021/mcp-gemini-server/pull/2"
      );
      expect(parsed?.type).toBe("pull_request");
      expect(parsed?.owner).toBe("bsmi021");
      expect(parsed?.repo).toBe("mcp-gemini-server");
      expect(parsed?.branch).toBeUndefined();
      expect(parsed?.prNumber).toBe("2");
      expect(parsed?.issueNumber).toBeUndefined();
    });
    it("should parse pull request files URLs correctly", () => {
      const parsed = GitHubUrlParser.parse(
        "https://github.com/bsmi021/mcp-gemini-server/pull/2/files"
      );
      expect(parsed?.type).toBe("pr_files");
      expect(parsed?.owner).toBe("bsmi021");
      expect(parsed?.repo).toBe("mcp-gemini-server");
      expect(parsed?.branch).toBeUndefined();
      expect(parsed?.prNumber).toBe("2");
      expect(parsed?.filesView).toBe(true);
      expect(parsed?.issueNumber).toBeUndefined();
    });
    it("should parse issue URLs correctly", () => {
      const parsed = GitHubUrlParser.parse(
        "https://github.com/bsmi021/mcp-gemini-server/issues/5"
      );
      expect(parsed?.type).toBe("issue");
      expect(parsed?.owner).toBe("bsmi021");
      expect(parsed?.repo).toBe("mcp-gemini-server");
      expect(parsed?.branch).toBeUndefined();
      expect(parsed?.prNumber).toBeUndefined();
      expect(parsed?.issueNumber).toBe("5");
    });
    it("should return null for invalid URLs", () => {
      [
        "https://example.com",
        "https://github.com",
        "https://github.com/bsmi021",
        "https://github.com/bsmi021/mcp-gemini-server/unknown",
        "not a url at all",
      ].forEach((candidate) => {
        expect(GitHubUrlParser.parse(candidate)).toBeNull();
      });
    });
  });
  describe("isValidGitHubUrl()", () => {
    it("should return true for valid GitHub URLs", () => {
      [
        "https://github.com/bsmi021/mcp-gemini-server",
        "https://github.com/bsmi021/mcp-gemini-server/tree/main",
        "https://github.com/bsmi021/mcp-gemini-server/pull/2",
        "https://github.com/bsmi021/mcp-gemini-server/pull/2/files",
        "https://github.com/bsmi021/mcp-gemini-server/issues/5",
      ].forEach((candidate) => {
        expect(GitHubUrlParser.isValidGitHubUrl(candidate)).toBe(true);
      });
    });
    it("should return false for invalid URLs", () => {
      [
        "https://example.com",
        "https://github.com",
        "https://github.com/bsmi021",
        "https://github.com/bsmi021/mcp-gemini-server/unknown",
        "not a url at all",
      ].forEach((candidate) => {
        expect(GitHubUrlParser.isValidGitHubUrl(candidate)).toBe(false);
      });
    });
  });
  describe("getApiEndpoint()", () => {
    it("should return the correct API endpoint for repository URLs", () => {
      expect(
        GitHubUrlParser.getApiEndpoint(
          "https://github.com/bsmi021/mcp-gemini-server"
        )
      ).toBe("repos/bsmi021/mcp-gemini-server");
    });
    it("should return the correct API endpoint for branch URLs", () => {
      expect(
        GitHubUrlParser.getApiEndpoint(
          "https://github.com/bsmi021/mcp-gemini-server/tree/main"
        )
      ).toBe("repos/bsmi021/mcp-gemini-server/branches/main");
    });
    it("should return the correct API endpoint for PR URLs", () => {
      expect(
        GitHubUrlParser.getApiEndpoint(
          "https://github.com/bsmi021/mcp-gemini-server/pull/2"
        )
      ).toBe("repos/bsmi021/mcp-gemini-server/pulls/2");
    });
    it("should return the correct API endpoint for PR files URLs", () => {
      // The "/files" view maps to the same API endpoint as the PR itself.
      expect(
        GitHubUrlParser.getApiEndpoint(
          "https://github.com/bsmi021/mcp-gemini-server/pull/2/files"
        )
      ).toBe("repos/bsmi021/mcp-gemini-server/pulls/2");
    });
    it("should return the correct API endpoint for issue URLs", () => {
      expect(
        GitHubUrlParser.getApiEndpoint(
          "https://github.com/bsmi021/mcp-gemini-server/issues/5"
        )
      ).toBe("repos/bsmi021/mcp-gemini-server/issues/5");
    });
    it("should return null for invalid URLs", () => {
      expect(GitHubUrlParser.getApiEndpoint("https://example.com")).toBeNull();
    });
  });
  describe("getRepositoryInfo()", () => {
    it("should return repository info for valid GitHub URLs", () => {
      [
        "https://github.com/bsmi021/mcp-gemini-server",
        "https://github.com/bsmi021/mcp-gemini-server/tree/main",
        "https://github.com/bsmi021/mcp-gemini-server/pull/2",
        "https://github.com/bsmi021/mcp-gemini-server/issues/5",
      ].forEach((candidate) => {
        const info = GitHubUrlParser.getRepositoryInfo(candidate);
        expect(info?.owner).toBe("bsmi021");
        expect(info?.repo).toBe("mcp-gemini-server");
      });
    });
    it("should return null for invalid URLs", () => {
      expect(
        GitHubUrlParser.getRepositoryInfo("https://example.com")
      ).toBeNull();
    });
  });
});
```
--------------------------------------------------------------------------------
/src/services/SessionService.ts:
--------------------------------------------------------------------------------
```typescript
import { v4 as uuidv4 } from "uuid";
import { logger } from "../utils/logger.js";
import { McpError, ErrorCode } from "@modelcontextprotocol/sdk/types.js";
import { SessionStore } from "./session/SessionStore.js";
import { InMemorySessionStore } from "./session/InMemorySessionStore.js";
import { SQLiteSessionStore } from "./session/SQLiteSessionStore.js";
/**
 * Snapshot of a single session as persisted by a SessionStore.
 * All timestamps are epoch milliseconds (Date.now()).
 */
export interface SessionState<T = Record<string, unknown>> {
  // Unique session identifier (UUID v4, assigned at creation).
  id: string;
  // When the session was created.
  createdAt: number;
  // When the session was last read or written; refreshed on access.
  lastActivity: number;
  // Absolute expiry time; sessions past this moment are treated as dead.
  expiresAt: number;
  data: T; // Generic data for the session (e.g., chat history, tool state)
}
/**
 * Manages session lifecycle (create/read/update/delete) on top of a
 * pluggable SessionStore backend, with periodic expiry cleanup.
 */
export class SessionService {
  private store: SessionStore;
  private cleanupInterval: NodeJS.Timeout | null = null;
  private defaultTimeoutSeconds: number;
  // Resolves once the store is ready; every public method awaits this.
  private initialized: Promise<void>;

  /**
   * @param defaultTimeoutSeconds Idle timeout applied to sessions (default: 1 hour).
   * @param storeType Backing store; falls back to the SESSION_STORE_TYPE env
   *   var, then "memory".
   * @param dbPath SQLite database path (sqlite store only); falls back to the
   *   SQLITE_DB_PATH env var.
   */
  constructor(
    defaultTimeoutSeconds: number = 3600,
    storeType?: "memory" | "sqlite",
    dbPath?: string
  ) {
    // Default 1 hour
    this.defaultTimeoutSeconds = defaultTimeoutSeconds;
    // Initialize the appropriate store based on configuration
    const effectiveStoreType =
      storeType || process.env.SESSION_STORE_TYPE || "memory";
    switch (effectiveStoreType) {
      case "sqlite":
        this.store = new SQLiteSessionStore(
          dbPath || process.env.SQLITE_DB_PATH
        );
        break;
      case "memory":
      default:
        this.store = new InMemorySessionStore();
        break;
    }
    // Initialize the store asynchronously
    this.initialized = this.initializeStore();
    this.initialized
      .then(() => {
        this.startCleanupInterval();
        logger.info(
          `SessionService initialized with ${effectiveStoreType} store and default timeout: ${defaultTimeoutSeconds}s`
        );
      })
      .catch((error) => {
        // Log only. Re-throwing here would reject a derived promise that
        // nobody awaits, producing an unhandled rejection; callers still
        // observe the failure because every public method awaits
        // this.initialized (the original, still-rejected promise).
        logger.error("Failed to initialize session store:", error);
      });
  }

  private async initializeStore(): Promise<void> {
    await this.store.initialize();
  }

  /**
   * Creates a new session.
   * @param initialData Initial data to store in the session.
   * @param timeoutSeconds Optional custom timeout for this session.
   * @returns The newly created session ID.
   */
  public async createSession<
    T extends Record<string, unknown> = Record<string, unknown>,
  >(initialData: T = {} as T, timeoutSeconds?: number): Promise<string> {
    // Ensure store is initialized
    await this.initialized;
    const sessionId = uuidv4();
    const now = Date.now();
    const effectiveTimeout = timeoutSeconds ?? this.defaultTimeoutSeconds;
    const expiresAt = now + effectiveTimeout * 1000;
    const newSession: SessionState<T> = {
      id: sessionId,
      createdAt: now,
      lastActivity: now,
      expiresAt: expiresAt,
      data: initialData,
    };
    await this.store.set(sessionId, newSession);
    logger.debug(
      `Session ${sessionId} created, expires in ${effectiveTimeout}s`
    );
    return sessionId;
  }

  /**
   * Retrieves a session and updates its last activity timestamp.
   * Accessing a session extends its expiry by the service-wide default
   * timeout (note: a custom per-session timeout is not retained here).
   * @param sessionId The ID of the session to retrieve.
   * @returns The session state.
   * @throws McpError if the session is not found or has expired.
   */
  public async getSession(sessionId: string): Promise<SessionState> {
    // Ensure store is initialized
    await this.initialized;
    const session = await this.store.get(sessionId);
    if (!session) {
      throw new McpError(
        ErrorCode.InvalidRequest,
        `Session not found: ${sessionId}`
      );
    }
    if (Date.now() > session.expiresAt) {
      await this.deleteSession(sessionId); // Clean up expired session
      throw new McpError(
        ErrorCode.InvalidRequest,
        `Session expired: ${sessionId}`
      );
    }
    // Update last activity and extend expiration
    session.lastActivity = Date.now();
    session.expiresAt =
      session.lastActivity + this.defaultTimeoutSeconds * 1000;
    await this.store.set(sessionId, session);
    logger.debug(`Session ${sessionId} accessed, expiration extended.`);
    return session;
  }

  /**
   * Updates existing session data.
   * @param sessionId The ID of the session to update.
   * @param partialData Partial data to merge into the session's data.
   * @throws McpError if the session is not found or has expired.
   */
  public async updateSession(
    sessionId: string,
    partialData: Partial<Record<string, unknown>>
  ): Promise<void> {
    const session = await this.getSession(sessionId); // This also updates lastActivity
    session.data = { ...session.data, ...partialData };
    await this.store.set(sessionId, session);
    logger.debug(`Session ${sessionId} updated.`);
  }

  /**
   * Deletes a session.
   * @param sessionId The ID of the session to delete.
   * @returns True if the session was deleted, false otherwise.
   */
  public async deleteSession(sessionId: string): Promise<boolean> {
    await this.initialized;
    const deleted = await this.store.delete(sessionId);
    if (deleted) {
      logger.debug(`Session ${sessionId} deleted.`);
    } else {
      logger.warn(`Attempted to delete non-existent session: ${sessionId}`);
    }
    return deleted;
  }

  /**
   * Starts the periodic cleanup of expired sessions.
   */
  private startCleanupInterval(): void {
    if (this.cleanupInterval) {
      clearInterval(this.cleanupInterval);
    }
    this.cleanupInterval = setInterval(() => {
      // Fire-and-forget: cleanupExpiredSessions handles its own errors.
      void this.cleanupExpiredSessions();
    }, 60 * 1000); // Check every minute
    // Don't let the cleanup timer keep the Node.js process alive on shutdown.
    this.cleanupInterval.unref?.();
  }

  /**
   * Cleans up all expired sessions.
   */
  private async cleanupExpiredSessions(): Promise<void> {
    try {
      const now = Date.now();
      const cleanedCount = await this.store.deleteExpired(now);
      if (cleanedCount > 0) {
        logger.info(
          `SessionService cleaned up ${cleanedCount} expired sessions.`
        );
      }
    } catch (error) {
      logger.error("Error during session cleanup:", error);
    }
  }

  /**
   * Stops the periodic cleanup interval.
   */
  public stopCleanupInterval(): void {
    if (this.cleanupInterval) {
      clearInterval(this.cleanupInterval);
      this.cleanupInterval = null;
      logger.info("SessionService cleanup interval stopped.");
    }
  }

  /**
   * Returns the number of active sessions.
   */
  public async getActiveSessionCount(): Promise<number> {
    await this.initialized;
    return this.store.count();
  }

  /**
   * Closes the session service and cleans up resources.
   */
  public async close(): Promise<void> {
    this.stopCleanupInterval();
    await this.store.close();
    logger.info("SessionService closed");
  }
}
```
--------------------------------------------------------------------------------
/tests/unit/tools/schemas/ToolParamSchemas.test.vitest.ts:
--------------------------------------------------------------------------------
```typescript
// Using vitest globals - see vitest.config.ts globals: true
import { exampleToolSchema } from "../../../../src/tools/exampleToolParams.js";
import { geminiGenerateContentSchema } from "../../../../src/tools/geminiGenerateContentConsolidatedParams.js";
import { writeToFileSchema } from "../../../../src/tools/schemas/writeToFileParams.js";
/**
* This test file focuses on the specific tool parameter schemas
* used throughout the application. Each tool schema is tested
* for proper validation of both valid and invalid inputs.
*/
describe("Tool Parameter Schemas", () => {
  describe("exampleToolSchema", () => {
    it("should validate valid parameters", () => {
      expect(exampleToolSchema.safeParse({ name: "Test User" }).success).toBe(
        true
      );
    });
    it("should validate with optional language parameter", () => {
      const outcome = exampleToolSchema.safeParse({
        name: "Test User",
        language: "es",
      });
      expect(outcome.success).toBe(true);
    });
    describe("name parameter boundary values", () => {
      it("should validate minimum valid name length (1 character)", () => {
        expect(exampleToolSchema.safeParse({ name: "A" }).success).toBe(true);
      });
      it("should validate maximum valid name length (50 characters)", () => {
        expect(
          exampleToolSchema.safeParse({ name: "A".repeat(50) }).success
        ).toBe(true);
      });
      it("should reject empty name parameter (0 characters)", () => {
        expect(exampleToolSchema.safeParse({ name: "" }).success).toBe(false);
      });
      it("should reject name that exceeds max length (51 characters)", () => {
        expect(
          exampleToolSchema.safeParse({ name: "A".repeat(51) }).success
        ).toBe(false);
      });
    });
    describe("language parameter values", () => {
      it("should validate all valid language options", () => {
        for (const lang of ["en", "es", "fr"]) {
          expect(
            exampleToolSchema.safeParse({ name: "Test User", language: lang })
              .success
          ).toBe(true);
        }
      });
      it("should reject invalid language options", () => {
        for (const lang of ["de", "jp", "it", ""]) {
          expect(
            exampleToolSchema.safeParse({ name: "Test User", language: lang })
              .success
          ).toBe(false);
        }
      });
    });
  });
  describe("geminiGenerateContentSchema", () => {
    it("should validate minimal required parameters", () => {
      expect(
        geminiGenerateContentSchema.safeParse({ prompt: "Tell me a story" })
          .success
      ).toBe(true);
    });
    it("should validate with all optional parameters", () => {
      const fullInput = {
        prompt: "Tell me a story",
        modelName: "gemini-pro",
        generationConfig: {
          temperature: 0.7,
          topP: 0.8,
          topK: 40,
          maxOutputTokens: 2048,
          stopSequences: ["THE END"],
          thinkingConfig: {
            thinkingBudget: 1000,
            reasoningEffort: "medium",
          },
        },
        safetySettings: [
          {
            category: "HARM_CATEGORY_HATE_SPEECH",
            threshold: "BLOCK_MEDIUM_AND_ABOVE",
          },
        ],
        systemInstruction: "Respond in a friendly tone",
        cachedContentName: "cachedContents/example123",
      };
      expect(geminiGenerateContentSchema.safeParse(fullInput).success).toBe(
        true
      );
    });
    it("should reject empty prompt", () => {
      const badInput = { prompt: "", modelName: "gemini-pro" };
      expect(geminiGenerateContentSchema.safeParse(badInput).success).toBe(
        false
      );
    });
    it("should reject invalid generation config parameters", () => {
      const badInput = {
        prompt: "Tell me a story",
        generationConfig: {
          temperature: 2.0, // Should be between 0 and 1
        },
      };
      expect(geminiGenerateContentSchema.safeParse(badInput).success).toBe(
        false
      );
    });
    it("should reject invalid safety settings", () => {
      const badInput = {
        prompt: "Tell me a story",
        safetySettings: [
          {
            category: "INVALID_CATEGORY", // Not a valid harm category
            threshold: "BLOCK_MEDIUM_AND_ABOVE",
          },
        ],
      };
      expect(geminiGenerateContentSchema.safeParse(badInput).success).toBe(
        false
      );
    });
  });
  describe("writeToFileSchema", () => {
    it("should validate minimal required parameters", () => {
      const input = {
        filePath: "/path/to/file.txt",
        content: "File content",
      };
      expect(writeToFileSchema.safeParse(input).success).toBe(true);
    });
    it("should validate with all optional parameters", () => {
      const input = {
        filePath: "/path/to/file.txt",
        content: "File content",
        encoding: "utf8",
        overwriteFile: true,
      };
      expect(writeToFileSchema.safeParse(input).success).toBe(true);
    });
    it("should validate with utf8 encoding", () => {
      const input = {
        filePath: "/path/to/file.txt",
        content: "File content",
        encoding: "utf8",
      };
      expect(writeToFileSchema.safeParse(input).success).toBe(true);
    });
    it("should reject unsupported encoding", () => {
      const input = {
        filePath: "/path/to/file.txt",
        content: "File content",
        encoding: "base64",
      };
      expect(writeToFileSchema.safeParse(input).success).toBe(false);
    });
    it("should reject empty file path", () => {
      const input = { filePath: "", content: "File content" };
      expect(writeToFileSchema.safeParse(input).success).toBe(false);
    });
    it("should reject invalid encoding options", () => {
      const input = {
        filePath: "/path/to/file.txt",
        content: "File content",
        encoding: "binary", // Not in ['utf8']
      };
      expect(writeToFileSchema.safeParse(input).success).toBe(false);
    });
  });
});
```
--------------------------------------------------------------------------------
/tests/e2e/clients/mcp-test-client.ts:
--------------------------------------------------------------------------------
```typescript
import fetch, { Response as FetchResponse } from "node-fetch";
import EventSource from "eventsource";
/** Constructor options for MCPTestClient. */
export interface MCPTestClientOptions {
  // Streamable-HTTP endpoint of the MCP server under test.
  url: string;
  // Per-request timeout in milliseconds; defaults to 30000 when omitted.
  timeout?: number;
}
// Basic JSON-RPC 2.0 response union type used by this test client.
// The `?: never` fields make success/error mutually exclusive so that
// checking one side narrows the union.
type JsonRpcSuccess = {
  jsonrpc: "2.0";
  id: number | string | null;
  result: unknown;
  error?: never;
};
type JsonRpcError = {
  jsonrpc: "2.0";
  id: number | string | null;
  error: { message: string; [key: string]: unknown };
  result?: never;
};
type JsonRpcResponse = JsonRpcSuccess | JsonRpcError;
/**
 * Lightweight JSON-RPC 2.0 test client for the MCP server's HTTP transport.
 *
 * Tracks the Mcp-Session-Id header handed out by initialize() and replays it
 * on subsequent requests. Streaming tool calls are consumed via SSE
 * (EventSource).
 */
export class MCPTestClient {
  /** Session id returned by the server during initialize() */
  public sessionId?: string;
  private url: string;
  // NOTE(review): timeout is stored for API compatibility but is not
  // currently enforced on any request.
  private timeout: number;
  private eventSource?: EventSource;

  constructor(optionsOrUrl: MCPTestClientOptions | string) {
    if (typeof optionsOrUrl === "string") {
      this.url = optionsOrUrl;
      this.timeout = 30000;
    } else {
      this.url = optionsOrUrl.url;
      this.timeout = optionsOrUrl.timeout || 30000;
    }
  }

  /**
   * Performs the JSON-RPC "initialize" handshake and captures the
   * Mcp-Session-Id response header for use on later calls.
   */
  async initialize(): Promise<{
    protocolVersion: string;
    capabilities: unknown;
  }> {
    const response = await fetch(this.url, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Accept: "application/json, text/event-stream",
      },
      body: JSON.stringify({
        jsonrpc: "2.0",
        id: 1,
        method: "initialize",
        params: {
          protocolVersion: "2024-11-05",
          capabilities: {
            tools: {},
          },
          clientInfo: {
            name: "mcp-test-client",
            version: "1.0.0",
          },
        },
      }),
    });
    this.sessionId = response.headers.get("Mcp-Session-Id") || undefined;
    const result = await this.parseResponse(response);
    if ("error" in result && result.error) {
      throw new Error(`Initialize failed: ${result.error.message}`);
    }
    return (result as JsonRpcSuccess).result as {
      protocolVersion: string;
      capabilities: unknown;
    };
  }

  /** Lists the tools exposed by the server. Requires initialize() first. */
  async listTools(): Promise<{ tools: unknown[] }> {
    if (!this.sessionId) {
      throw new Error("Not initialized - call initialize() first");
    }
    const response = await fetch(this.url, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Accept: "application/json, text/event-stream",
        "Mcp-Session-Id": this.sessionId,
      },
      body: JSON.stringify({
        jsonrpc: "2.0",
        id: 2,
        method: "tools/list",
        params: {},
      }),
    });
    const result = await this.parseResponse(response);
    if ("error" in result && result.error) {
      throw new Error(`List tools failed: ${result.error.message}`);
    }
    return (result as JsonRpcSuccess).result as { tools: unknown[] };
  }

  /** Invokes a tool by name and returns its (non-streaming) result. */
  async callTool(
    name: string,
    args: Record<string, unknown>
  ): Promise<{
    content?: Array<Record<string, unknown>>;
    [key: string]: unknown;
  }> {
    if (!this.sessionId) {
      throw new Error("Not initialized - call initialize() first");
    }
    const response = await fetch(this.url, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Accept: "application/json, text/event-stream",
        "Mcp-Session-Id": this.sessionId,
      },
      body: JSON.stringify({
        jsonrpc: "2.0",
        id: Date.now(),
        method: "tools/call",
        params: {
          name,
          arguments: args,
        },
      }),
    });
    const result = await this.parseResponse(response);
    if ("error" in result && result.error) {
      throw new Error(`Tool call failed: ${result.error.message}`);
    }
    return (result as JsonRpcSuccess).result as {
      content?: Array<Record<string, unknown>>;
      [key: string]: unknown;
    };
  }

  /**
   * Invokes a tool and returns an async iterable over its SSE events.
   *
   * Fix: the event handlers are attached to the EventSource BEFORE the
   * triggering POST is sent, so chunks emitted immediately after the request
   * are buffered rather than silently dropped (previously the handlers were
   * only attached once iteration began). The drain loop also keeps yielding
   * buffered chunks after the stream closes.
   */
  async streamTool(
    name: string,
    args: Record<string, unknown>
  ): Promise<AsyncIterable<unknown>> {
    if (!this.sessionId) {
      throw new Error("Not initialized - call initialize() first");
    }
    // For streaming, we need to handle SSE
    const url = `${this.url}?sessionId=${this.sessionId}`;
    this.eventSource = new EventSource(url);
    const eventSource = this.eventSource;
    const chunks: unknown[] = [];
    let done = false;
    eventSource.onmessage = (event) => {
      chunks.push(JSON.parse(event.data));
    };
    eventSource.onerror = () => {
      done = true;
      eventSource.close();
    };
    // Send the request to trigger streaming
    await fetch(this.url, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        Accept: "text/event-stream",
        "Mcp-Session-Id": this.sessionId,
      },
      body: JSON.stringify({
        jsonrpc: "2.0",
        id: Date.now(),
        method: "tools/call",
        params: {
          name,
          arguments: args,
        },
      }),
    });
    // Return async iterable that drains the shared buffer
    return {
      async *[Symbol.asyncIterator]() {
        // Keep going until the stream has closed AND the buffer is empty,
        // so trailing events are not lost.
        while (!done || chunks.length > 0) {
          if (chunks.length > 0) {
            yield chunks.shift();
          } else {
            await new Promise((resolve) => setTimeout(resolve, 10));
          }
        }
      },
    };
  }

  /** Closes any open SSE stream and releases the session (best effort). */
  async disconnect(): Promise<void> {
    if (this.eventSource) {
      this.eventSource.close();
    }
    if (this.sessionId) {
      // Send disconnect/cleanup request if needed
      await fetch(this.url, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          "Mcp-Session-Id": this.sessionId,
        },
        body: JSON.stringify({
          jsonrpc: "2.0",
          id: Date.now(),
          method: "disconnect",
          params: {},
        }),
      }).catch(() => {
        // Ignore errors on disconnect
      });
    }
    this.sessionId = undefined;
  }

  // Alias for backward compatibility with existing tests
  async close(): Promise<void> {
    await this.disconnect();
  }

  /**
   * Parses either a plain JSON body or an SSE body ("data: ..." lines)
   * into a JsonRpcResponse. The first parseable "data:" line wins.
   */
  private async parseResponse(
    response: FetchResponse
  ): Promise<JsonRpcResponse> {
    const contentType = response.headers.get("content-type") || "";
    if (contentType.includes("text/event-stream")) {
      // Parse SSE format
      const text = await response.text();
      for (const line of text.split("\n")) {
        if (line.startsWith("data: ")) {
          try {
            return JSON.parse(line.substring(6)) as JsonRpcResponse;
          } catch {
            // Malformed data line — continue scanning subsequent lines
          }
        }
      }
      throw new Error("No valid JSON data in SSE response");
    }
    // Standard JSON response
    return (await response.json()) as JsonRpcResponse;
  }
}
```
--------------------------------------------------------------------------------
/src/services/gemini/GeminiValidationSchemas.ts:
--------------------------------------------------------------------------------
```typescript
import { z } from "zod";
import { SafetySetting } from "./GeminiTypes.js";
import { HarmCategory, HarmBlockThreshold } from "@google/genai";
import type { RouteMessageParams } from "../GeminiService.js";
/**
* Validation schemas for Gemini API parameters
* These schemas ensure type safety and provide runtime validation
*/
/**
* Shared schemas used across multiple services
*/
/**
 * Harm categories for safety settings
 */
export const HarmCategorySchema = z.enum([
  "HARM_CATEGORY_HARASSMENT",
  "HARM_CATEGORY_HATE_SPEECH",
  "HARM_CATEGORY_SEXUALLY_EXPLICIT",
  "HARM_CATEGORY_DANGEROUS_CONTENT",
]);
/**
 * Blocking thresholds for safety settings
 */
export const BlockThresholdSchema = z.enum([
  "BLOCK_NONE",
  "BLOCK_LOW_AND_ABOVE",
  "BLOCK_MEDIUM_AND_ABOVE",
  "BLOCK_HIGH_AND_ABOVE",
]);
/**
 * Safety setting schema for content filtering: pairs one harm category with
 * the threshold at which content in that category is blocked.
 */
export const SafetySettingSchema = z.object({
  category: HarmCategorySchema,
  threshold: BlockThresholdSchema,
});
/**
 * Default safety settings to apply if none are provided.
 * Blocks at MEDIUM probability and above for every category.
 */
export const DEFAULT_SAFETY_SETTINGS = [
  {
    category: HarmCategory.HARM_CATEGORY_HARASSMENT,
    threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
  },
  {
    category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
    threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
  },
  {
    category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
    threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
  },
  {
    category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
    threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
  },
] as SafetySetting[];
/**
 * Image resolution schema for image generation (defaults to "1024x1024")
 */
export const ImageResolutionSchema = z
  .enum(["512x512", "1024x1024", "1536x1536"])
  .default("1024x1024");
/**
 * Image generation parameters schema.
 * prompt and negativePrompt are capped at 1000 characters;
 * numberOfImages defaults to 1 (max 8); styleStrength is a 0–1 weight.
 */
export const ImageGenerationParamsSchema = z.object({
  prompt: z.string().min(1).max(1000),
  modelName: z.string().min(1).optional(),
  resolution: ImageResolutionSchema.optional(),
  numberOfImages: z.number().int().min(1).max(8).default(1),
  safetySettings: z.array(SafetySettingSchema).optional(),
  negativePrompt: z.string().max(1000).optional(),
  // NOTE(review): stylePreset is free-form here and is not validated against
  // STYLE_PRESETS below — confirm whether it should be z.enum(STYLE_PRESETS).
  stylePreset: z.string().optional(),
  seed: z.number().int().optional(),
  styleStrength: z.number().min(0).max(1).optional(),
});
/**
 * Type representing validated image generation parameters
 */
export type ValidatedImageGenerationParams = z.infer<
  typeof ImageGenerationParamsSchema
>;
/**
 * Style presets available for image generation
 */
export const STYLE_PRESETS = [
  "photographic",
  "digital-art",
  "cinematic",
  "anime",
  "3d-render",
  "oil-painting",
  "watercolor",
  "pixel-art",
  "sketch",
  "comic-book",
  "neon",
  "fantasy",
] as const;
/**
 * Schema for thinking configuration to control model reasoning.
 * thinkingBudget is a token budget (0–24576); reasoningEffort is a coarse
 * alternative. Presumably the two are mutually exclusive — TODO confirm the
 * service behavior when both are supplied.
 */
export const ThinkingConfigSchema = z
  .object({
    thinkingBudget: z.number().int().min(0).max(24576).optional(),
    reasoningEffort: z.enum(["none", "low", "medium", "high"]).optional(),
  })
  .optional();
/**
 * Generation configuration schema for text generation.
 * The entire object is optional (note the trailing .optional()).
 * NOTE(review): temperature is capped at 1 here; some Gemini models accept
 * values up to 2.0 — confirm the intended limit.
 */
export const GenerationConfigSchema = z
  .object({
    temperature: z.number().min(0).max(1).optional(),
    topP: z.number().min(0).max(1).optional(),
    topK: z.number().int().min(1).optional(),
    maxOutputTokens: z.number().int().min(1).optional(),
    stopSequences: z.array(z.string()).optional(),
    thinkingConfig: ThinkingConfigSchema,
  })
  .optional();
/**
* Image generation schemas
*/
/**
* Content generation schemas
*/
/**
 * Schema for inline data (e.g. base64-encoded media) used in content generation
 */
export const InlineDataSchema = z.object({
  data: z.string().min(1),
  mimeType: z.string().min(1),
});
/**
 * Schema for content parts; a part may carry text and/or inline data
 */
export const PartSchema = z.object({
  text: z.string().optional(),
  inlineData: InlineDataSchema.optional(),
});
/**
 * Schema for content object used in requests
 */
export const ContentSchema = z.object({
  role: z.enum(["user", "model", "system"]).optional(),
  parts: z.array(PartSchema),
});
/**
 * Schema for validating GenerateContentParams
 */
export const GenerateContentParamsSchema = z.object({
  prompt: z.string().min(1),
  modelName: z.string().min(1).optional(),
  generationConfig: GenerationConfigSchema,
  safetySettings: z.array(SafetySettingSchema).optional(),
  systemInstruction: z.union([z.string(), ContentSchema]).optional(),
  cachedContentName: z.string().min(1).optional(),
  inlineData: z.string().optional(),
  inlineDataMimeType: z.string().optional(),
});
/**
 * Type representing validated content generation parameters
 */
export type ValidatedGenerateContentParams = z.infer<
  typeof GenerateContentParamsSchema
>;
/**
 * Schema for validating RouteMessageParams
 */
export const RouteMessageParamsSchema = z.object({
  message: z.string().min(1),
  models: z.array(z.string().min(1)).min(1),
  routingPrompt: z.string().min(1).optional(),
  defaultModel: z.string().min(1).optional(),
  // GenerationConfigSchema is already declared .optional(); the extra
  // .optional() call previously used here was redundant and is dropped for
  // consistency with GenerateContentParamsSchema above. Validation behavior
  // is unchanged.
  generationConfig: GenerationConfigSchema,
  safetySettings: z.array(SafetySettingSchema).optional(),
  systemInstruction: z.union([z.string(), ContentSchema]).optional(),
});
/**
 * Type representing validated router parameters
 */
export type ValidatedRouteMessageParams = z.infer<
  typeof RouteMessageParamsSchema
>;
/**
* Validation methods
*/
/**
 * Validates image generation parameters, applying schema defaults
 * (e.g. numberOfImages = 1, resolution = "1024x1024").
 * @throws ZodError if any argument fails validation
 */
export const validateImageGenerationParams = (
  prompt: string,
  modelName?: string,
  resolution?: "512x512" | "1024x1024" | "1536x1536",
  numberOfImages?: number,
  safetySettings?: SafetySetting[],
  negativePrompt?: string,
  stylePreset?: string,
  seed?: number,
  styleStrength?: number
): ValidatedImageGenerationParams => {
  const candidate = {
    prompt,
    modelName,
    resolution,
    numberOfImages,
    safetySettings,
    negativePrompt,
    stylePreset,
    seed,
    styleStrength,
  };
  return ImageGenerationParamsSchema.parse(candidate);
};
/**
 * Validates content generation parameters against GenerateContentParamsSchema.
 * @param params Raw parameters provided by the caller
 * @returns Validated parameters with defaults applied
 * @throws ZodError if validation fails
 */
export const validateGenerateContentParams = (
  params: Record<string, unknown>
): ValidatedGenerateContentParams => GenerateContentParamsSchema.parse(params);
/**
 * Validates router message parameters against RouteMessageParamsSchema.
 * @param params Raw parameters provided by the caller
 * @returns Validated parameters with defaults applied
 * @throws ZodError if validation fails
 */
export const validateRouteMessageParams = (
  params: RouteMessageParams
): ValidatedRouteMessageParams => RouteMessageParamsSchema.parse(params);
```
--------------------------------------------------------------------------------
/src/tools/geminiCodeReviewTool.ts:
--------------------------------------------------------------------------------
```typescript
import { GeminiService } from "../services/index.js";
import { logger } from "../utils/index.js";
import {
TOOL_NAME_CODE_REVIEW,
TOOL_DESCRIPTION_CODE_REVIEW,
GEMINI_CODE_REVIEW_PARAMS,
GeminiCodeReviewArgs,
} from "./geminiCodeReviewParams.js";
import { mapAnyErrorToMcpError } from "../utils/errors.js";
import { GitDiffReviewParams } from "../services/gemini/GeminiGitDiffService.js";
import type { NewGeminiServiceToolObject } from "./registration/ToolAdapter.js";
import type { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
/**
 * Handles Gemini code review operations including local diffs, GitHub repos,
 * and pull requests. The operation is selected by the discriminated `source`
 * parameter; errors are mapped to MCP errors before being rethrown.
 */
export const geminiCodeReviewTool: NewGeminiServiceToolObject<
  GeminiCodeReviewArgs,
  CallToolResult
> = {
  name: TOOL_NAME_CODE_REVIEW,
  description: TOOL_DESCRIPTION_CODE_REVIEW,
  inputSchema: GEMINI_CODE_REVIEW_PARAMS,
  execute: async (args: GeminiCodeReviewArgs, service: GeminiService) => {
    logger.debug(`Received ${TOOL_NAME_CODE_REVIEW} request:`, {
      source: args.source,
      modelName: args.model,
    });
    try {
      switch (args.source) {
        case "local_diff": {
          // Convert repository context object to string
          const repositoryContextString = args.repositoryContext
            ? JSON.stringify(args.repositoryContext)
            : undefined;
          // Prepare parameters for local diff review
          const reviewParams: GitDiffReviewParams = {
            diffContent: args.diffContent,
            modelName: args.model,
            reasoningEffort: args.reasoningEffort,
            reviewFocus: args.reviewFocus,
            repositoryContext: repositoryContextString,
            diffOptions: {
              maxFilesToInclude: args.maxFilesToInclude,
              excludePatterns: args.excludePatterns,
              prioritizeFiles: args.prioritizeFiles,
            },
            customPrompt: args.customPrompt,
          };
          // Call the service
          const reviewText = await service.reviewGitDiff(reviewParams);
          return {
            content: [
              {
                type: "text",
                text: reviewText,
              },
            ],
          };
        }
        case "github_repo": {
          // Parse GitHub URL to extract owner and repo.
          // [^/?#]+ stops at path, query, or fragment delimiters so URLs
          // like ".../repo?tab=readme" don't leak extra characters.
          const urlMatch = args.repoUrl.match(
            /github\.com\/([^/?#]+)\/([^/?#]+)/
          );
          if (!urlMatch) {
            throw new Error("Invalid GitHub repository URL format");
          }
          const [, owner, rawRepo] = urlMatch;
          // Clone-style URLs commonly end in ".git"; strip the suffix so the
          // service receives the bare repository name.
          const repo = rawRepo.replace(/\.git$/, "");
          // Call the service for GitHub repository review
          const reviewText = await service.reviewGitHubRepository({
            owner,
            repo,
            branch: args.branch,
            modelName: args.model,
            reasoningEffort: args.reasoningEffort,
            reviewFocus: args.reviewFocus,
            maxFilesToInclude: args.maxFiles,
            excludePatterns: args.excludePatterns,
            prioritizeFiles: args.prioritizeFiles,
            customPrompt: args.customPrompt,
          });
          return {
            content: [
              {
                type: "text",
                text: reviewText,
              },
            ],
          };
        }
        case "github_pr": {
          // Parse GitHub PR URL to extract owner, repo, and PR number
          // (repo is naturally bounded by the "/pull/" segment here)
          const urlMatch = args.prUrl.match(
            /github\.com\/([^/?#]+)\/([^/?#]+)\/pull\/(\d+)/
          );
          if (!urlMatch) {
            throw new Error("Invalid GitHub pull request URL format");
          }
          const [, owner, repo, prNumberStr] = urlMatch;
          const prNumber = parseInt(prNumberStr, 10);
          // Call the service for GitHub PR review
          const reviewText = await service.reviewGitHubPullRequest({
            owner,
            repo,
            prNumber,
            modelName: args.model,
            reasoningEffort: args.reasoningEffort,
            reviewFocus: args.reviewFocus,
            excludePatterns: args.excludePatterns,
            customPrompt: args.customPrompt,
          });
          return {
            content: [
              {
                type: "text",
                text: reviewText,
              },
            ],
          };
        }
        default: {
          // This should never happen due to the discriminated union
          throw new Error(`Unknown review source: ${JSON.stringify(args)}`);
        }
      }
    } catch (error: unknown) {
      logger.error(`Error processing ${TOOL_NAME_CODE_REVIEW}:`, error);
      throw mapAnyErrorToMcpError(error, TOOL_NAME_CODE_REVIEW);
    }
  },
};
// Also export a streaming version for local diffs
export const geminiCodeReviewStreamTool: NewGeminiServiceToolObject<
  GeminiCodeReviewArgs,
  AsyncGenerator<CallToolResult, void, unknown>
> = {
  name: "gemini_code_review_stream",
  description:
    "Stream code review results for local git diffs using Gemini models",
  inputSchema: GEMINI_CODE_REVIEW_PARAMS,
  execute: async (
    args: GeminiCodeReviewArgs,
    service: GeminiService
  ): Promise<AsyncGenerator<CallToolResult, void, unknown>> => {
    // The generator is lazy: validation and logging happen on the consumer's
    // first next() call, not when execute() returns.
    async function* generate(): AsyncGenerator<CallToolResult, void, unknown> {
      if (args.source !== "local_diff") {
        throw new Error("Streaming is only supported for local_diff source");
      }
      logger.debug(`Received gemini_code_review_stream request:`, {
        source: args.source,
        modelName: args.model,
      });
      try {
        // Serialize the structured repository context for the service layer
        const contextJson = args.repositoryContext
          ? JSON.stringify(args.repositoryContext)
          : undefined;
        const diffOptions = {
          maxFilesToInclude: args.maxFilesToInclude,
          excludePatterns: args.excludePatterns,
          prioritizeFiles: args.prioritizeFiles,
        };
        const params: GitDiffReviewParams = {
          diffContent: args.diffContent,
          modelName: args.model,
          reasoningEffort: args.reasoningEffort,
          reviewFocus: args.reviewFocus,
          repositoryContext: contextJson,
          diffOptions,
          customPrompt: args.customPrompt,
        };
        // Re-emit each streamed chunk as a text content result
        for await (const piece of service.reviewGitDiffStream(params)) {
          yield {
            content: [
              {
                type: "text" as const,
                text: piece,
              },
            ],
          };
        }
      } catch (error: unknown) {
        logger.error(`Error processing gemini_code_review_stream:`, error);
        throw mapAnyErrorToMcpError(error, "gemini_code_review_stream");
      }
    }
    return generate();
  },
};
```
--------------------------------------------------------------------------------
/src/utils/RetryService.ts:
--------------------------------------------------------------------------------
```typescript
import { logger } from "./logger.js";
/**
 * Error names/codes that are generally considered transient and therefore
 * worth retrying.
 */
const RETRYABLE_ERROR_NAMES = new Set([
  "NetworkError",
  "GeminiNetworkError",
  "ECONNRESET",
  "ETIMEDOUT",
  "ECONNREFUSED",
  "429", // Too Many Requests
  "503", // Service Unavailable
  "504", // Gateway Timeout
]);
/**
 * Substrings in an error message that suggest a transient, retryable failure
 */
const RETRYABLE_ERROR_MESSAGES = [
  "network",
  "timeout",
  "connection",
  "too many requests",
  "rate limit",
  "quota",
  "try again",
  "temporary",
  "unavailable",
  "overloaded",
];
/**
 * Options for configuring retry behavior
 */
export interface RetryOptions {
  /** Maximum number of retry attempts */
  maxAttempts?: number;
  /** Initial delay in milliseconds before first retry */
  initialDelayMs?: number;
  /** Maximum delay in milliseconds between retries */
  maxDelayMs?: number;
  /** Backoff factor to multiply delay after each attempt */
  backoffFactor?: number;
  /** Whether to add jitter to delays to prevent thundering herd */
  jitter?: boolean;
  /**
   * Custom function to determine if a specific error should be retried.
   * When omitted, the built-in name/message/status-code heuristics are used.
   */
  retryableErrorCheck?: (error: unknown) => boolean;
  /** Function to call before each retry attempt */
  onRetry?: (error: unknown, attempt: number, delayMs: number) => void;
}
/**
 * Default retry configuration values.
 *
 * BUG FIX: retryableErrorCheck deliberately has NO default. The previous
 * default of `() => false` was always defined, so isRetryableError()
 * short-circuited on it and never consulted the built-in heuristics —
 * meaning nothing was ever retried unless a custom check was supplied.
 */
const DEFAULT_RETRY_OPTIONS: Required<
  Omit<RetryOptions, "retryableErrorCheck">
> = {
  maxAttempts: 3,
  initialDelayMs: 100,
  maxDelayMs: 10000,
  backoffFactor: 2,
  jitter: true,
  onRetry: (_error: unknown, _attempt: number, _delayMs: number): void => {},
};
/**
 * Provides exponential backoff retry functionality for asynchronous operations
 */
export class RetryService {
  // All options have defaults except retryableErrorCheck, which stays
  // undefined unless the caller provides one.
  private options: Required<Omit<RetryOptions, "retryableErrorCheck">> &
    Pick<RetryOptions, "retryableErrorCheck">;

  /**
   * Creates a new RetryService with the specified options
   */
  constructor(options: RetryOptions = {}) {
    this.options = { ...DEFAULT_RETRY_OPTIONS, ...options };
  }

  /**
   * Determines if an error is retryable.
   * A caller-supplied retryableErrorCheck takes full precedence; otherwise
   * the error's name, message, and HTTP status/code fields are inspected.
   */
  private isRetryableError(error: unknown): boolean {
    // Use the custom check only when one was actually provided
    if (this.options.retryableErrorCheck) {
      return this.options.retryableErrorCheck(error);
    }
    // Handle Error objects
    if (error instanceof Error) {
      // Check error name (covers "NetworkError", "GeminiNetworkError", …)
      if (RETRYABLE_ERROR_NAMES.has(error.name)) {
        return true;
      }
      // Check if the error message contains any retryable pattern
      const errorMsg = error.message.toLowerCase();
      if (
        RETRYABLE_ERROR_MESSAGES.some((pattern) => errorMsg.includes(pattern))
      ) {
        return true;
      }
    }
    // Handle HTTP status code errors; Google APIs may use `code`
    // instead of `status`
    if (typeof error === "object" && error !== null) {
      const err = error as { status?: number; code?: number };
      if (
        err.status === 429 ||
        err.status === 503 ||
        err.status === 504 ||
        err.code === 429 ||
        err.code === 503 ||
        err.code === 504
      ) {
        return true;
      }
    }
    // Not identified as retryable
    return false;
  }

  /**
   * Calculates the delay for a retry attempt with optional jitter
   */
  private calculateDelay(attempt: number): number {
    // Calculate exponential backoff, capped at maxDelayMs
    const delay = Math.min(
      this.options.initialDelayMs *
        Math.pow(this.options.backoffFactor, attempt),
      this.options.maxDelayMs
    );
    // Full jitter: random value between 0 and the calculated delay
    // (prevents thundering herd)
    if (this.options.jitter) {
      return Math.random() * delay;
    }
    return delay;
  }

  /**
   * Executes an async function with retry logic
   *
   * @param fn The async function to execute with retry
   * @returns Promise that resolves with the result of the operation
   * @throws The last error encountered if all retries fail
   */
  public async execute<T>(fn: () => Promise<T>): Promise<T> {
    let lastError: unknown;
    // attempt 0 is the initial call; attempts 1..maxAttempts are retries
    for (let attempt = 0; attempt <= this.options.maxAttempts; attempt++) {
      try {
        if (attempt === 0) {
          return await fn();
        }
        // Calculate delay for this retry attempt
        const delayMs = this.calculateDelay(attempt - 1);
        // Call onRetry callback if provided
        if (this.options.onRetry) {
          this.options.onRetry(lastError, attempt, delayMs);
        }
        logger.debug(
          `Retrying operation (attempt ${attempt}/${this.options.maxAttempts}) after ${delayMs}ms delay`
        );
        // Wait before retrying
        await new Promise((resolve) => setTimeout(resolve, delayMs));
        // Execute retry
        return await fn();
      } catch (error) {
        lastError = error;
        // Stop retrying if error is not retryable
        if (!this.isRetryableError(error)) {
          logger.debug(
            `Non-retryable error encountered, aborting retry: ${error}`
          );
          throw error;
        }
        // Stop if this was the last attempt
        if (attempt === this.options.maxAttempts) {
          logger.debug(
            `Max retry attempts (${this.options.maxAttempts}) reached, giving up`
          );
          throw error;
        }
        // Log the error but continue to next attempt
        logger.debug(
          `Retryable error encountered on attempt ${attempt}: ${error}`
        );
      }
    }
    // Unreachable in practice (the final iteration throws), but required
    // for TypeScript's control-flow analysis
    throw lastError;
  }

  /**
   * Creates a wrapped version of an async function that includes retry logic
   *
   * @param fn The async function to wrap with retry logic
   * @returns A new function with the same signature but with retry capabilities
   */
  public wrap<T extends unknown[], R>(
    fn: (...args: T) => Promise<R>
  ): (...args: T) => Promise<R> {
    return async (...args: T): Promise<R> => {
      return this.execute(() => fn(...args));
    };
  }
}
/**
 * Module-level singleton RetryService built with default options,
 * shared by all withRetry() callers.
 */
const sharedRetryService = new RetryService();
/**
 * Executes an async operation with the default retry policy.
 *
 * @param fn The async function to execute with retry
 * @returns Promise that resolves with the result of the operation
 */
export async function withRetry<T>(fn: () => Promise<T>): Promise<T> {
  return sharedRetryService.execute(fn);
}
```
--------------------------------------------------------------------------------
/tests/utils/test-setup.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Test setup utilities for MCP Gemini Server tests
*
* This file provides helper functions for setting up and tearing down the server during tests,
* as well as creating test fixtures and mock objects.
*/
import { Server } from "node:http";
import { AddressInfo } from "node:net";
import { setTimeout } from "node:timers/promises";
/**
 * Options for creating a test server
 */
export interface TestServerOptions {
  /** Port to run the server on (0 for random port) */
  port?: number;
  /** API key to use (defaults to environment variable) */
  apiKey?: string;
  /** Default model to use for tests */
  defaultModel?: string;
  /** Base directory for file operations during tests */
  fileBasePath?: string;
  /** Whether to use verbose logging during tests */
  // NOTE(review): not currently read by setupTestServer — confirm intent
  verbose?: boolean;
}
/**
 * Context object returned by setupTestServer
 */
export interface TestServerContext {
  /** The HTTP server instance */
  server: Server;
  /** The base URL to connect to the server */
  baseUrl: string;
  /** Port the server is running on (the actual port when 0 was requested) */
  port: number;
  /** Function to cleanly shut down the server and restore process.env */
  teardown: () => Promise<void>;
  /** GeminiService instance for mocking (extracted via runtime type assertion) */
  geminiService: object;
}
/**
 * Sets up a test server with the specified options.
 *
 * Mutates process.env for the duration of the test; the returned teardown()
 * restores every variable to its original state. Variables that were unset
 * before setup are deleted on restore — assigning `undefined` to process.env
 * would store the literal string "undefined" (Node coerces values to string).
 *
 * @param options - Configuration options for the test server
 * @returns TestServerContext object with server and helper methods
 */
export async function setupTestServer(
  options: TestServerOptions = {}
): Promise<TestServerContext> {
  // Save original environment variables so teardown can restore them
  const originalEnv = {
    GOOGLE_GEMINI_API_KEY: process.env.GOOGLE_GEMINI_API_KEY,
    GOOGLE_GEMINI_MODEL: process.env.GOOGLE_GEMINI_MODEL,
    GEMINI_SAFE_FILE_BASE_DIR: process.env.GEMINI_SAFE_FILE_BASE_DIR,
    NODE_ENV: process.env.NODE_ENV,
  };
  // Restores a single env var; unset values must be deleted rather than
  // assigned (see function doc above).
  const restoreEnvVar = (key: string, value: string | undefined): void => {
    if (value === undefined) {
      delete process.env[key];
    } else {
      process.env[key] = value;
    }
  };
  // Set test environment variables
  process.env.NODE_ENV = "test";
  if (options.apiKey) {
    process.env.GOOGLE_GEMINI_API_KEY = options.apiKey;
  }
  if (options.defaultModel) {
    process.env.GOOGLE_GEMINI_MODEL = options.defaultModel;
  }
  if (options.fileBasePath) {
    process.env.GEMINI_SAFE_FILE_BASE_DIR = options.fileBasePath;
  }
  // Import server creation functions (dynamic, so the env vars above are in
  // effect when the module initializes)
  const { createServer } = await import("../../src/createServer.js");
  const http = await import("node:http");
  // Create MCP server instance
  // This is intentionally unused in the test setup but kept for reference
  // eslint-disable-next-line @typescript-eslint/no-unused-vars
  const { server: mcpServer, mcpClientService } = createServer();
  // Type assertion pattern: McpClientService -> { geminiService: object }
  // This double assertion is necessary because:
  // 1. McpClientService doesn't formally expose geminiService in its type definition
  // 2. We need to access it for test mocking purposes
  // 3. The service implementation actually contains this property at runtime
  const geminiService = (
    mcpClientService as unknown as { geminiService: object }
  ).geminiService;
  // Create an HTTP server that returns a mock response; real requests would
  // be processed through the McpServer transport layer.
  const port = options.port || 0;
  const httpServer = http.createServer();
  httpServer.on("request", (req, res) => {
    res.setHeader("Content-Type", "application/json");
    res.writeHead(200);
    res.end(
      JSON.stringify({
        status: "ok",
        message:
          "This is a mock response for testing. In a real implementation, requests would be processed through the McpServer transport layer.",
      })
    );
  });
  // Start listening and resolve once the socket is bound. Using the listen
  // callback avoids racing the "listening" event against listener registration.
  await new Promise<void>((resolve) => {
    httpServer.listen(port, () => resolve());
  });
  // Get the actual port (in case it was randomly assigned)
  const actualPort = (httpServer.address() as AddressInfo).port;
  const baseUrl = `http://localhost:${actualPort}`;
  // Return the context with server and helper methods
  return {
    server: httpServer,
    baseUrl,
    port: actualPort,
    geminiService,
    teardown: async () => {
      // Close the HTTP server
      await new Promise<void>((resolve, reject) => {
        httpServer.close((err: Error | undefined) => {
          if (err) reject(err);
          else resolve();
        });
      });
      // Restore original environment variables (deleting previously-unset ones)
      restoreEnvVar("GOOGLE_GEMINI_API_KEY", originalEnv.GOOGLE_GEMINI_API_KEY);
      restoreEnvVar("GOOGLE_GEMINI_MODEL", originalEnv.GOOGLE_GEMINI_MODEL);
      restoreEnvVar(
        "GEMINI_SAFE_FILE_BASE_DIR",
        originalEnv.GEMINI_SAFE_FILE_BASE_DIR
      );
      restoreEnvVar("NODE_ENV", originalEnv.NODE_ENV);
      // Small delay to ensure cleanup completes
      await setTimeout(100);
    },
  };
}
/**
 * Shape of a mock HTTP API response used by tests
 */
export interface MockApiResponse<T> {
  status: number;
  data: T;
  headers: Record<string, string>;
  config: Record<string, unknown>;
  request: Record<string, unknown>;
}
/**
 * Builds a mock API response object for testing.
 *
 * @param status - HTTP status code to return
 * @param data - Response data
 * @returns Mock response object with empty headers/config/request containers
 */
export function createMockResponse<T>(
  status: number,
  data: T
): MockApiResponse<T> {
  // Fresh empty containers per call so individual tests can mutate them
  // without affecting each other.
  const mock: MockApiResponse<T> = {
    status,
    data,
    headers: {},
    config: {},
    request: {},
  };
  return mock;
}
/**
 * Check whether the environment variables needed for testing are all set.
 * Warns on the console about any that are missing.
 *
 * @param requiredVars - Array of required environment variable names
 * @returns true if all variables are available, false otherwise
 */
export function checkRequiredEnvVars(
  requiredVars: string[] = ["GOOGLE_GEMINI_API_KEY"]
): boolean {
  const missing = requiredVars.filter((name) => !process.env[name]);
  if (missing.length === 0) {
    return true;
  }
  console.warn(
    `Missing required environment variables for testing: ${missing.join(", ")}`
  );
  console.warn(
    "Create a .env.test file or set these variables in your environment"
  );
  return false;
}
/**
 * Minimal test-context contract: anything with a skip(reason) method
 */
export interface SkippableTestContext {
  skip: (reason: string) => void;
}
/**
 * Skips a test (via the context's skip method) when any required environment
 * variable is unset.
 *
 * @param t - Test context
 * @param requiredVars - Array of required environment variable names
 * @returns true when the test was skipped, false otherwise
 */
export function skipIfMissingEnvVars(
  t: SkippableTestContext,
  requiredVars: string[] = ["GOOGLE_GEMINI_API_KEY"]
): boolean {
  const missing = requiredVars.filter((name) => !process.env[name]);
  if (missing.length === 0) {
    return false;
  }
  t.skip(`Test requires environment variables: ${missing.join(", ")}`);
  return true;
}
```
--------------------------------------------------------------------------------
/src/utils/geminiErrors.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Enhanced error types for Gemini API operations
* Provides more structured and specific error handling
*/
import { logger } from "./logger.js";
/**
 * Base error class for all Gemini-related errors.
 *
 * Side effect: every construction logs the error via the shared logger (see
 * constructor), so merely creating an instance records it for monitoring.
 * Note: because the log happens inside this constructor, subclass instances
 * are logged under the name "GeminiApiError" — each subclass reassigns
 * this.name only after super() has already logged.
 */
export class GeminiApiError extends Error {
  constructor(
    message: string,
    public cause?: unknown
  ) {
    super(message);
    this.name = "GeminiApiError";
    // Capture stack trace (V8-specific API; guarded for other engines)
    if (Error.captureStackTrace) {
      Error.captureStackTrace(this, this.constructor);
    }
    // Log the error for monitoring
    logger.error(`${this.name}: ${message}`, { cause });
  }
}
/**
 * Error for authentication and authorization issues
 */
export class GeminiAuthError extends GeminiApiError {
  constructor(message: string, cause?: unknown) {
    super(message, cause);
    this.name = "GeminiAuthError";
  }
}
/**
 * Error for API rate limiting and quota issues
 */
export class GeminiQuotaError extends GeminiApiError {
  constructor(message: string, cause?: unknown) {
    super(message, cause);
    this.name = "GeminiQuotaError";
  }
}
/**
 * Error for content safety filtering.
 * `categories` carries the harm categories that triggered the filter, when known.
 */
export class GeminiContentFilterError extends GeminiApiError {
  constructor(
    message: string,
    public readonly categories?: string[],
    cause?: unknown
  ) {
    super(message, cause);
    this.name = "GeminiContentFilterError";
  }
}
/**
 * Error for invalid parameters.
 * `field` names the offending parameter, when known.
 */
export class GeminiValidationError extends GeminiApiError {
  constructor(
    message: string,
    public readonly field?: string,
    cause?: unknown
  ) {
    super(message, cause);
    this.name = "GeminiValidationError";
  }
}
/**
 * Error for network issues
 */
export class GeminiNetworkError extends GeminiApiError {
  constructor(message: string, cause?: unknown) {
    super(message, cause);
    this.name = "GeminiNetworkError";
  }
}
/**
 * Error for model-specific issues.
 * `modelName` identifies the model involved, when known.
 */
export class GeminiModelError extends GeminiApiError {
  constructor(
    message: string,
    public readonly modelName?: string,
    cause?: unknown
  ) {
    super(message, cause);
    this.name = "GeminiModelError";
  }
}
/**
 * Error for URL fetching operations.
 * Carries the fetched URL and, when available, the HTTP status code.
 */
export class GeminiUrlFetchError extends GeminiApiError {
  constructor(
    message: string,
    public readonly url: string,
    public readonly statusCode?: number,
    cause?: unknown
  ) {
    super(message, cause);
    this.name = "GeminiUrlFetchError";
  }
}
/**
 * Error for URL validation issues.
 * `reason` is a closed set describing why the URL was rejected.
 */
export class GeminiUrlValidationError extends GeminiApiError {
  constructor(
    message: string,
    public readonly url: string,
    public readonly reason:
      | "blocked_domain"
      | "invalid_format"
      | "suspicious_pattern",
    cause?: unknown
  ) {
    super(message, cause);
    this.name = "GeminiUrlValidationError";
  }
}
/**
 * Maps an arbitrary error to the most specific Gemini error type.
 *
 * Classification is keyword-based on the lower-cased error message; the
 * first matching rule wins, and unmatched errors fall back to a generic
 * GeminiApiError. Errors that are already GeminiApiError instances are
 * returned unchanged.
 *
 * @param error The original error (any thrown value)
 * @param context Optional operation name, prefixed to the mapped message
 * @returns A properly typed Gemini error wrapping the original
 */
export function mapGeminiError(
  error: unknown,
  context?: string
): GeminiApiError {
  // Already classified — pass straight through.
  if (error instanceof GeminiApiError) {
    return error;
  }

  // Normalize anything non-Error into an Error so we have a message/stack.
  const underlying = error instanceof Error ? error : new Error(String(error));
  const needle = underlying.message.toLowerCase();
  const prefix = context ? `[${context}] ` : "";

  // Ordered matcher table: earlier rules take precedence, matching the
  // original if/else-if chain exactly.
  const rules: Array<{ keywords: string[]; build: () => GeminiApiError }> = [
    {
      keywords: ["unauthorized", "permission", "api key"],
      build: () =>
        new GeminiAuthError(
          `${prefix}Authentication failed: ${underlying.message}`,
          underlying
        ),
    },
    {
      keywords: ["quota", "rate limit", "too many requests"],
      build: () =>
        new GeminiQuotaError(
          `${prefix}API quota exceeded: ${underlying.message}`,
          underlying
        ),
    },
    {
      keywords: ["safety", "blocked", "harmful", "inappropriate"],
      build: () =>
        new GeminiContentFilterError(
          `${prefix}Content filtered: ${underlying.message}`,
          undefined,
          underlying
        ),
    },
    {
      keywords: ["validation", "invalid", "required"],
      build: () =>
        new GeminiValidationError(
          `${prefix}Validation error: ${underlying.message}`,
          undefined,
          underlying
        ),
    },
    {
      keywords: ["network", "timeout", "connection"],
      build: () =>
        new GeminiNetworkError(
          `${prefix}Network error: ${underlying.message}`,
          underlying
        ),
    },
    {
      keywords: ["model", "not found", "unsupported"],
      build: () =>
        new GeminiModelError(
          `${prefix}Model error: ${underlying.message}`,
          undefined,
          underlying
        ),
    },
  ];

  for (const rule of rules) {
    if (rule.keywords.some((keyword) => needle.includes(keyword))) {
      return rule.build();
    }
  }

  // Nothing matched: wrap generically, preserving the original as cause.
  return new GeminiApiError(`${prefix}${underlying.message}`, underlying);
}
/**
 * Canned, user-facing messages for common Gemini failure modes.
 *
 * Declared `as const` so each message keeps its literal string type and the
 * object is readonly at the type level, preventing accidental mutation.
 */
export const GeminiErrorMessages = {
  // General errors
  GENERAL_ERROR: "An error occurred while processing your request",
  TIMEOUT_ERROR: "The request timed out. Please try again later",
  // Authentication errors
  INVALID_API_KEY: "Invalid or missing API key",
  API_KEY_EXPIRED: "API key has expired",
  // Quota errors
  QUOTA_EXCEEDED: "API quota has been exceeded for the current period",
  RATE_LIMIT_EXCEEDED: "Too many requests. Please try again later",
  // Content filter errors
  CONTENT_FILTERED: "Content was filtered due to safety settings",
  UNSAFE_PROMPT: "The prompt was flagged as potentially unsafe",
  UNSAFE_CONTENT: "Generated content was flagged as potentially unsafe",
  // Validation errors
  INVALID_PROMPT: "Invalid prompt format or content",
  INVALID_PARAMETERS: "One or more parameters are invalid",
  // Network errors
  NETWORK_ERROR: "Network error. Please check your internet connection",
  CONNECTION_FAILED: "Failed to connect to the Gemini API",
  // Model errors
  MODEL_NOT_FOUND: "The specified model was not found",
  UNSUPPORTED_MODEL: "The specified model does not support this operation",
  UNSUPPORTED_FORMAT: "The requested format is not supported by this model",
  // URL context errors
  URL_FETCH_FAILED: "Failed to fetch content from the specified URL",
  URL_VALIDATION_FAILED: "URL validation failed due to security restrictions",
  URL_ACCESS_DENIED: "Access to the specified URL is denied",
  URL_CONTENT_TOO_LARGE: "URL content exceeds the maximum allowed size",
  URL_TIMEOUT: "URL fetch operation timed out",
  UNSUPPORTED_URL_CONTENT: "The URL content type is not supported",
} as const;
```
--------------------------------------------------------------------------------
/review-prompt.txt:
--------------------------------------------------------------------------------
```
# Code Review Meta Prompt: MCP Gemini Server Upload Feature Removal
## Context
You are acting as both a **Team Lead** and **Senior Staff Engineer** conducting a comprehensive code review of a major refactoring effort. The development team has completed implementing PRD requirements to remove all file upload capabilities from an MCP (Model Context Protocol) Gemini Server while preserving URL-based multimedia analysis functionality.
## Review Scope
The changes span across the entire codebase and involve:
- **Code Removal**: Deletion of upload-related tools, services, and type definitions
- **Service Refactoring**: Modification of core services to remove file handling logic
- **API Consolidation**: Streamlining of tool interfaces and parameter schemas
- **Test Updates**: Comprehensive test suite modifications and cleanup
- **Documentation Overhaul**: Major updates to README and creation of new user guides
## Technical Architecture Context
This is a TypeScript/Node.js MCP server that:
- Wraps Google's `@google/genai` SDK (v0.10.0)
- Provides Gemini AI capabilities as standardized MCP tools
- Supports multiple transport methods (stdio, HTTP, SSE)
- Implements service-based architecture with dependency injection
- Uses Zod for schema validation and strict TypeScript typing
- Maintains comprehensive test coverage with Vitest
## Review Objectives
### 1. **Architecture & Design Review**
Evaluate whether the refactoring:
- Maintains clean separation of concerns
- Preserves the existing service-based architecture
- Introduces any architectural debt or anti-patterns
- Properly handles dependency injection and service boundaries
- Maintains consistent error handling patterns
### 2. **Type Safety & Schema Validation**
Assess:
- TypeScript type precision and safety (no widening to `any`)
- Zod schema consistency and validation completeness
- Interface contracts and backward compatibility
- Generic constraints and type inference preservation
- Removal of unused types without breaking dependent code
### 3. **API Design & Consistency**
Review:
- Tool parameter schema consistency across similar operations
- MCP protocol compliance and standard adherence
- URL-based vs file-based operation distinction clarity
- Error response standardization and user experience
- Tool naming conventions and parameter structures
### 4. **Security Implications**
Examine:
- URL validation and security screening mechanisms
- Removal of file upload attack vectors
- Path traversal prevention in remaining file operations
- Input sanitization for URL-based content processing
- Authentication and authorization model integrity
### 5. **Test Coverage & Quality**
Analyze:
- Test suite completeness after file upload test removal
- URL-based functionality test coverage adequacy
- Integration test scenarios for multimedia analysis
- Mocking strategies for external URL dependencies
- Test maintainability and reliability
### 6. **Documentation & User Experience**
Evaluate:
- Clarity of file upload vs URL-based distinction
- Completeness of migration guidance for existing users
- Example quality and real-world applicability
- Error message helpfulness and actionability
- Developer onboarding experience improvements
## Technical Validation Tasks
### Code Quality Checks
1. **Run and analyze** the project's lint, typecheck, and formatting tools
2. **Verify** that `npm run check-all` passes without errors
3. **Examine** TypeScript compilation with strict mode enabled
4. **Review** test suite execution results and coverage reports
### External Documentation Validation
1. **Cross-reference** Google Gemini API documentation at:
- https://ai.google.dev/gemini-api/docs/image-understanding
- https://ai.google.dev/gemini-api/docs/video-understanding
2. **Validate** claimed capabilities against official API specifications
3. **Verify** supported format lists and limitation accuracy
4. **Check** rate limiting and quota information accuracy
### Dependency Analysis
1. **Review** package.json changes for dependency management
2. **Assess** potential security vulnerabilities in remaining dependencies
3. **Evaluate** bundle size impact of removed functionality
4. **Check** for unused dependencies that can be removed
## Specific Areas of Concern
### Critical Questions to Address:
1. **Completeness**: Are there any remnants of upload functionality that were missed?
2. **Breaking Changes**: What is the impact on existing users and how is it communicated?
3. **Performance**: Does URL-based processing introduce new performance bottlenecks?
4. **Reliability**: How robust is the URL fetching and validation logic?
5. **Scalability**: Can the URL-based approach handle production workloads?
### Code Patterns to Validate:
- Consistent error handling across all URL-based operations
- Proper async/await usage in service methods
- Resource cleanup and memory management
- Retry logic and timeout handling for URL operations
- Caching strategy effectiveness for repeated URL access
## Deliverable Requirements
### Code Review Report Structure:
1. **Executive Summary** (2-3 paragraphs)
- Overall assessment of changes
- Major risks and recommendations
- Go/no-go decision with rationale
2. **Technical Assessment** (detailed analysis)
- Architecture and design review findings
- Security and performance implications
- Code quality and maintainability assessment
- Test coverage and reliability evaluation
3. **Actionable Feedback** (prioritized list)
- Critical issues requiring immediate attention
- Recommended improvements for next iteration
- Future considerations and technical debt items
- Documentation gaps and clarity improvements
4. **Compliance Verification**
- TypeScript strict mode compliance
- MCP protocol standard adherence
- Google Gemini API usage best practices
- Security best practices implementation
### Review Standards:
- **Be specific**: Reference exact file paths, line numbers, and code snippets
- **Be actionable**: Provide concrete suggestions for improvements
- **Be balanced**: Acknowledge good practices alongside areas for improvement
- **Be thorough**: Cover all aspects from architecture to documentation
- **Be pragmatic**: Consider real-world usage scenarios and edge cases
## Background Context for Review
The team has systematically worked through a comprehensive task list covering:
- Tool removal and service refactoring (Tasks 1.0-2.0)
- Type system cleanup and schema updates (Task 3.0)
- Test suite overhaul and validation (Task 4.0)
- Documentation transformation and user guidance (Task 5.0)
The goal was to create a cleaner, more focused server that emphasizes URL-based multimedia analysis while removing the complexity and security concerns of direct file uploads.
Please conduct this review with the rigor expected for a production system that will be used by multiple teams and external developers.
```
--------------------------------------------------------------------------------
/tests/unit/server/transportLogic.test.vitest.ts:
--------------------------------------------------------------------------------
```typescript
// Using vitest globals - see vitest.config.ts globals: true

/**
 * Unit tests for the pure decision logic behind the server's transport
 * layer. Each describe block re-implements the relevant selection or
 * validation function locally so the logic is exercised in isolation,
 * without wiring up a real server.
 */
describe("Transport Logic Tests", () => {
  describe("Transport Selection", () => {
    // Mirrors the server's transport dispatch: "sse", "http" and
    // "streamable" all resolve to the streamable HTTP transport,
    // "streaming" falls back to stdio, and anything else (including
    // undefined) defaults to stdio.
    const selectTransport = (transportType: string | undefined) => {
      const type = transportType || "stdio";
      if (type === "sse") {
        return {
          selected: "streamable",
          fallback: false,
          message:
            "SSE transport - using StreamableHTTPServerTransport via HTTP endpoint",
        };
      } else if (type === "http" || type === "streamable") {
        return {
          selected: "streamable",
          fallback: false,
          message:
            "HTTP transport - individual requests will create their own transports",
        };
      } else if (type === "streaming") {
        return {
          selected: "stdio",
          fallback: true,
          reason: "Streaming transport not currently implemented",
        };
      } else {
        return {
          selected: "stdio",
          fallback: false,
          message: "Using stdio transport",
        };
      }
    };
    it("should select stdio by default", () => {
      const result = selectTransport(undefined);
      expect(result.selected).toBe("stdio");
      expect(result.fallback).toBe(false);
    });
    it("should select streamable for http transport", () => {
      const result = selectTransport("http");
      expect(result.selected).toBe("streamable");
      expect(result.fallback).toBe(false);
    });
    it("should select streamable for streamable transport", () => {
      const result = selectTransport("streamable");
      expect(result.selected).toBe("streamable");
      expect(result.fallback).toBe(false);
    });
    it("should select streamable for SSE", () => {
      const result = selectTransport("sse");
      expect(result.selected).toBe("streamable");
      expect(result.fallback).toBe(false);
      expect(result.message).toContain(
        "SSE transport - using StreamableHTTPServerTransport"
      );
    });
    it("should fallback to stdio for streaming", () => {
      const result = selectTransport("streaming");
      expect(result.selected).toBe("stdio");
      expect(result.fallback).toBe(true);
      expect(result.reason).toContain(
        "Streaming transport not currently implemented"
      );
    });
  });
  describe("Session Validation", () => {
    // A valid JSON-RPC 2.0 initialize request: correct version tag, the
    // "initialize" method, and a string or numeric id.
    const isInitializeRequest = (body: unknown): boolean => {
      if (!body || typeof body !== "object") return false;
      const jsonRpcBody = body as {
        jsonrpc?: string;
        method?: string;
        id?: string | number;
      };
      return (
        jsonRpcBody.jsonrpc === "2.0" &&
        jsonRpcBody.method === "initialize" &&
        (typeof jsonRpcBody.id === "string" ||
          typeof jsonRpcBody.id === "number")
      );
    };
    // Access policy: initialize requests may arrive without a session;
    // everything else needs a known session id.
    const shouldAllowRequest = (
      sessionId: string | undefined,
      body: unknown,
      sessions: Set<string>
    ): boolean => {
      // Allow initialize requests without session
      if (!sessionId && isInitializeRequest(body)) {
        return true;
      }
      // Allow requests with valid session
      if (sessionId && sessions.has(sessionId)) {
        return true;
      }
      // Reject everything else
      return false;
    };
    it("should identify valid initialize requests", () => {
      expect(
        isInitializeRequest({
          jsonrpc: "2.0",
          id: 1,
          method: "initialize",
          params: {},
        })
      ).toBe(true);
      expect(
        isInitializeRequest({
          jsonrpc: "2.0",
          id: "init-1",
          method: "initialize",
          params: {},
        })
      ).toBe(true);
    });
    it("should reject invalid initialize requests", () => {
      expect(isInitializeRequest(null)).toBe(false);
      expect(isInitializeRequest({})).toBe(false);
      expect(isInitializeRequest({ method: "initialize" })).toBe(false);
      expect(
        isInitializeRequest({ jsonrpc: "2.0", method: "tools/call" })
      ).toBe(false);
    });
    it("should allow initialize without session", () => {
      const sessions = new Set<string>();
      const body = { jsonrpc: "2.0", id: 1, method: "initialize" };
      expect(shouldAllowRequest(undefined, body, sessions)).toBe(true);
    });
    it("should reject non-initialize without session", () => {
      const sessions = new Set<string>();
      const body = { jsonrpc: "2.0", id: 1, method: "tools/call" };
      expect(shouldAllowRequest(undefined, body, sessions)).toBe(false);
    });
    it("should allow requests with valid session", () => {
      const sessions = new Set(["session-123"]);
      const body = { jsonrpc: "2.0", id: 1, method: "tools/call" };
      expect(shouldAllowRequest("session-123", body, sessions)).toBe(true);
    });
    it("should reject requests with invalid session", () => {
      const sessions = new Set(["session-123"]);
      const body = { jsonrpc: "2.0", id: 1, method: "tools/call" };
      expect(shouldAllowRequest("wrong-session", body, sessions)).toBe(false);
    });
  });
  describe("Accept Header Validation", () => {
    // Streamable HTTP requires clients to accept BOTH JSON responses and
    // SSE streams; header lookup tolerates either casing of "accept".
    const validateAcceptHeader = (headers: Record<string, string>): boolean => {
      const accept = headers["accept"] || headers["Accept"] || "";
      return (
        accept.includes("application/json") &&
        accept.includes("text/event-stream")
      );
    };
    it("should accept valid headers", () => {
      expect(
        validateAcceptHeader({
          Accept: "application/json, text/event-stream",
        })
      ).toBe(true);
      expect(
        validateAcceptHeader({
          accept: "application/json, text/event-stream",
        })
      ).toBe(true);
    });
    it("should reject missing event-stream", () => {
      expect(
        validateAcceptHeader({
          Accept: "application/json",
        })
      ).toBe(false);
    });
    it("should reject missing json", () => {
      expect(
        validateAcceptHeader({
          Accept: "text/event-stream",
        })
      ).toBe(false);
    });
    it("should reject empty headers", () => {
      expect(validateAcceptHeader({})).toBe(false);
    });
  });
  describe("Environment Validation", () => {
    // Returns the subset of required variable names that are missing or
    // empty in the supplied environment map.
    const validateRequiredEnvVars = (
      env: Record<string, string | undefined>
    ): string[] => {
      const required = [
        "GOOGLE_GEMINI_API_KEY",
        "MCP_SERVER_HOST",
        "MCP_SERVER_PORT",
        "MCP_CONNECTION_TOKEN",
      ];
      return required.filter((key) => !env[key]);
    };
    it("should pass with all required vars", () => {
      const env = {
        GOOGLE_GEMINI_API_KEY: "key",
        MCP_SERVER_HOST: "localhost",
        MCP_SERVER_PORT: "8080",
        MCP_CONNECTION_TOKEN: "token",
      };
      expect(validateRequiredEnvVars(env)).toEqual([]);
    });
    it("should identify missing vars", () => {
      const env = {
        GOOGLE_GEMINI_API_KEY: "key",
        MCP_SERVER_HOST: "localhost",
      };
      const missing = validateRequiredEnvVars(env);
      expect(missing).toContain("MCP_SERVER_PORT");
      expect(missing).toContain("MCP_CONNECTION_TOKEN");
    });
  });
});
```
--------------------------------------------------------------------------------
/tests/utils/assertions.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Custom assertion helpers for testing the MCP Gemini Server
*
* This module provides specialized assertion functions to make tests more
* readable and to provide better error messages for common test scenarios.
*/
import assert from "node:assert/strict";
import { isMcpError } from "./error-helpers.js";
/**
 * Assert that a response matches the expected structure for content
 * generation: a non-empty candidates array whose first candidate contains
 * at least one part with non-empty text.
 *
 * @param response - The response object to check
 */
export function assertValidContentResponse(response: any): void {
  assert.ok(response, "Response should not be null or undefined");

  const { candidates } = response;
  assert.ok(candidates, "Response should have candidates array");
  assert.ok(Array.isArray(candidates), "Candidates should be an array");
  assert.ok(candidates.length > 0, "Candidates array should not be empty");

  const [firstCandidate] = candidates;
  assert.ok(firstCandidate.content, "Candidate should have content");

  const { parts } = firstCandidate.content;
  assert.ok(parts, "Content should have parts array");
  assert.ok(Array.isArray(parts), "Parts should be an array");

  // At least one part must carry actual text.
  const textFound = parts.some(
    (part: any) => typeof part.text === "string" && part.text.length > 0
  );
  assert.ok(textFound, "At least one part should have non-empty text");
}
/**
 * Assert that a response matches the expected structure for image
 * generation: an images array of exactly `expectedCount` entries, each with
 * non-empty base64 data, a supported mime type, and positive dimensions.
 *
 * @param response - The response object to check
 * @param expectedCount - Expected number of images (default: 1)
 */
export function assertValidImageResponse(
  response: any,
  expectedCount: number = 1
): void {
  assert.ok(response, "Response should not be null or undefined");
  assert.ok(response.images, "Response should have images array");
  assert.ok(Array.isArray(response.images), "Images should be an array");
  assert.strictEqual(
    response.images.length,
    expectedCount,
    `Images array should have ${expectedCount} element(s)`
  );

  response.images.forEach((image: any, i: number) => {
    // Image payload
    assert.ok(image.base64Data, `Image ${i} should have base64Data`);
    assert.ok(
      typeof image.base64Data === "string",
      `Image ${i} base64Data should be a string`
    );
    assert.ok(
      image.base64Data.length > 0,
      `Image ${i} base64Data should not be empty`
    );

    // Mime type must be one of the supported output formats
    assert.ok(image.mimeType, `Image ${i} should have mimeType`);
    assert.ok(
      typeof image.mimeType === "string",
      `Image ${i} mimeType should be a string`
    );
    assert.ok(
      ["image/jpeg", "image/png", "image/webp"].includes(image.mimeType),
      `Image ${i} should have a valid mimeType`
    );

    // Dimensions must be positive numbers
    assert.ok(image.width, `Image ${i} should have width`);
    assert.ok(
      typeof image.width === "number",
      `Image ${i} width should be a number`
    );
    assert.ok(image.width > 0, `Image ${i} width should be positive`);

    assert.ok(image.height, `Image ${i} should have height`);
    assert.ok(
      typeof image.height === "number",
      `Image ${i} height should be a number`
    );
    assert.ok(image.height > 0, `Image ${i} height should be positive`);
  });
}
/**
 * Assert that an error is an McpError with the expected code, and
 * optionally that its message contains a given substring.
 *
 * @param error - The error to check
 * @param expectedCode - The expected error code
 * @param messageIncludes - Optional substring to check for in the error message
 */
export function assertMcpError(
  error: any,
  expectedCode: string,
  messageIncludes?: string
): void {
  // Use the shared helper rather than a bare instanceof check.
  assert.ok(isMcpError(error), "Error should be an instance of McpError");

  assert.strictEqual(
    error.code,
    expectedCode,
    `Error code should be ${expectedCode}`
  );

  if (!messageIncludes) {
    return;
  }
  assert.ok(
    error.message.includes(messageIncludes),
    `Error message should include "${messageIncludes}"`
  );
}
/**
 * Assert that a detection response's objects array has the correct
 * bounding box structure: each object carries a string label and a box
 * whose coordinates are numbers in the normalized 0-1000 range with
 * max strictly greater than min on both axes.
 *
 * @param objects - The objects array from detection response
 */
export function assertValidBoundingBoxes(objects: any[]): void {
  assert.ok(Array.isArray(objects), "Objects should be an array");
  assert.ok(objects.length > 0, "Objects array should not be empty");

  for (const [i, obj] of objects.entries()) {
    assert.ok(obj.label, `Object ${i} should have a label`);
    assert.ok(
      typeof obj.label === "string",
      `Object ${i} label should be a string`
    );
    assert.ok(obj.boundingBox, `Object ${i} should have a boundingBox`);

    const box = obj.boundingBox;

    // Every coordinate must be a number within the normalized range.
    for (const key of ["xMin", "yMin", "xMax", "yMax"] as const) {
      assert.ok(
        typeof box[key] === "number",
        `Box ${i} ${key} should be a number`
      );
      assert.ok(
        box[key] >= 0 && box[key] <= 1000,
        `Box ${i} ${key} should be between 0 and 1000`
      );
    }

    // The box must have positive extent on both axes.
    assert.ok(box.xMax > box.xMin, `Box ${i} xMax should be greater than xMin`);
    assert.ok(box.yMax > box.yMin, `Box ${i} yMax should be greater than yMin`);
  }
}
/**
 * Assert that a session ID is a non-empty string made up only of
 * alphanumerics, underscores, and hyphens (UUID-like tokens).
 *
 * @param sessionId - The session ID to check
 */
export function assertValidSessionId(sessionId: string): void {
  assert.ok(sessionId, "Session ID should not be null or undefined");
  assert.ok(typeof sessionId === "string", "Session ID should be a string");
  assert.ok(sessionId.length > 0, "Session ID should not be empty");
  assert.ok(
    /^[a-zA-Z0-9_-]+$/.test(sessionId),
    "Session ID should have a valid format"
  );
}
/**
 * Assert that a file ID is a non-empty string in the API's
 * "files/<id>" resource-name format.
 *
 * @param fileId - The file ID to check
 */
export function assertValidFileId(fileId: string): void {
  assert.ok(fileId, "File ID should not be null or undefined");
  assert.ok(typeof fileId === "string", "File ID should be a string");
  assert.ok(fileId.length > 0, "File ID should not be empty");

  const expectedPrefix = "files/";
  assert.ok(
    fileId.startsWith(expectedPrefix),
    'File ID should start with "files/"'
  );
}
/**
 * Assert that a cache ID is a non-empty string in the API's
 * "cachedContents/<id>" resource-name format.
 *
 * @param cacheId - The cache ID to check
 */
export function assertValidCacheId(cacheId: string): void {
  assert.ok(cacheId, "Cache ID should not be null or undefined");
  assert.ok(typeof cacheId === "string", "Cache ID should be a string");
  assert.ok(cacheId.length > 0, "Cache ID should not be empty");

  const expectedPrefix = "cachedContents/";
  assert.ok(
    cacheId.startsWith(expectedPrefix),
    'Cache ID should start with "cachedContents/"'
  );
}
```
--------------------------------------------------------------------------------
/src/tools/geminiCacheTool.ts:
--------------------------------------------------------------------------------
```typescript
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
import { z } from "zod";
import {
GEMINI_CACHE_TOOL_NAME,
GEMINI_CACHE_TOOL_DESCRIPTION,
GEMINI_CACHE_PARAMS,
} from "./geminiCacheParams.js";
import { GeminiService } from "../services/index.js";
import { logger } from "../utils/index.js";
import { mapAnyErrorToMcpError } from "../utils/errors.js";
import { CachedContentMetadata } from "../types/index.js";
import { Content, Tool, ToolConfig } from "../services/gemini/GeminiTypes.js";
// Argument type derived from the Zod schema so the handler and the schema
// can never drift apart.
type GeminiCacheArgs = z.infer<z.ZodObject<typeof GEMINI_CACHE_PARAMS>>;

/**
 * Registers the gemini_cache tool with the MCP server.
 * This consolidated tool handles cache create, list, get, update, and
 * delete operations, dispatching on the `operation` argument.
 *
 * @param server - The McpServer instance.
 * @param serviceInstance - An instance of the GeminiService.
 */
export const geminiCacheTool = (
  server: McpServer,
  serviceInstance: GeminiService
): void => {
  /**
   * Processes a single gemini_cache request: validates the operation's
   * required fields, delegates to the matching GeminiService method, and
   * serializes the result as JSON text content.
   *
   * @param args - The arguments object matching GEMINI_CACHE_PARAMS.
   * @returns The result content for MCP.
   * @throws McpError (via mapAnyErrorToMcpError) on any validation or
   *   service failure.
   */
  const processRequest = async (args: unknown): Promise<CallToolResult> => {
    // Cast is safe here: the MCP server has already validated `args`
    // against GEMINI_CACHE_PARAMS before invoking this handler.
    const typedArgs = args as GeminiCacheArgs;
    logger.debug(`Received ${GEMINI_CACHE_TOOL_NAME} request:`, {
      operation: typedArgs.operation,
      cacheName: typedArgs.cacheName,
      model: typedArgs.model,
    });
    try {
      // Cross-field validation the Zod schema cannot express:
      // `create` needs contents; `get`/`update`/`delete` need a cacheName.
      if (typedArgs.operation === "create" && !typedArgs.contents) {
        throw new Error("contents is required for operation 'create'");
      }
      if (
        (typedArgs.operation === "get" ||
          typedArgs.operation === "update" ||
          typedArgs.operation === "delete") &&
        !typedArgs.cacheName
      ) {
        throw new Error(
          `cacheName is required for operation '${typedArgs.operation}'`
        );
      }
      // Cache names are full resource names: "cachedContents/<id>".
      if (
        typedArgs.cacheName &&
        !typedArgs.cacheName.match(/^cachedContents\/.+$/)
      ) {
        throw new Error("cacheName must start with 'cachedContents/'");
      }
      // An update with nothing to change is a caller error.
      if (
        typedArgs.operation === "update" &&
        !typedArgs.ttl &&
        !typedArgs.displayName
      ) {
        throw new Error(
          "At least one of 'ttl' or 'displayName' must be provided for update operation"
        );
      }
      // Dispatch to the GeminiService method for the requested operation.
      switch (typedArgs.operation) {
        case "create": {
          // Build the options object from only the fields the caller set,
          // so the service receives no explicit-undefined keys.
          const cacheOptions: {
            displayName?: string;
            systemInstruction?: Content;
            ttl?: string;
            tools?: Tool[];
            toolConfig?: ToolConfig;
          } = {};
          if (typedArgs.displayName)
            cacheOptions.displayName = typedArgs.displayName;
          if (typedArgs.ttl) cacheOptions.ttl = typedArgs.ttl;
          if (typedArgs.systemInstruction) {
            cacheOptions.systemInstruction =
              typedArgs.systemInstruction as Content;
          }
          if (typedArgs.tools) cacheOptions.tools = typedArgs.tools as Tool[];
          if (typedArgs.toolConfig)
            cacheOptions.toolConfig = typedArgs.toolConfig as ToolConfig;
          // createCache(model, contents, options?) — options omitted
          // entirely when the caller set none of them.
          const cacheMetadata: CachedContentMetadata =
            await serviceInstance.createCache(
              typedArgs.model ?? "", // model first, provide empty string as fallback
              typedArgs.contents as Content[], // contents second
              Object.keys(cacheOptions).length > 0 ? cacheOptions : undefined // options third
            );
          logger.info(
            `Cache created successfully. Name: ${cacheMetadata.name}`
          );
          return {
            content: [
              {
                type: "text" as const,
                text: JSON.stringify(cacheMetadata, null, 2),
              },
            ],
          };
        }
        case "list": {
          // Paged listing; both parameters are optional pass-throughs.
          const listResult = await serviceInstance.listCaches(
            typedArgs.pageSize,
            typedArgs.pageToken
          );
          logger.info(`Listed ${listResult.caches.length} caches`);
          return {
            content: [
              {
                type: "text" as const,
                text: JSON.stringify(listResult, null, 2),
              },
            ],
          };
        }
        case "get": {
          // cacheName presence and format were validated above, so the
          // non-null assertion and template-literal cast are safe.
          const cacheMetadata = await serviceInstance.getCache(
            typedArgs.cacheName! as `cachedContents/${string}`
          );
          logger.info(`Retrieved metadata for cache: ${typedArgs.cacheName}`);
          return {
            content: [
              {
                type: "text" as const,
                text: JSON.stringify(cacheMetadata, null, 2),
              },
            ],
          };
        }
        case "update": {
          // Only forward the fields being changed (at least one is set,
          // enforced by the validation above).
          const updateData: { ttl?: string; displayName?: string } = {};
          if (typedArgs.ttl) updateData.ttl = typedArgs.ttl;
          if (typedArgs.displayName)
            updateData.displayName = typedArgs.displayName;
          const updatedMetadata = await serviceInstance.updateCache(
            typedArgs.cacheName! as `cachedContents/${string}`,
            updateData
          );
          logger.info(`Cache updated successfully: ${typedArgs.cacheName}`);
          return {
            content: [
              {
                type: "text" as const,
                text: JSON.stringify(updatedMetadata, null, 2),
              },
            ],
          };
        }
        case "delete": {
          // Deletion returns no metadata; report success explicitly.
          await serviceInstance.deleteCache(
            typedArgs.cacheName! as `cachedContents/${string}`
          );
          logger.info(`Cache deleted successfully: ${typedArgs.cacheName}`);
          return {
            content: [
              {
                type: "text" as const,
                text: JSON.stringify({
                  success: true,
                  message: `Cache ${typedArgs.cacheName} deleted successfully`,
                }),
              },
            ],
          };
        }
        default:
          // Unreachable when args passed schema validation; kept as a
          // defensive guard.
          throw new Error(`Invalid operation: ${typedArgs.operation}`);
      }
    } catch (error: unknown) {
      // Normalize every failure into an McpError for the client.
      logger.error(`Error processing ${GEMINI_CACHE_TOOL_NAME}:`, error);
      throw mapAnyErrorToMcpError(error, GEMINI_CACHE_TOOL_NAME);
    }
  };
  // Register the tool with the server
  server.tool(
    GEMINI_CACHE_TOOL_NAME,
    GEMINI_CACHE_TOOL_DESCRIPTION,
    GEMINI_CACHE_PARAMS,
    processRequest
  );
  logger.info(`Tool registered: ${GEMINI_CACHE_TOOL_NAME}`);
};
```
--------------------------------------------------------------------------------
/tests/unit/services/gemini/GeminiPromptTemplates.test.vitest.ts:
--------------------------------------------------------------------------------
```typescript
// Using vitest globals - see vitest.config.ts globals: true
import {
processTemplate,
getReviewTemplate,
getFocusInstructions,
} from "../../../../src/services/gemini/GeminiPromptTemplates.js";
// Unit tests for the Gemini prompt-template helpers: placeholder
// substitution plus per-focus review templates and instructions.
describe("GeminiPromptTemplates", () => {
  // All review focuses the template helpers support.
  const reviewFocuses = [
    "security",
    "performance",
    "architecture",
    "bugs",
    "general",
  ] as const;

  describe("processTemplate()", () => {
    it("should replace placeholders with values", () => {
      // Both placeholders have matching context values.
      const result = processTemplate("Hello {{name}}, welcome to {{place}}!", {
        name: "John",
        place: "Paris",
        diffContent: "sample diff content", // Required by function signature
      });
      expect(result).toBe("Hello John, welcome to Paris!");
    });

    it("should handle missing placeholders", () => {
      // "place" is absent from the context entirely.
      const result = processTemplate("Hello {{name}}, welcome to {{place}}!", {
        name: "John",
        diffContent: "sample diff content", // Required by function signature
      });
      expect(result).toBe("Hello John, welcome to !");
    });

    it("should handle undefined values", () => {
      // "place" is present but explicitly undefined.
      const result = processTemplate("Hello {{name}}, welcome to {{place}}!", {
        name: "John",
        place: undefined,
        diffContent: "sample diff content", // Required by function signature
      });
      expect(result).toBe("Hello John, welcome to !");
    });

    it("should handle non-string values", () => {
      const result = processTemplate("The answer is {{answer}}.", {
        answer: "42", // Convert number to string to match function signature
        diffContent: "sample diff content", // Required by function signature
      });
      expect(result).toBe("The answer is 42.");
    });
  });

  describe("getReviewTemplate()", () => {
    it("should return different templates for different review focuses", () => {
      const templates = reviewFocuses.map((focus) => getReviewTemplate(focus));
      // Every template is a string...
      for (const template of templates) {
        expect(typeof template).toBe("string");
      }
      // ...and every pair of templates is distinct.
      for (let i = 0; i < templates.length; i++) {
        for (let j = i + 1; j < templates.length; j++) {
          expect(templates[i]).not.toBe(templates[j]);
        }
      }
    });

    it("should return a template containing expected keywords for each focus", () => {
      // Substrings each focus's template must contain.
      const expectedKeywords: Record<(typeof reviewFocuses)[number], string[]> =
        {
          security: ["security", "vulnerabilit"],
          performance: ["performance", "optimiz"],
          architecture: ["architect", "design"],
          bugs: ["bug", "error"],
          general: ["comprehensive"],
        };
      for (const focus of reviewFocuses) {
        const template = getReviewTemplate(focus);
        for (const keyword of expectedKeywords[focus]) {
          expect(template).toContain(keyword);
        }
      }
    });
  });

  describe("getFocusInstructions()", () => {
    it("should return different instructions for different focuses", () => {
      const instructions = reviewFocuses.map((focus) =>
        getFocusInstructions(focus)
      );
      // Every instruction set is a string...
      for (const instruction of instructions) {
        expect(typeof instruction).toBe("string");
      }
      // ...and every pair is distinct.
      for (let i = 0; i < instructions.length; i++) {
        for (let j = i + 1; j < instructions.length; j++) {
          expect(instructions[i]).not.toBe(instructions[j]);
        }
      }
    });

    it("should include focus-specific keywords in each instruction", () => {
      // Substrings each focus's instructions must contain.
      const expectedKeywords: Record<(typeof reviewFocuses)[number], string[]> =
        {
          security: ["security", "vulnerabilities"],
          performance: ["performance", "Algorithm"],
          architecture: ["architectural", "Design pattern"],
          bugs: ["bugs", "errors"],
          general: ["comprehensive"],
        };
      for (const focus of reviewFocuses) {
        const instructions = getFocusInstructions(focus);
        for (const keyword of expectedKeywords[focus]) {
          expect(instructions).toContain(keyword);
        }
      }
    });
  });
});
```
--------------------------------------------------------------------------------
/tests/unit/services/session/SQLiteSessionStore.test.vitest.ts:
--------------------------------------------------------------------------------
```typescript
// Using vitest globals - see vitest.config.ts globals: true
import { SQLiteSessionStore } from "../../../../src/services/session/SQLiteSessionStore.js";
import { SessionState } from "../../../../src/services/SessionService.js";
import { promises as fs } from "fs";
import path from "path";
import os from "os";
// Unit tests for SQLiteSessionStore. Each test runs against a fresh SQLite
// database file created inside a per-test temporary directory, so tests are
// fully isolated and leave nothing on disk afterwards.
describe("SQLiteSessionStore", () => {
  let store: SQLiteSessionStore;
  let testDbPath: string;
  let testDir: string;
  beforeEach(async () => {
    // Create a temporary directory for test database
    testDir = await fs.mkdtemp(path.join(os.tmpdir(), "sqlite-session-test-"));
    testDbPath = path.join(testDir, "test-sessions.db");
    store = new SQLiteSessionStore(testDbPath);
    await store.initialize();
  });
  afterEach(async () => {
    // Clean up
    // NOTE(review): some tests close the primary store themselves, so this
    // may call close() on an already-closed store — presumably close() is
    // idempotent; confirm against SQLiteSessionStore.
    await store.close();
    try {
      await fs.rm(testDir, { recursive: true, force: true });
    } catch (error) {
      // Ignore cleanup errors
    }
  });
  describe("initialize", () => {
    it("should create database file and tables", async () => {
      // Check that database file exists
      const stats = await fs.stat(testDbPath);
      expect(stats.isFile()).toBe(true);
    });
    it("should clean up expired sessions on startup", async () => {
      // Create a new store instance to test initialization cleanup
      // (it points at the same database file as the primary store).
      const store2 = new SQLiteSessionStore(testDbPath);
      // Add an expired session directly to the database before initialization
      const expiredSession: SessionState = {
        id: "expired-session",
        createdAt: Date.now() - 7200000, // 2 hours ago
        lastActivity: Date.now() - 3600000, // 1 hour ago
        expiresAt: Date.now() - 1000, // Expired 1 second ago
        data: { test: "data" },
      };
      await store.set("expired-session", expiredSession);
      await store.close();
      // Initialize new store - should clean up expired session
      await store2.initialize();
      const retrieved = await store2.get("expired-session");
      expect(retrieved).toBeNull();
      await store2.close();
    });
  });
  describe("set and get", () => {
    it("should store and retrieve a session", async () => {
      const session: SessionState = {
        id: "test-session-1",
        createdAt: Date.now(),
        lastActivity: Date.now(),
        expiresAt: Date.now() + 3600000, // 1 hour from now
        data: { userId: "user123", preferences: { theme: "dark" } },
      };
      await store.set(session.id, session);
      const retrieved = await store.get(session.id);
      // The round trip must preserve identity fields and the nested data blob.
      expect(retrieved).not.toBeNull();
      expect(retrieved?.id).toBe(session.id);
      expect(retrieved?.createdAt).toBe(session.createdAt);
      expect(retrieved?.data).toEqual(session.data);
    });
    it("should return null for non-existent session", async () => {
      const retrieved = await store.get("non-existent");
      expect(retrieved).toBeNull();
    });
    it("should overwrite existing session", async () => {
      const session1: SessionState = {
        id: "test-session",
        createdAt: Date.now(),
        lastActivity: Date.now(),
        expiresAt: Date.now() + 3600000,
        data: { version: 1 },
      };
      // Same id as session1, different payload — set() should upsert.
      const session2: SessionState = {
        ...session1,
        data: { version: 2 },
      };
      await store.set(session1.id, session1);
      await store.set(session2.id, session2);
      const retrieved = await store.get(session1.id);
      expect(retrieved?.data).toEqual({ version: 2 });
    });
  });
  describe("delete", () => {
    it("should delete an existing session", async () => {
      const session: SessionState = {
        id: "test-session",
        createdAt: Date.now(),
        lastActivity: Date.now(),
        expiresAt: Date.now() + 3600000,
        data: {},
      };
      await store.set(session.id, session);
      const deleted = await store.delete(session.id);
      expect(deleted).toBe(true);
      const retrieved = await store.get(session.id);
      expect(retrieved).toBeNull();
    });
    it("should return false when deleting non-existent session", async () => {
      const deleted = await store.delete("non-existent");
      expect(deleted).toBe(false);
    });
  });
  describe("deleteExpired", () => {
    it("should delete only expired sessions", async () => {
      const now = Date.now();
      const activeSession: SessionState = {
        id: "active",
        createdAt: now,
        lastActivity: now,
        expiresAt: now + 3600000, // 1 hour from now
        data: {},
      };
      const expiredSession1: SessionState = {
        id: "expired1",
        createdAt: now - 7200000,
        lastActivity: now - 3600000,
        expiresAt: now - 1000, // Expired
        data: {},
      };
      const expiredSession2: SessionState = {
        id: "expired2",
        createdAt: now - 7200000,
        lastActivity: now - 3600000,
        expiresAt: now - 2000, // Expired
        data: {},
      };
      await store.set(activeSession.id, activeSession);
      await store.set(expiredSession1.id, expiredSession1);
      await store.set(expiredSession2.id, expiredSession2);
      const deletedCount = await store.deleteExpired(now);
      expect(deletedCount).toBe(2);
      // Active session should still exist
      expect(await store.get(activeSession.id)).not.toBeNull();
      // Expired sessions should be gone
      expect(await store.get(expiredSession1.id)).toBeNull();
      expect(await store.get(expiredSession2.id)).toBeNull();
    });
  });
  describe("count", () => {
    it("should return correct session count", async () => {
      // Count tracks inserts and deletes as they happen.
      expect(await store.count()).toBe(0);
      const session1: SessionState = {
        id: "session1",
        createdAt: Date.now(),
        lastActivity: Date.now(),
        expiresAt: Date.now() + 3600000,
        data: {},
      };
      const session2: SessionState = {
        id: "session2",
        createdAt: Date.now(),
        lastActivity: Date.now(),
        expiresAt: Date.now() + 3600000,
        data: {},
      };
      await store.set(session1.id, session1);
      expect(await store.count()).toBe(1);
      await store.set(session2.id, session2);
      expect(await store.count()).toBe(2);
      await store.delete(session1.id);
      expect(await store.count()).toBe(1);
    });
  });
  describe("error handling", () => {
    it("should throw error when store not initialized", async () => {
      // get() before initialize() must fail loudly, not return null.
      const uninitializedStore = new SQLiteSessionStore(
        path.join(testDir, "uninitialized.db")
      );
      await expect(uninitializedStore.get("test")).rejects.toThrow(
        "SQLite session store not initialized"
      );
    });
    it("should handle JSON parsing errors gracefully", async () => {
      const session: SessionState = {
        id: "test-session",
        createdAt: Date.now(),
        lastActivity: Date.now(),
        expiresAt: Date.now() + 3600000,
        data: { test: "data" },
      };
      await store.set(session.id, session);
      // Manually corrupt the data in the database
      // This is a bit hacky but tests error handling
      // (the cast exposes the store's private better-sqlite3 handle).
      const db = (
        store as unknown as {
          db: {
            prepare: (sql: string) => {
              run: (param1: string, param2: string) => void;
            };
          };
        }
      ).db;
      db.prepare("UPDATE sessions SET data = ? WHERE id = ?").run(
        "invalid json",
        session.id
      );
      await expect(store.get(session.id)).rejects.toThrow();
    });
  });
});
```
--------------------------------------------------------------------------------
/src/tools/registration/ToolAdapter.ts:
--------------------------------------------------------------------------------
```typescript
/**
* Tool Adapter
*
* Provides adapter functions to convert existing tool implementations
* to work with the new standardized registry system.
*/
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { ServiceContainer, ToolRegistrationFn } from "./ToolRegistry.js";
import { GeminiService } from "../../services/GeminiService.js";
import { McpClientService } from "../../services/mcp/McpClientService.js";
import { logger } from "../../utils/logger.js";
/**
 * Legacy tool function that registers itself using only the server instance.
 */
export type LegacyServerOnlyTool = (server: McpServer) => void;
/**
 * Legacy tool function that accepts the server plus the shared GeminiService.
 */
export type LegacyGeminiServiceTool = (
  server: McpServer,
  service: GeminiService
) => void;
/**
 * Legacy tool function that accepts the server plus the shared
 * McpClientService.
 */
export type LegacyMcpClientServiceTool = (
  server: McpServer,
  service: McpClientService
) => void;
/**
 * New-style tool descriptor: registration metadata plus an execute function.
 * The schema is typed `unknown` here and passed through to `server.tool()`
 * untouched.
 */
export interface NewToolObject<TArgs = unknown, TResult = unknown> {
  name: string;
  description: string;
  inputSchema: unknown;
  execute: (args: TArgs) => Promise<TResult>;
}
/**
 * New-style tool descriptor whose execute function additionally receives
 * the shared GeminiService.
 */
export interface NewGeminiServiceToolObject<
  TArgs = unknown,
  TResult = unknown,
> {
  name: string;
  description: string;
  inputSchema: unknown;
  execute: (args: TArgs, service: GeminiService) => Promise<TResult>;
}
/**
 * New-style tool descriptor whose execute function additionally receives
 * the shared McpClientService.
 */
export interface NewMcpClientServiceToolObject<
  TArgs = unknown,
  TResult = unknown,
> {
  name: string;
  description: string;
  inputSchema: unknown;
  execute: (args: TArgs, service: McpClientService) => Promise<TResult>;
}
/**
 * Wraps a legacy tool that registers itself using only the server instance.
 * Registration failures are logged and swallowed so one bad tool does not
 * abort registration of the rest.
 *
 * @param tool Legacy registration function taking just an McpServer
 * @param name Optional tool name, used purely for log messages
 * @returns A registration function compatible with the tool registry
 */
export function adaptServerOnlyTool(
  tool: LegacyServerOnlyTool,
  name?: string
): ToolRegistrationFn {
  return (server: McpServer, _services: ServiceContainer) => {
    try {
      tool(server);
      if (name) {
        logger.debug(`Registered server-only tool: ${name}`);
      }
    } catch (error) {
      const toolLabel = name ? ` ${name}` : "";
      const reason = error instanceof Error ? error.message : String(error);
      logger.error(`Failed to register server-only tool${toolLabel}: ${reason}`);
    }
  };
}
/**
 * Wraps a legacy tool whose registration needs the shared GeminiService.
 * Failures are logged and swallowed so sibling tools still register.
 *
 * @param tool Legacy registration function taking a server and GeminiService
 * @param name Optional tool name, used only in log output
 * @returns A registration function compatible with the tool registry
 */
export function adaptGeminiServiceTool(
  tool: LegacyGeminiServiceTool,
  name?: string
): ToolRegistrationFn {
  return (server: McpServer, services: ServiceContainer) => {
    try {
      tool(server, services.geminiService);
      if (name) logger.debug(`Registered GeminiService tool: ${name}`);
    } catch (error) {
      const suffix = name ? ` ${name}` : "";
      const message = error instanceof Error ? error.message : String(error);
      logger.error(
        `Failed to register GeminiService tool${suffix}: ${message}`
      );
    }
  };
}
/**
 * Wraps a legacy tool whose registration needs the shared McpClientService.
 * Failures are logged and swallowed so sibling tools still register.
 *
 * @param tool Legacy registration function taking a server and McpClientService
 * @param name Optional tool name, used only in log output
 * @returns A registration function compatible with the tool registry
 */
export function adaptMcpClientServiceTool(
  tool: LegacyMcpClientServiceTool,
  name?: string
): ToolRegistrationFn {
  return (server: McpServer, services: ServiceContainer) => {
    try {
      tool(server, services.mcpClientService);
      if (name) logger.debug(`Registered McpClientService tool: ${name}`);
    } catch (error) {
      const suffix = name ? ` ${name}` : "";
      const message = error instanceof Error ? error.message : String(error);
      logger.error(
        `Failed to register McpClientService tool${suffix}: ${message}`
      );
    }
  };
}
/**
 * Registers a new-format tool object (metadata + execute function) on the
 * server. Failures are logged and swallowed so sibling tools still register.
 *
 * @param tool New tool object with an execute method
 * @returns A registration function compatible with the tool registry
 */
export function adaptNewToolObject<TArgs, TResult>(
  tool: NewToolObject<TArgs, TResult>
): ToolRegistrationFn {
  return (server: McpServer, _services: ServiceContainer) => {
    try {
      // Handler simply forwards the typed args to the tool's execute method;
      // the cast widens it to the signature server.tool() expects.
      const handler = async (args: TArgs): Promise<TResult> =>
        tool.execute(args);
      server.tool(
        tool.name,
        tool.description,
        tool.inputSchema,
        handler as (args: unknown) => Promise<unknown>
      );
      logger.debug(`Registered new tool object: ${tool.name}`);
    } catch (error) {
      const reason = error instanceof Error ? error.message : String(error);
      logger.error(
        `Failed to register new tool object ${tool.name}: ${reason}`
      );
    }
  };
}
/**
 * Registers a new-format tool object whose execute function needs the shared
 * GeminiService. The service is bound into the handler at registration time.
 *
 * @param tool New tool object with an execute method that needs GeminiService
 * @returns A registration function compatible with the tool registry
 */
export function adaptNewGeminiServiceToolObject<TArgs, TResult>(
  tool: NewGeminiServiceToolObject<TArgs, TResult>
): ToolRegistrationFn {
  return (server: McpServer, services: ServiceContainer) => {
    try {
      // Close over the container's GeminiService so the handler matches the
      // single-argument shape server.tool() expects.
      const handler = async (args: TArgs): Promise<TResult> =>
        tool.execute(args, services.geminiService);
      server.tool(
        tool.name,
        tool.description,
        tool.inputSchema,
        handler as (args: unknown) => Promise<unknown>
      );
      logger.debug(`Registered new Gemini service tool object: ${tool.name}`);
    } catch (error) {
      const reason = error instanceof Error ? error.message : String(error);
      logger.error(
        `Failed to register new Gemini service tool object ${tool.name}: ${reason}`
      );
    }
  };
}
/**
 * Registers a new-format tool object whose execute function needs the shared
 * McpClientService. The service is bound into the handler at registration
 * time.
 *
 * @param tool New tool object with an execute method that needs McpClientService
 * @returns A registration function compatible with the tool registry
 */
export function adaptNewMcpClientServiceToolObject<TArgs, TResult>(
  tool: NewMcpClientServiceToolObject<TArgs, TResult>
): ToolRegistrationFn {
  return (server: McpServer, services: ServiceContainer) => {
    try {
      // Close over the container's McpClientService so the handler matches
      // the single-argument shape server.tool() expects.
      const handler = async (args: TArgs): Promise<TResult> =>
        tool.execute(args, services.mcpClientService);
      server.tool(
        tool.name,
        tool.description,
        tool.inputSchema,
        handler as (args: unknown) => Promise<unknown>
      );
      logger.debug(
        `Registered new MCP client service tool object: ${tool.name}`
      );
    } catch (error) {
      const reason = error instanceof Error ? error.message : String(error);
      logger.error(
        `Failed to register new MCP client service tool object ${tool.name}: ${reason}`
      );
    }
  };
}
/**
 * Registers a handler directly on the server, bypassing the adapter
 * wrappers above.
 *
 * @param name Tool name
 * @param description Tool description
 * @param handler Handler invoked with the raw (unvalidated) arguments
 * @returns A registration function compatible with the tool registry
 */
export function adaptDirectTool(
  name: string,
  description: string,
  handler: (args: unknown) => Promise<unknown>
): ToolRegistrationFn {
  return (server: McpServer, _services: ServiceContainer) => {
    try {
      // NOTE(review): an empty params schema is passed here, so the tool
      // advertises no input parameters — presumably intentional; confirm.
      server.tool(name, description, {}, handler);
      logger.debug(`Registered direct tool: ${name}`);
    } catch (error) {
      const reason = error instanceof Error ? error.message : String(error);
      logger.error(`Failed to register direct tool ${name}: ${reason}`);
    }
  };
}
```
--------------------------------------------------------------------------------
/src/services/gemini/ModelMigrationService.ts:
--------------------------------------------------------------------------------
```typescript
import { logger } from "../../utils/logger.js";
/**
 * Singleton that migrates legacy Gemini environment-variable configuration
 * to the current array-based format, rewrites deprecated model names, and
 * validates the resulting configuration. All migrations mutate process.env
 * in place.
 */
export class ModelMigrationService {
  private static instance: ModelMigrationService | null = null;

  /** Returns the lazily-created singleton instance. */
  static getInstance(): ModelMigrationService {
    if (!ModelMigrationService.instance) {
      ModelMigrationService.instance = new ModelMigrationService();
    }
    return ModelMigrationService.instance;
  }

  /**
   * Runs all migrations in order, then logs deprecation warnings for any
   * legacy configuration that was encountered.
   */
  migrateEnvironmentVariables(): void {
    this.migrateSingleModelToArray();
    this.provideImageModelDefaults();
    this.migrateDeprecatedModelNames();
    this.logMigrationWarnings();
  }

  // Copies the legacy single-model variable into the array-format variable
  // when the latter is not already set.
  private migrateSingleModelToArray(): void {
    if (process.env.GOOGLE_GEMINI_MODEL && !process.env.GOOGLE_GEMINI_MODELS) {
      const singleModel = process.env.GOOGLE_GEMINI_MODEL;
      process.env.GOOGLE_GEMINI_MODELS = JSON.stringify([singleModel]);
      logger.info(
        "[ModelMigrationService] Migrated GOOGLE_GEMINI_MODEL to GOOGLE_GEMINI_MODELS array format",
        {
          originalModel: singleModel,
        }
      );
    }
  }

  // Supplies default image-generation models when none are configured.
  private provideImageModelDefaults(): void {
    if (!process.env.GOOGLE_GEMINI_IMAGE_MODELS) {
      const defaultImageModels = [
        "imagen-3.0-generate-002",
        "gemini-2.0-flash-preview-image-generation",
      ];
      process.env.GOOGLE_GEMINI_IMAGE_MODELS =
        JSON.stringify(defaultImageModels);
      logger.info(
        "[ModelMigrationService] Set default image generation models",
        {
          models: defaultImageModels,
        }
      );
    }
  }

  // Rewrites deprecated model names across all model-list variables and the
  // default-model variable, using a fixed old-name -> new-name mapping.
  private migrateDeprecatedModelNames(): void {
    const deprecatedMappings = {
      "gemini-1.5-pro-latest": "gemini-1.5-pro",
      "gemini-1.5-flash-latest": "gemini-1.5-flash",
      "gemini-flash-2.0": "gemini-2.0-flash",
      "gemini-2.5-pro": "gemini-2.5-pro-preview-05-06",
      "gemini-2.5-flash": "gemini-2.5-flash-preview-05-20",
      "gemini-2.5-pro-exp-03-25": "gemini-2.5-pro-preview-05-06",
      "gemini-2.5-flash-exp-latest": "gemini-2.5-flash-preview-05-20",
      "imagen-3.1-generate-003": "imagen-3.0-generate-002",
    };
    this.migrateModelsInEnvVar("GOOGLE_GEMINI_MODELS", deprecatedMappings);
    this.migrateModelsInEnvVar("GOOGLE_GEMINI_TEXT_MODELS", deprecatedMappings);
    this.migrateModelsInEnvVar(
      "GOOGLE_GEMINI_IMAGE_MODELS",
      deprecatedMappings
    );
    this.migrateModelsInEnvVar("GOOGLE_GEMINI_CODE_MODELS", deprecatedMappings);
    if (process.env.GOOGLE_GEMINI_DEFAULT_MODEL) {
      const currentDefault = process.env.GOOGLE_GEMINI_DEFAULT_MODEL;
      const newDefault =
        deprecatedMappings[currentDefault as keyof typeof deprecatedMappings];
      if (newDefault) {
        process.env.GOOGLE_GEMINI_DEFAULT_MODEL = newDefault;
        logger.warn(
          "[ModelMigrationService] Migrated deprecated default model",
          {
            oldModel: currentDefault,
            newModel: newDefault,
          }
        );
      }
    }
  }

  /**
   * Applies the given old-name -> new-name mappings to one JSON-array env
   * var, rewriting it only when at least one model name changed. Non-array
   * or unparseable values are left untouched (a warning is logged for the
   * latter).
   */
  private migrateModelsInEnvVar(
    envVarName: string,
    mappings: Record<string, string>
  ): void {
    const envValue = process.env[envVarName];
    if (!envValue) return;
    try {
      const models = JSON.parse(envValue);
      if (!Array.isArray(models)) return;
      let hasChanges = false;
      const migratedModels = models.map((model) => {
        const newModel = mappings[model];
        if (newModel) {
          hasChanges = true;
          logger.warn(
            `[ModelMigrationService] Migrated deprecated model in ${envVarName}`,
            {
              oldModel: model,
              newModel,
            }
          );
          return newModel;
        }
        return model;
      });
      if (hasChanges) {
        process.env[envVarName] = JSON.stringify(migratedModels);
      }
    } catch (error) {
      logger.warn(
        `[ModelMigrationService] Failed to parse ${envVarName} for migration`,
        { error }
      );
    }
  }

  // Emits deprecation warnings and informational notes about the (possibly
  // already migrated) configuration.
  private logMigrationWarnings(): void {
    const deprecationNotices: string[] = [];
    // BUG FIX: this previously also required GOOGLE_GEMINI_MODELS to be
    // unset, but migrateSingleModelToArray() (which runs first in
    // migrateEnvironmentVariables) always sets GOOGLE_GEMINI_MODELS when
    // GOOGLE_GEMINI_MODEL is present, so the notice could never fire.
    // Warn on the presence of the deprecated variable itself, consistent
    // with getDeprecatedFeatures().
    if (process.env.GOOGLE_GEMINI_MODEL) {
      deprecationNotices.push(
        "GOOGLE_GEMINI_MODEL is deprecated. Use GOOGLE_GEMINI_MODELS array instead."
      );
    }
    if (
      process.env.GOOGLE_GEMINI_ROUTING_PREFER_COST === undefined &&
      process.env.GOOGLE_GEMINI_ROUTING_PREFER_SPEED === undefined &&
      process.env.GOOGLE_GEMINI_ROUTING_PREFER_QUALITY === undefined
    ) {
      logger.info(
        "[ModelMigrationService] No routing preferences set. Using quality-optimized defaults."
      );
    }
    deprecationNotices.forEach((notice) => {
      logger.warn(`[ModelMigrationService] DEPRECATION: ${notice}`);
    });
    if (deprecationNotices.length > 0) {
      logger.info(
        "[ModelMigrationService] Migration completed. See documentation for updated configuration format."
      );
    }
  }

  /**
   * Validates required variables, JSON-array model lists, and boolean
   * routing flags.
   *
   * @returns isValid plus a list of human-readable error messages (empty
   *          when the configuration is valid).
   */
  validateConfiguration(): { isValid: boolean; errors: string[] } {
    const errors: string[] = [];
    const requiredEnvVars = ["GOOGLE_GEMINI_API_KEY"];
    requiredEnvVars.forEach((varName) => {
      if (!process.env[varName]) {
        errors.push(`Missing required environment variable: ${varName}`);
      }
    });
    const modelArrayVars = [
      "GOOGLE_GEMINI_MODELS",
      "GOOGLE_GEMINI_IMAGE_MODELS",
      "GOOGLE_GEMINI_CODE_MODELS",
    ];
    modelArrayVars.forEach((varName) => {
      const value = process.env[varName];
      if (value) {
        try {
          const parsed = JSON.parse(value);
          if (!Array.isArray(parsed)) {
            errors.push(`${varName} must be a JSON array of strings`);
          } else if (!parsed.every((item) => typeof item === "string")) {
            errors.push(`${varName} must contain only string values`);
          } else if (parsed.length === 0) {
            errors.push(`${varName} cannot be an empty array`);
          }
        } catch (error) {
          errors.push(`${varName} must be valid JSON: ${error}`);
        }
      }
    });
    const booleanVars = [
      "GOOGLE_GEMINI_ROUTING_PREFER_COST",
      "GOOGLE_GEMINI_ROUTING_PREFER_SPEED",
      "GOOGLE_GEMINI_ROUTING_PREFER_QUALITY",
    ];
    booleanVars.forEach((varName) => {
      const value = process.env[varName];
      if (value && !["true", "false"].includes(value.toLowerCase())) {
        errors.push(`${varName} must be 'true' or 'false' if provided`);
      }
    });
    return {
      isValid: errors.length === 0,
      errors,
    };
  }

  /**
   * Reports deprecated configuration still present in the environment
   * (legacy variables and old model names), deduplicated.
   */
  getDeprecatedFeatures(): string[] {
    const deprecated: string[] = [];
    if (process.env.GOOGLE_GEMINI_MODEL) {
      deprecated.push(
        "GOOGLE_GEMINI_MODEL environment variable (use GOOGLE_GEMINI_MODELS array)"
      );
    }
    const oldModelNames = [
      "gemini-1.5-pro-latest",
      "gemini-1.5-flash-latest",
      "gemini-flash-2.0",
      "gemini-2.5-pro",
      "gemini-2.5-flash",
      "gemini-2.5-pro-exp-03-25",
      "gemini-2.5-flash-exp-latest",
      "imagen-3.1-generate-003",
    ];
    const allEnvVars = [
      process.env.GOOGLE_GEMINI_MODELS,
      process.env.GOOGLE_GEMINI_IMAGE_MODELS,
      process.env.GOOGLE_GEMINI_CODE_MODELS,
      process.env.GOOGLE_GEMINI_DEFAULT_MODEL,
    ].filter(Boolean);
    allEnvVars.forEach((envVar) => {
      try {
        // Values may be JSON arrays or bare model names; normalize to a list.
        const models =
          typeof envVar === "string" && envVar.startsWith("[")
            ? JSON.parse(envVar)
            : [envVar];
        models.forEach((model: string) => {
          if (oldModelNames.includes(model)) {
            deprecated.push(`Model name: ${model}`);
          }
        });
      } catch {
        // Malformed JSON: fall back to treating the raw value as one name.
        if (oldModelNames.includes(envVar as string)) {
          deprecated.push(`Model name: ${envVar}`);
        }
      }
    });
    return [...new Set(deprecated)];
  }
}
```
--------------------------------------------------------------------------------
/tests/unit/tools/geminiRouteMessageTool.test.vitest.ts:
--------------------------------------------------------------------------------
```typescript
// Using vitest globals - see vitest.config.ts globals: true
import { geminiRouteMessageTool } from "../../../src/tools/geminiRouteMessageTool.js";
import {
GeminiApiError,
ValidationError as GeminiValidationError,
} from "../../../src/utils/errors.js";
import { McpError } from "@modelcontextprotocol/sdk/types.js";
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { GeminiService } from "../../../src/services/index.js";
import type { GenerateContentResponse } from "@google/genai";
import { BlockedReason, FinishReason } from "@google/genai";
// Create a partial type for testing purposes (avoids building a full
// GenerateContentResponse fixture for each case).
type PartialGenerateContentResponse = Partial<GenerateContentResponse>;
// Unit tests for the gemini_route_message tool registration and handler.
// The McpServer is mocked so the handler can be captured from the
// server.tool() call and invoked directly.
describe("geminiRouteMessageTool", () => {
  // Mock server and service instances
  const mockTool = vi.fn();
  const mockServer = {
    tool: mockTool,
  } as unknown as McpServer;
  // Define a type for route message params
  interface RouteMessageParams {
    message: string;
    models: string[];
    routingPrompt?: string;
    defaultModel?: string;
    generationConfig?: Record<string, unknown>;
    safetySettings?: unknown[];
    systemInstruction?: unknown;
  }
  // Create a strongly typed mock function that returns a Promise
  const mockRouteMessage = vi.fn<
    (params: RouteMessageParams) => Promise<{
      response: PartialGenerateContentResponse;
      chosenModel: string;
    }>
  >();
  // Create a minimal mock service with just the necessary methods for testing
  const mockService = {
    routeMessage: mockRouteMessage,
    // Add empty implementations for required GeminiService methods
    // Add other required methods as empty implementations
  } as unknown as GeminiService;
  // Reset mocks before each test
  beforeEach(() => {
    vi.resetAllMocks();
  });
  it("should register the tool with the server", () => {
    // Call the tool registration function
    geminiRouteMessageTool(mockServer, mockService);
    // Verify tool was registered
    expect(mockTool).toHaveBeenCalledTimes(1);
    // server.tool(name, description, params, handler) — capture all four.
    const [name, description, params, handler] = mockTool.mock.calls[0];
    // Check tool registration parameters
    expect(name).toBe("gemini_route_message");
    expect(description).toContain("Routes a message");
    expect(params).toBeDefined();
    expect(typeof handler).toBe("function");
  });
  it("should call the service's routeMessage method with correct parameters", async () => {
    // Register tool to get the request handler
    geminiRouteMessageTool(mockServer, mockService);
    const [, , , handler] = mockTool.mock.calls[0];
    // Mock successful response with proper typing
    const mockSuccessResponse = {
      response: {
        candidates: [
          {
            content: {
              parts: [{ text: "This is a test response" }],
            },
          },
        ],
      } as PartialGenerateContentResponse,
      chosenModel: "gemini-1.5-flash",
    };
    mockRouteMessage.mockResolvedValueOnce(mockSuccessResponse);
    // Prepare test request
    const testRequest = {
      message: "What is the capital of France?",
      models: ["gemini-1.5-pro", "gemini-1.5-flash"],
      routingPrompt: "Choose the best model",
      defaultModel: "gemini-1.5-pro",
    };
    // Call the handler
    const result = await handler(testRequest);
    // Verify service method was called
    expect(mockRouteMessage).toHaveBeenCalledTimes(1);
    // Get the parameters passed to the routeMessage function
    const passedParams = mockRouteMessage.mock
      .calls[0][0] as RouteMessageParams;
    // Check parameters passed to service
    expect(passedParams.message).toBe(testRequest.message);
    expect(passedParams.models).toEqual(testRequest.models);
    expect(passedParams.routingPrompt).toBe(testRequest.routingPrompt);
    expect(passedParams.defaultModel).toBe(testRequest.defaultModel);
    // Verify result structure
    expect(result.content).toBeDefined();
    expect(result.content.length).toBe(1);
    expect(result.content[0].type).toBe("text");
    // Parse the JSON response (handler serializes text + chosenModel as JSON)
    const parsedResponse = JSON.parse(result.content[0].text);
    expect(parsedResponse.text).toBe("This is a test response");
    expect(parsedResponse.chosenModel).toBe("gemini-1.5-flash");
  });
  it("should handle safety blocks from the prompt", async () => {
    // Register tool to get the request handler
    geminiRouteMessageTool(mockServer, mockService);
    const [, , , handler] = mockTool.mock.calls[0];
    // Mock safety block response with proper typing
    const mockSafetyResponse = {
      response: {
        promptFeedback: {
          blockReason: BlockedReason.SAFETY,
        },
      } as PartialGenerateContentResponse,
      chosenModel: "gemini-1.5-flash",
    };
    mockRouteMessage.mockResolvedValueOnce(mockSafetyResponse);
    // Call the handler
    const result = await handler({
      message: "Harmful content here",
      models: ["gemini-1.5-pro", "gemini-1.5-flash"],
    });
    // Verify error response
    expect(result.isError).toBeTruthy();
    expect(result.content[0].text).toContain("safety settings");
  });
  it("should handle empty response from model", async () => {
    // Register tool to get the request handler
    geminiRouteMessageTool(mockServer, mockService);
    const [, , , handler] = mockTool.mock.calls[0];
    // Mock empty response with proper typing
    // (candidate with no parts, truncated by MAX_TOKENS).
    const mockEmptyResponse = {
      response: {
        candidates: [
          {
            content: { parts: [] },
            finishReason: FinishReason.MAX_TOKENS,
          },
        ],
      } as PartialGenerateContentResponse,
      chosenModel: "gemini-1.5-flash",
    };
    mockRouteMessage.mockResolvedValueOnce(mockEmptyResponse);
    // Call the handler
    const result = await handler({
      message: "Test message",
      models: ["gemini-1.5-pro", "gemini-1.5-flash"],
    });
    // Verify empty response handling
    expect(result.content).toBeDefined();
    const parsedResponse = JSON.parse(result.content[0].text);
    expect(parsedResponse.text).toBe("");
    expect(parsedResponse.chosenModel).toBe("gemini-1.5-flash");
  });
  it("should map errors properly", async () => {
    // Register tool to get the request handler
    geminiRouteMessageTool(mockServer, mockService);
    const [, , , handler] = mockTool.mock.calls[0];
    // Mock service error
    const serviceError = new GeminiApiError("Service failed");
    mockRouteMessage.mockRejectedValueOnce(serviceError);
    // Call the handler and expect an error
    // (service errors must surface to callers as McpError).
    await expect(
      handler({
        message: "Test message",
        models: ["gemini-1.5-pro", "gemini-1.5-flash"],
      })
    ).rejects.toThrow(McpError);
    // Reset the mock for the next test
    mockRouteMessage.mockReset();
    mockRouteMessage.mockRejectedValueOnce(serviceError);
    // Use a separate test with a new rejection
    await expect(
      handler({
        message: "Test message",
        models: ["gemini-1.5-pro", "gemini-1.5-flash"],
      })
    ).rejects.toThrow();
  });
  it("should handle validation errors", async () => {
    // Register tool to get the request handler
    geminiRouteMessageTool(mockServer, mockService);
    const [, , , handler] = mockTool.mock.calls[0];
    // Mock validation error
    const validationError = new GeminiValidationError("Invalid parameters");
    mockRouteMessage.mockRejectedValueOnce(validationError);
    // Call the handler and expect an error
    await expect(
      handler({
        message: "Test message",
        models: ["gemini-1.5-pro", "gemini-1.5-flash"],
      })
    ).rejects.toThrow(McpError);
    // Reset the mock for the next test
    mockRouteMessage.mockReset();
    mockRouteMessage.mockRejectedValueOnce(validationError);
    // Use a separate test with a new rejection
    await expect(
      handler({
        message: "Test message",
        models: ["gemini-1.5-pro", "gemini-1.5-flash"],
      })
    ).rejects.toThrow();
  });
});
```