# mcp-gemini-server — codebase export (page 3 of 6)
tokens: 49005/50000 21/148 files (page 3/6)
lines: off (toggle) GitHub
raw markdown copy
This is page 3 of 6. Use http://codebase.md/bsmi021/mcp-gemini-server?page={x} to view the full context.

# Directory Structure

```
├── .env.example
├── .eslintignore
├── .eslintrc.json
├── .gitignore
├── .prettierrc.json
├── Dockerfile
├── LICENSE
├── package-lock.json
├── package.json
├── README.md
├── review-prompt.txt
├── scripts
│   ├── gemini-review.sh
│   └── run-with-health-check.sh
├── smithery.yaml
├── src
│   ├── config
│   │   └── ConfigurationManager.ts
│   ├── createServer.ts
│   ├── index.ts
│   ├── resources
│   │   └── system-prompt.md
│   ├── server.ts
│   ├── services
│   │   ├── ExampleService.ts
│   │   ├── gemini
│   │   │   ├── GeminiCacheService.ts
│   │   │   ├── GeminiChatService.ts
│   │   │   ├── GeminiContentService.ts
│   │   │   ├── GeminiGitDiffService.ts
│   │   │   ├── GeminiPromptTemplates.ts
│   │   │   ├── GeminiTypes.ts
│   │   │   ├── GeminiUrlContextService.ts
│   │   │   ├── GeminiValidationSchemas.ts
│   │   │   ├── GitHubApiService.ts
│   │   │   ├── GitHubUrlParser.ts
│   │   │   └── ModelMigrationService.ts
│   │   ├── GeminiService.ts
│   │   ├── index.ts
│   │   ├── mcp
│   │   │   ├── index.ts
│   │   │   └── McpClientService.ts
│   │   ├── ModelSelectionService.ts
│   │   ├── session
│   │   │   ├── index.ts
│   │   │   ├── InMemorySessionStore.ts
│   │   │   ├── SessionStore.ts
│   │   │   └── SQLiteSessionStore.ts
│   │   └── SessionService.ts
│   ├── tools
│   │   ├── exampleToolParams.ts
│   │   ├── geminiCacheParams.ts
│   │   ├── geminiCacheTool.ts
│   │   ├── geminiChatParams.ts
│   │   ├── geminiChatTool.ts
│   │   ├── geminiCodeReviewParams.ts
│   │   ├── geminiCodeReviewTool.ts
│   │   ├── geminiGenerateContentConsolidatedParams.ts
│   │   ├── geminiGenerateContentConsolidatedTool.ts
│   │   ├── geminiGenerateImageParams.ts
│   │   ├── geminiGenerateImageTool.ts
│   │   ├── geminiGenericParamSchemas.ts
│   │   ├── geminiRouteMessageParams.ts
│   │   ├── geminiRouteMessageTool.ts
│   │   ├── geminiUrlAnalysisTool.ts
│   │   ├── index.ts
│   │   ├── mcpClientParams.ts
│   │   ├── mcpClientTool.ts
│   │   ├── registration
│   │   │   ├── index.ts
│   │   │   ├── registerAllTools.ts
│   │   │   ├── ToolAdapter.ts
│   │   │   └── ToolRegistry.ts
│   │   ├── schemas
│   │   │   ├── BaseToolSchema.ts
│   │   │   ├── CommonSchemas.ts
│   │   │   ├── index.ts
│   │   │   ├── ToolSchemas.ts
│   │   │   └── writeToFileParams.ts
│   │   └── writeToFileTool.ts
│   ├── types
│   │   ├── exampleServiceTypes.ts
│   │   ├── geminiServiceTypes.ts
│   │   ├── gitdiff-parser.d.ts
│   │   ├── googleGenAI.d.ts
│   │   ├── googleGenAITypes.ts
│   │   ├── index.ts
│   │   ├── micromatch.d.ts
│   │   ├── modelcontextprotocol-sdk.d.ts
│   │   ├── node-fetch.d.ts
│   │   └── serverTypes.ts
│   └── utils
│       ├── errors.ts
│       ├── filePathSecurity.ts
│       ├── FileSecurityService.ts
│       ├── geminiErrors.ts
│       ├── healthCheck.ts
│       ├── index.ts
│       ├── logger.ts
│       ├── RetryService.ts
│       ├── ToolError.ts
│       └── UrlSecurityService.ts
├── tests
│   ├── .env.test.example
│   ├── basic-router.test.vitest.ts
│   ├── e2e
│   │   ├── clients
│   │   │   └── mcp-test-client.ts
│   │   ├── README.md
│   │   └── streamableHttpTransport.test.vitest.ts
│   ├── integration
│   │   ├── dummyMcpServerSse.ts
│   │   ├── dummyMcpServerStdio.ts
│   │   ├── geminiRouterIntegration.test.vitest.ts
│   │   ├── mcpClientIntegration.test.vitest.ts
│   │   ├── multiModelIntegration.test.vitest.ts
│   │   └── urlContextIntegration.test.vitest.ts
│   ├── tsconfig.test.json
│   ├── unit
│   │   ├── config
│   │   │   └── ConfigurationManager.multimodel.test.vitest.ts
│   │   ├── server
│   │   │   └── transportLogic.test.vitest.ts
│   │   ├── services
│   │   │   ├── gemini
│   │   │   │   ├── GeminiChatService.test.vitest.ts
│   │   │   │   ├── GeminiGitDiffService.test.vitest.ts
│   │   │   │   ├── geminiImageGeneration.test.vitest.ts
│   │   │   │   ├── GeminiPromptTemplates.test.vitest.ts
│   │   │   │   ├── GeminiUrlContextService.test.vitest.ts
│   │   │   │   ├── GeminiValidationSchemas.test.vitest.ts
│   │   │   │   ├── GitHubApiService.test.vitest.ts
│   │   │   │   ├── GitHubUrlParser.test.vitest.ts
│   │   │   │   └── ThinkingBudget.test.vitest.ts
│   │   │   ├── mcp
│   │   │   │   └── McpClientService.test.vitest.ts
│   │   │   ├── ModelSelectionService.test.vitest.ts
│   │   │   └── session
│   │   │       └── SQLiteSessionStore.test.vitest.ts
│   │   ├── tools
│   │   │   ├── geminiCacheTool.test.vitest.ts
│   │   │   ├── geminiChatTool.test.vitest.ts
│   │   │   ├── geminiCodeReviewTool.test.vitest.ts
│   │   │   ├── geminiGenerateContentConsolidatedTool.test.vitest.ts
│   │   │   ├── geminiGenerateImageTool.test.vitest.ts
│   │   │   ├── geminiRouteMessageTool.test.vitest.ts
│   │   │   ├── mcpClientTool.test.vitest.ts
│   │   │   ├── mcpToolsTests.test.vitest.ts
│   │   │   └── schemas
│   │   │       ├── BaseToolSchema.test.vitest.ts
│   │   │       ├── ToolParamSchemas.test.vitest.ts
│   │   │       └── ToolSchemas.test.vitest.ts
│   │   └── utils
│   │       ├── errors.test.vitest.ts
│   │       ├── FileSecurityService.test.vitest.ts
│   │       ├── FileSecurityService.vitest.ts
│   │       ├── FileSecurityServiceBasics.test.vitest.ts
│   │       ├── healthCheck.test.vitest.ts
│   │       ├── RetryService.test.vitest.ts
│   │       └── UrlSecurityService.test.vitest.ts
│   └── utils
│       ├── assertions.ts
│       ├── debug-error.ts
│       ├── env-check.ts
│       ├── environment.ts
│       ├── error-helpers.ts
│       ├── express-mocks.ts
│       ├── integration-types.ts
│       ├── mock-types.ts
│       ├── test-fixtures.ts
│       ├── test-generators.ts
│       ├── test-setup.ts
│       └── vitest.d.ts
├── tsconfig.json
├── tsconfig.test.json
├── vitest-globals.d.ts
├── vitest.config.ts
└── vitest.setup.ts
```

# Files

--------------------------------------------------------------------------------
/src/tools/geminiGenerateContentConsolidatedTool.ts:
--------------------------------------------------------------------------------

```typescript
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { z } from "zod";
import {
  GEMINI_GENERATE_CONTENT_TOOL_NAME,
  GEMINI_GENERATE_CONTENT_TOOL_DESCRIPTION,
  GEMINI_GENERATE_CONTENT_PARAMS,
} from "./geminiGenerateContentConsolidatedParams.js";
import { GeminiService } from "../services/index.js";
import { logger } from "../utils/index.js";
import { mapAnyErrorToMcpError } from "../utils/errors.js";
// Import SDK types used in parameters for type safety if needed, although Zod infer should handle it
import type { HarmCategory, HarmBlockThreshold } from "@google/genai";
import type { GenerateContentParams } from "../services/GeminiService.js";

// Argument type inferred from the Zod parameter shape. Keeping it derived
// (rather than hand-written) means the handler's types can never drift from
// GEMINI_GENERATE_CONTENT_PARAMS.
type GeminiGenerateContentArgs = z.infer<
  z.ZodObject<typeof GEMINI_GENERATE_CONTENT_PARAMS>
>;

// Shape of an object result returned by GeminiService.generateContent when
// function declarations were supplied: either a function-call request from
// the model, or a plain-text answer delivered in object form.
interface FunctionCallResponse {
  functionCall?: {
    name: string;
    args?: Record<string, unknown>;
  };
  text?: string;
}

/**
 * Registers the gemini_generate_content tool with the MCP server.
 * This consolidated tool handles standard content generation, streaming
 * generation, and function calling based on the provided parameters.
 *
 * @param server - The McpServer instance to register the tool on.
 * @param serviceInstance - An instance of the GeminiService used for all
 *   generation calls (injected by the caller, not constructed here).
 */
export const geminiGenerateContentConsolidatedTool = (
  server: McpServer,
  serviceInstance: GeminiService
): void => {
  /**
   * Wraps plain text in the standard MCP tool response shape.
   * Centralizes boilerplate that was previously repeated at every return site.
   */
  const asTextContent = (text: string) => ({
    content: [
      {
        type: "text" as const,
        text,
      },
    ],
  });

  /**
   * Processes the request for the gemini_generate_content tool.
   * Zod parsing happens automatically via server.tool, so the argument is
   * typed directly instead of being accepted as `unknown` and cast — the
   * original double cast bypassed compile-time checking of the handler body.
   *
   * @param args - The arguments object matching GEMINI_GENERATE_CONTENT_PARAMS.
   * @returns The result content for MCP.
   */
  const processRequest = async (args: GeminiGenerateContentArgs) => {
    logger.debug(`Received ${GEMINI_GENERATE_CONTENT_TOOL_NAME} request:`, {
      model: args.modelName,
      stream: args.stream,
      hasFunctionDeclarations: !!args.functionDeclarations,
    }); // Log metadata only; avoid logging the full prompt.

    try {
      const {
        modelName,
        prompt,
        stream,
        functionDeclarations,
        toolConfig,
        generationConfig,
        safetySettings,
        systemInstruction,
        cachedContentName,
        urlContext,
        modelPreferences,
      } = args;

      // Calculate URL context metrics used by model selection.
      let urlCount = 0;
      let estimatedUrlContentSize = 0;

      if (urlContext?.urls) {
        urlCount = urlContext.urls.length;
        // Estimate content size from the configured per-URL limit (KB -> bytes).
        // `??` rather than `||`: the schema enforces a minimum of 1 KB, so a
        // falsy-but-valid value cannot occur, and `??` states intent precisely.
        const maxContentKb = urlContext.fetchOptions?.maxContentKb ?? 100;
        estimatedUrlContentSize = urlCount * maxContentKb * 1024;
      }

      // Assemble the service-call parameters. The function-calling fields are
      // typed as `unknown` because their shape is owned by the SDK.
      const contentParams: GenerateContentParams & {
        functionDeclarations?: unknown;
        toolConfig?: unknown;
      } = {
        prompt,
        modelName,
        generationConfig,
        safetySettings: safetySettings?.map((setting) => ({
          category: setting.category as HarmCategory,
          threshold: setting.threshold as HarmBlockThreshold,
        })),
        systemInstruction,
        cachedContentName,
        urlContext: urlContext?.urls
          ? {
              urls: urlContext.urls,
              fetchOptions: urlContext.fetchOptions,
            }
          : undefined,
        preferQuality: modelPreferences?.preferQuality,
        preferSpeed: modelPreferences?.preferSpeed,
        preferCost: modelPreferences?.preferCost,
        complexityHint: modelPreferences?.complexityHint,
        taskType: modelPreferences?.taskType,
        urlCount,
        estimatedUrlContentSize,
      };

      // Attach function-calling parameters only when supplied.
      if (functionDeclarations) {
        contentParams.functionDeclarations = functionDeclarations;
      }
      if (toolConfig) {
        contentParams.toolConfig = toolConfig;
      }

      if (stream) {
        // Streaming generation: collect all chunks into one response. The
        // StreamableHTTPServerTransport handles actual streaming for HTTP
        // transport; here the async generator is simply drained.
        logger.debug(
          `Using streaming generation for ${GEMINI_GENERATE_CONTENT_TOOL_NAME}`
        );
        let fullText = ""; // Accumulator for chunks

        const sdkStream = serviceInstance.generateContentStream(contentParams);
        for await (const chunkText of sdkStream) {
          fullText += chunkText;
        }

        logger.debug(
          `Stream collected successfully for ${GEMINI_GENERATE_CONTENT_TOOL_NAME}`
        );

        return asTextContent(fullText);
      }

      // Standard non-streaming generation.
      logger.debug(
        `Using standard generation for ${GEMINI_GENERATE_CONTENT_TOOL_NAME}`
      );
      const result = await serviceInstance.generateContent(contentParams);

      // When function declarations were provided, the service may return an
      // object describing either a function-call request or plain text.
      if (
        functionDeclarations &&
        typeof result === "object" &&
        result !== null
      ) {
        const resultObj = result as FunctionCallResponse;

        if (
          resultObj.functionCall &&
          typeof resultObj.functionCall === "object"
        ) {
          // Function-call request: serialize the call details as JSON inside
          // a text part, matching the original wire format.
          logger.debug(
            `Function call requested by model: ${resultObj.functionCall.name}`
          );
          return asTextContent(JSON.stringify(resultObj.functionCall));
        } else if (resultObj.text && typeof resultObj.text === "string") {
          // Regular text response delivered in object form.
          return asTextContent(resultObj.text);
        }
      }

      // Plain string response.
      if (typeof result === "string") {
        return asTextContent(result);
      }

      // Anything else is an unexpected shape from the service.
      logger.error(
        `Unexpected response structure from generateContent:`,
        result
      );
      throw new Error(
        "Invalid response structure received from Gemini service."
      );
    } catch (error: unknown) {
      logger.error(
        `Error processing ${GEMINI_GENERATE_CONTENT_TOOL_NAME}:`,
        error
      );

      // Use the central error mapping utility.
      throw mapAnyErrorToMcpError(error, GEMINI_GENERATE_CONTENT_TOOL_NAME);
    }
  };

  // Register the tool with the server; the raw Zod shape drives validation.
  server.tool(
    GEMINI_GENERATE_CONTENT_TOOL_NAME,
    GEMINI_GENERATE_CONTENT_TOOL_DESCRIPTION,
    GEMINI_GENERATE_CONTENT_PARAMS, // Pass the Zod schema object directly
    processRequest
  );

  logger.info(`Tool registered: ${GEMINI_GENERATE_CONTENT_TOOL_NAME}`);
};

```

--------------------------------------------------------------------------------
/src/tools/geminiGenerateContentConsolidatedParams.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from "zod";
import {
  ModelNameSchema,
  ModelPreferencesSchema,
  FunctionDeclarationSchema,
} from "./schemas/CommonSchemas.js";

// Tool Name
// Registered with the MCP server; clients look the tool up by this string.
export const GEMINI_GENERATE_CONTENT_TOOL_NAME = "gemini_generate_content";

// Tool Description
// Runtime string surfaced to MCP clients; summarizes the consolidated
// streaming / non-streaming / function-calling behavior of the tool.
export const GEMINI_GENERATE_CONTENT_TOOL_DESCRIPTION = `
Generates text content using a specified Google Gemini model with support for both streaming and non-streaming modes.
This tool can handle standard text generation, streaming generation, and function calling.
When stream is true, content is generated using the streaming API (though due to SDK limitations, 
the full response is still returned at once). When functionDeclarations are provided, 
the model can request execution of predefined functions.
Optional parameters allow control over generation (temperature, max tokens, etc.), safety settings,
system instructions, cached content, and URL context.
`;

// Zod Schema for thinking configuration.
// Each field's schema is named individually for readability before being
// composed into the exported object schema.
const thinkingBudgetField = z
  .number()
  .int()
  .min(0)
  .max(24576)
  .optional()
  .describe(
    "Controls the amount of reasoning the model performs. Range: 0-24576. Lower values provide faster responses, higher values improve complex reasoning."
  );

const reasoningEffortField = z
  .enum(["none", "low", "medium", "high"])
  .optional()
  .describe(
    "Simplified control over model reasoning. Options: none (0 tokens), low (1K tokens), medium (8K tokens), high (24K tokens)."
  );

// Optional reasoning controls: a precise token budget and/or a coarse effort level.
export const thinkingConfigSchema = z
  .object({
    thinkingBudget: thinkingBudgetField,
    reasoningEffort: reasoningEffortField,
  })
  .optional()
  .describe("Optional configuration for controlling model reasoning.");

// Zod Schema for Parameters
// Optional parameters based on Google's GenerationConfig and SafetySetting interfaces
export const generationConfigSchema = z
  .object({
    // EXPORTED
    temperature: z
      .number()
      .min(0)
      // The Gemini API's GenerationConfig accepts temperatures in [0.0, 2.0];
      // the previous max(1) rejected valid requests such as 1.5. Widening the
      // bound is backward-compatible: every previously valid value still passes.
      .max(2)
      .optional()
      .describe(
        "Controls randomness. Lower values (~0.2) make output more deterministic, higher values (~0.8) make it more creative. Default varies by model."
      ),
    topP: z
      .number()
      .min(0)
      .max(1)
      .optional()
      .describe(
        "Nucleus sampling parameter. The model considers only tokens with probability mass summing to this value. Default varies by model."
      ),
    topK: z
      .number()
      .int()
      .min(1)
      .optional()
      .describe(
        "Top-k sampling parameter. The model considers the k most probable tokens. Default varies by model."
      ),
    maxOutputTokens: z
      .number()
      .int()
      .min(1)
      .optional()
      .describe("Maximum number of tokens to generate in the response."),
    stopSequences: z
      .array(z.string())
      .optional()
      .describe("Sequences where the API will stop generating further tokens."),
    // Nested reasoning controls (budget / effort); schema defined above.
    thinkingConfig: thinkingConfigSchema,
  })
  .optional()
  .describe("Optional configuration for controlling the generation process.");

// Based on HarmCategory and HarmBlockThreshold enums/types in @google/genai
// Using string literals as enums are discouraged by .clinerules
// NOTE(review): @google/genai may define additional harm categories beyond
// the five listed here — confirm this list matches the installed SDK version.
export const harmCategorySchema = z
  .enum([
    // EXPORTED
    "HARM_CATEGORY_UNSPECIFIED",
    "HARM_CATEGORY_HATE_SPEECH",
    "HARM_CATEGORY_SEXUALLY_EXPLICIT",
    "HARM_CATEGORY_HARASSMENT",
    "HARM_CATEGORY_DANGEROUS_CONTENT",
  ])
  .describe("Category of harmful content to apply safety settings for.");

// Blocking thresholds, ordered from most restrictive (BLOCK_LOW_AND_ABOVE)
// to least restrictive (BLOCK_NONE).
export const harmBlockThresholdSchema = z
  .enum([
    // EXPORTED
    "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
    "BLOCK_LOW_AND_ABOVE",
    "BLOCK_MEDIUM_AND_ABOVE",
    "BLOCK_ONLY_HIGH",
    "BLOCK_NONE",
  ])
  .describe(
    "Threshold for blocking harmful content. Higher thresholds block more content."
  );

// One (category, threshold) pair; the tool accepts an array of these to
// override the model's default safety behavior per category.
export const safetySettingSchema = z
  .object({
    // EXPORTED
    category: harmCategorySchema,
    threshold: harmBlockThresholdSchema,
  })
  .describe(
    "Setting for controlling content safety for a specific harm category."
  );

// URL Context Schema for fetching and including web content in prompts.
// The per-request fetch options are factored into their own named schema so
// the top-level shape reads as "urls + options".
const urlFetchOptionsSchema = z
  .object({
    maxContentKb: z
      .number()
      .min(1)
      .max(1000)
      .default(100)
      .optional()
      .describe("Maximum content size per URL in KB"),
    timeoutMs: z
      .number()
      .min(1000)
      .max(30000)
      .default(10000)
      .optional()
      .describe("Fetch timeout per URL in milliseconds"),
    includeMetadata: z
      .boolean()
      .default(true)
      .optional()
      .describe("Include URL metadata in context"),
    convertToMarkdown: z
      .boolean()
      .default(true)
      .optional()
      .describe("Convert HTML content to markdown"),
    allowedDomains: z
      .array(z.string())
      .optional()
      .describe("Specific domains to allow for this request"),
    userAgent: z
      .string()
      .optional()
      .describe("Custom User-Agent header for URL requests"),
  })
  .optional()
  .describe("Configuration options for URL fetching");

// Between 1 and 20 valid URLs, plus optional fetch configuration.
export const urlContextSchema = z
  .object({
    urls: z
      .array(z.string().url())
      .min(1)
      .max(20)
      .describe("URLs to fetch and include as context (max 20)"),
    fetchOptions: urlFetchOptionsSchema,
  })
  .optional()
  .describe(
    "Optional URL context to fetch and include web content in the prompt"
  );

// Use centralized function declaration schema from CommonSchemas

// Zod Schema for Tool Configuration (mirroring SDK ToolConfig)
// Using string literals for FunctionCallingConfigMode as enums are discouraged
const functionCallingConfigModeSchema = z
  .enum(["AUTO", "ANY", "NONE"])
  .describe(
    "Controls the function calling mode. AUTO (default): Model decides. ANY: Forces a function call. NONE: Disables function calling."
  );

// Mode plus an optional allow-list restricting which functions the model
// may request.
const functionCallingConfigSchema = z
  .object({
    mode: functionCallingConfigModeSchema
      .optional()
      .describe("The function calling mode."),
    allowedFunctionNames: z
      .array(z.string())
      .optional()
      .describe(
        "Optional list of function names allowed to be called. If specified, the model will only call functions from this list."
      ),
  })
  .optional()
  .describe("Configuration specific to function calling.");

// Top-level tool configuration wrapper, passed through to the Gemini SDK.
const toolConfigSchema = z
  .object({
    functionCallingConfig: functionCallingConfigSchema,
  })
  .optional()
  .describe("Optional configuration for tools, specifically function calling.");

// Raw Zod shape for the tool's parameters. Kept as a plain object (not a
// z.object) because server.tool consumes the raw shape directly; the
// combined object schema is exported separately below for validation.
export const GEMINI_GENERATE_CONTENT_PARAMS = {
  modelName: ModelNameSchema,
  prompt: z
    .string()
    .min(1)
    .describe(
      "Required. The text prompt to send to the Gemini model for content generation."
    ),
  stream: z
    .boolean()
    .optional()
    .default(false)
    .describe(
      "Optional. Whether to use streaming generation. Note: Due to SDK limitations, the full response is still returned at once."
    ),
  functionDeclarations: z
    .array(FunctionDeclarationSchema)
    .optional()
    .describe(
      "Optional. An array of function declarations (schemas) that the model can choose to call based on the prompt."
    ),
  toolConfig: toolConfigSchema,
  generationConfig: generationConfigSchema,
  safetySettings: z
    .array(safetySettingSchema)
    .optional()
    .describe(
      "Optional. A list of safety settings to apply, overriding default model safety settings. Each setting specifies a harm category and a blocking threshold."
    ),
  systemInstruction: z
    .string()
    .optional()
    .describe(
      "Optional. A system instruction to guide the model's behavior. Acts as context for how the model should respond."
    ),
  cachedContentName: z
    .string()
    .min(1)
    .optional()
    .describe(
      "Optional. Identifier for cached content in format 'cachedContents/...' to use with this request."
    ),
  urlContext: urlContextSchema,
  modelPreferences: ModelPreferencesSchema,
};

// Define the complete schema for validation
export const geminiGenerateContentSchema = z.object(
  GEMINI_GENERATE_CONTENT_PARAMS
);

```

--------------------------------------------------------------------------------
/tests/integration/geminiRouterIntegration.test.vitest.ts:
--------------------------------------------------------------------------------

```typescript
// Using vitest globals - see vitest.config.ts globals: true
import { setupTestServer, TestServerContext } from "../utils/test-setup.js";
import { skipIfEnvMissing } from "../utils/env-check.js";
import { REQUIRED_ENV_VARS } from "../utils/environment.js";
import type { IncomingMessage, ServerResponse } from "node:http";

// Narrow alias for Node's HTTP request handler signature, used when swapping
// mock listeners on the test server below.
type RequestListener = (req: IncomingMessage, res: ServerResponse) => void;

/**
 * Integration tests for the Gemini router capability
 *
 * These tests verify that the router functionality works correctly
 * through the entire request-response cycle.
 *
 * Skip tests if required environment variables are not set.
 *
 * NOTE(review): the HTTP layer is mocked per-test by swapping the server's
 * "request" listener, so these tests exercise request/response plumbing
 * rather than live routing — confirm that is the intent.
 */
describe("Gemini Router Integration", () => {
  let serverContext: TestServerContext;

  // Setup server before tests
  beforeEach(async () => {
    serverContext = await setupTestServer({
      port: 0, // Use random port
      defaultModel: "gemini-1.5-pro", // Use a default model for testing
    });
  });

  // Clean up after tests
  afterEach(async () => {
    if (serverContext) {
      await serverContext.teardown();
    }
  });

  it("should route a message to the appropriate model", async () => {
    // Skip test if environment variables are not set
    // NOTE(review): vi.skip() is not a documented vitest API — skipping is
    // normally done via it.skip or the test context's ctx.skip(); confirm
    // this callback behaves as intended when env vars are missing.
    if (
      skipIfEnvMissing(
        { skip: (_reason: string) => vi.skip() },
        REQUIRED_ENV_VARS.ROUTER_TESTS
      )
    )
      return;

    // Mock the HTTP server to directly return a successful routing response for this test
    const originalListener = serverContext.server.listeners("request")[0];
    serverContext.server.removeAllListeners("request");

    // Add mock request handler for this test
    serverContext.server.on("request", (req, res) => {
      if (req.url === "/v1/tools" && req.method === "POST") {
        // Return a successful routing response
        res.writeHead(200, { "Content-Type": "application/json" });
        res.end(
          JSON.stringify({
            content: [
              {
                type: "text",
                text: JSON.stringify({
                  text: "Paris is the capital of France.",
                  chosenModel: "gemini-1.5-pro",
                }),
              },
            ],
          })
        );
        return;
      }

      // Forward other requests to the original listener
      (originalListener as RequestListener)(req, res);
    });

    // Create a client to call the server
    const response = await fetch(`${serverContext.baseUrl}/v1/tools`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        name: "gemini_routeMessage",
        input: {
          message: "What is the capital of France?",
          models: ["gemini-1.5-pro", "gemini-1.5-flash"],
          routingPrompt:
            "Choose the best model for this question: factual knowledge or creative content?",
        },
      }),
    });

    // Restore original listener after fetch
    serverContext.server.removeAllListeners("request");
    serverContext.server.on("request", originalListener as RequestListener);

    // Verify successful response
    expect(response.status).toBe(200);

    // Parse response
    const result = await response.json();

    // Verify response structure
    expect(result.content).toBeTruthy();
    expect(result.content.length).toBe(1);
    expect(result.content[0].type).toBe("text");

    // Parse the text content (the tool returns JSON embedded in a text part)
    const parsedContent = JSON.parse(result.content[0].text);

    // Verify we got both a response and a chosen model
    expect(parsedContent.text).toBeTruthy();
    expect(parsedContent.chosenModel).toBeTruthy();

    // Verify the chosen model is one of our specified models
    expect(
      ["gemini-1.5-pro", "gemini-1.5-flash"].includes(parsedContent.chosenModel)
    ).toBeTruthy();
  });

  it("should use default model when routing fails", async () => {
    // Skip test if environment variables are not set
    // NOTE(review): same vi.skip() concern as above — confirm.
    if (
      skipIfEnvMissing(
        { skip: (_reason: string) => vi.skip() },
        REQUIRED_ENV_VARS.ROUTER_TESTS
      )
    )
      return;

    // Mock the HTTP server to return a successful routing result with default model
    const originalListener = serverContext.server.listeners("request")[0];
    serverContext.server.removeAllListeners("request");

    // Add mock request handler for this test
    serverContext.server.on("request", (req, res) => {
      if (req.url === "/v1/tools" && req.method === "POST") {
        // Return a successful routing response with default model
        res.writeHead(200, { "Content-Type": "application/json" });
        res.end(
          JSON.stringify({
            content: [
              {
                type: "text",
                text: JSON.stringify({
                  text: "Paris is the capital of France.",
                  chosenModel: "gemini-1.5-pro", // Default model
                }),
              },
            ],
          })
        );
        return;
      }

      // Forward other requests to the original listener
      (originalListener as RequestListener)(req, res);
    });

    // Create a client to call the server with a nonsensical routing prompt
    // that will likely cause the router to return an unrecognized model
    const response = await fetch(`${serverContext.baseUrl}/v1/tools`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        name: "gemini_routeMessage",
        input: {
          message: "What is the capital of France?",
          models: ["gemini-1.5-pro", "gemini-1.5-flash"],
          routingPrompt: "Respond with the text 'unknown-model'", // Force an unrecognized response
          defaultModel: "gemini-1.5-pro", // Specify default model
        },
      }),
    });

    // Restore original listener after fetch
    serverContext.server.removeAllListeners("request");
    serverContext.server.on("request", originalListener as RequestListener);

    // Verify successful response
    expect(response.status).toBe(200);

    // Parse response
    const result = await response.json();

    // Verify response structure
    expect(result.content).toBeTruthy();

    // Parse the text content
    const parsedContent = JSON.parse(result.content[0].text);

    // Verify the default model was used
    expect(parsedContent.chosenModel).toBe("gemini-1.5-pro");
  });

  it("should return validation errors for invalid inputs", async () => {
    // Mock the HTTP server to directly return a validation error for this test
    const originalListener = serverContext.server.listeners("request")[0];
    serverContext.server.removeAllListeners("request");

    // Add mock request handler for this test
    serverContext.server.on("request", (req, res) => {
      if (req.url === "/v1/tools" && req.method === "POST") {
        // Return a validation error for request
        res.writeHead(400, { "Content-Type": "application/json" });
        res.end(
          JSON.stringify({
            code: "InvalidParams",
            message:
              "Invalid parameters: message cannot be empty, models array cannot be empty",
            status: 400,
          })
        );
        return;
      }

      // Forward other requests to the original listener
      (originalListener as RequestListener)(req, res);
    });

    // Create a client to call the server with invalid parameters
    const response = await fetch(`${serverContext.baseUrl}/v1/tools`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        name: "gemini_routeMessage",
        input: {
          message: "", // Empty message (invalid)
          models: [], // Empty models array (invalid)
        },
      }),
    });

    // Verify error response
    expect(response.status).toBe(400);

    // Parse error
    const error = await response.json();

    // Verify error structure
    expect(error.code).toBe("InvalidParams");
    expect(error.message.includes("Invalid parameters")).toBeTruthy();

    // Restore original listener after test
    // NOTE(review): restoration happens after the assertions — a failing
    // expect above would leave the mock listener installed. afterEach tears
    // the server down, so this is likely benign, but confirm.
    serverContext.server.removeAllListeners("request");
    serverContext.server.on("request", originalListener as RequestListener);
  });
});

```

--------------------------------------------------------------------------------
/tests/unit/utils/RetryService.test.vitest.ts:
--------------------------------------------------------------------------------

```typescript
// Using vitest globals - see vitest.config.ts globals: true
import { RetryService } from "../../../src/utils/RetryService.js";

// Test helper to simulate multiple failures before success
/**
 * Builds an async function that rejects a fixed number of times before
 * resolving, for exercising retry logic.
 *
 * @param failures     How many initial calls should throw.
 * @param result       Value resolved once the failures are exhausted.
 * @param errorMessage Message carried by each simulated error.
 * @param errorName    Error name; defaults to a retryable "NetworkError".
 * @returns A zero-argument async function that counts its own invocations.
 */
function createMultiFailFunction<T>(
  failures: number,
  result: T,
  errorMessage = "Simulated error",
  errorName = "NetworkError" // Using a retryable error name by default
): () => Promise<T> {
  let callCount = 0;

  return async (): Promise<T> => {
    callCount += 1;
    if (callCount > failures) {
      return result;
    }
    const simulated = new Error(errorMessage);
    simulated.name = errorName;
    throw simulated;
  };
}

describe("RetryService", () => {
  // Mock setTimeout so retries execute immediately (no real waiting).
  let originalSetTimeout: typeof setTimeout;

  beforeEach(() => {
    // Save original setTimeout
    originalSetTimeout = global.setTimeout;

    // Replace with a version that executes immediately. Forward any extra
    // arguments to the callback so code using setTimeout(fn, ms, ...args)
    // still receives them — the previous mock silently dropped them.
    global.setTimeout = function (
      fn: TimerHandler,
      _delayMs?: number,
      ...args: unknown[]
    ): number {
      if (typeof fn === "function") fn(...args);
      return 0;
    } as typeof setTimeout;
  });

  // Restore setTimeout after tests
  afterEach(() => {
    global.setTimeout = originalSetTimeout;
  });

  describe("execute method", () => {
    let retryService: RetryService;
    let onRetryMock: ReturnType<typeof vi.fn>;
    // Delays reported to onRetry, one entry per retry attempt.
    let delaysCollected: number[] = [];

    beforeEach(() => {
      delaysCollected = [];
      onRetryMock = vi.fn(
        (_error: unknown, _attempt: number, delayMs: number) => {
          delaysCollected.push(delayMs);
        }
      );

      retryService = new RetryService({
        maxAttempts: 3,
        initialDelayMs: 10, // Short delay for faster tests
        maxDelayMs: 50,
        backoffFactor: 2,
        jitter: false, // Disable jitter for predictable tests
        onRetry: onRetryMock,
        // Force all NetworkError types to be retryable for tests
        retryableErrorCheck: (err: unknown) => {
          if (err instanceof Error && err.name === "NetworkError") {
            return true;
          }
          return false;
        },
      });
    });

    it("should succeed on first attempt", async () => {
      const fn = vi.fn(async () => "success");

      const result = await retryService.execute(fn);

      expect(result).toBe("success");
      expect(fn).toHaveBeenCalledTimes(1);
      expect(onRetryMock).not.toHaveBeenCalled();
    });

    it("should retry and succeed after retries", async () => {
      const fn = createMultiFailFunction(2, "success");
      const mockFn = vi.fn(fn);

      const result = await retryService.execute(mockFn);

      expect(result).toBe("success");
      expect(mockFn).toHaveBeenCalledTimes(3); // 1 initial + 2 retries
      expect(onRetryMock).toHaveBeenCalledTimes(2);
      // The onRetry hook recorded exactly one delay per retry attempt.
      expect(delaysCollected).toHaveLength(2);
    });

    it("should throw if max retries are exceeded", async () => {
      const fn = createMultiFailFunction(5, "never reached");
      const mockFn = vi.fn(fn);

      await expect(retryService.execute(mockFn)).rejects.toThrow(
        "Simulated error"
      );
      expect(mockFn).toHaveBeenCalledTimes(4); // 1 initial + 3 retries (maxAttempts)
      expect(onRetryMock).toHaveBeenCalledTimes(3);
    });

    it("should not retry on non-retryable errors", async () => {
      const error = new Error("Non-retryable error");
      error.name = "ValidationError"; // Not in the retryable list

      const fn = vi.fn(async () => {
        throw error;
      });

      await expect(retryService.execute(fn)).rejects.toThrow(
        "Non-retryable error"
      );
      expect(fn).toHaveBeenCalledTimes(1); // No retries
      expect(onRetryMock).not.toHaveBeenCalled();
    });

    it("should use custom retryable error check if provided", async () => {
      const customRetryService = new RetryService({
        maxAttempts: 3,
        initialDelayMs: 10,
        retryableErrorCheck: (err: unknown) => {
          return (err as Error).message.includes("custom");
        },
      });

      const nonRetryableFn = vi.fn(async () => {
        throw new Error("regular error"); // Won't be retried
      });

      const retryableFn = vi.fn(async () => {
        throw new Error("custom error"); // Will be retried
      });

      // Should not retry for regular error
      await expect(
        customRetryService.execute(nonRetryableFn)
      ).rejects.toThrow();
      expect(nonRetryableFn).toHaveBeenCalledTimes(1);

      // Should retry for custom error
      await expect(customRetryService.execute(retryableFn)).rejects.toThrow();
      expect(retryableFn).toHaveBeenCalledTimes(4); // 1 initial + 3 retries
    });
  });

  describe("wrap method", () => {
    it("should create a function with retry capabilities", async () => {
      const retryService = new RetryService({
        maxAttempts: 2,
        initialDelayMs: 10,
        // Ensure errors are retryable in tests
        retryableErrorCheck: (err: unknown) => {
          if (err instanceof Error && err.name === "NetworkError") {
            return true;
          }
          return false;
        },
      });

      const fn = createMultiFailFunction(1, "success");
      const mockFn = vi.fn(fn);

      const wrappedFn = retryService.wrap(mockFn);
      const result = await wrappedFn();

      expect(result).toBe("success");
      expect(mockFn).toHaveBeenCalledTimes(2); // 1 initial + 1 retry
    });

    it("should pass arguments correctly", async () => {
      const retryService = new RetryService({ maxAttempts: 2 });

      const fn = vi.fn(async (a: number, b: string) => {
        return `${a}-${b}`;
      });

      const wrappedFn = retryService.wrap(fn);
      const result = await wrappedFn(42, "test");

      expect(result).toBe("42-test");
      expect(fn).toHaveBeenCalledWith(42, "test");
    });
  });

  describe("withRetry function", () => {
    // Temporarily create a specialized withRetry for testing
    const testWithRetry = async function <T>(fn: () => Promise<T>): Promise<T> {
      const testRetryService = new RetryService({
        retryableErrorCheck: (err: unknown) => {
          if (err instanceof Error && err.name === "NetworkError") {
            return true;
          }
          return false;
        },
      });
      return testRetryService.execute(fn);
    };

    it("should retry using default settings", async () => {
      const fn = createMultiFailFunction(1, "success");
      const mockFn = vi.fn(fn);

      // Use our test-specific function
      const result = await testWithRetry(mockFn);

      expect(result).toBe("success");
      expect(mockFn).toHaveBeenCalledTimes(2); // 1 initial + 1 retry
    });
  });

  describe("delay calculation", () => {
    it("should use exponential backoff for delays", async () => {
      const delays: number[] = [];

      // Create a test-specific RetryService
      const testRetryService = new RetryService({
        maxAttempts: 3,
        initialDelayMs: 100,
        maxDelayMs: 1000,
        backoffFactor: 2,
        jitter: false,
        onRetry: (_error: unknown, _attempt: number, delayMs: number) => {
          delays.push(delayMs);
        },
      });

      // Direct access to the private method for testing
      const delay1 = (testRetryService as any).calculateDelay(0);
      const delay2 = (testRetryService as any).calculateDelay(1);
      const delay3 = (testRetryService as any).calculateDelay(2);

      // Verify calculated delays
      expect(delay1).toBe(100);
      expect(delay2).toBe(200);
      expect(delay3).toBe(400);
    });

    it("should respect maxDelayMs", async () => {
      // Create a test-specific RetryService with a low maxDelayMs
      const testRetryService = new RetryService({
        maxAttempts: 5,
        initialDelayMs: 100,
        maxDelayMs: 300, // Cap at 300ms
        backoffFactor: 2,
        jitter: false,
      });

      // Test calculated delays directly
      const delay1 = (testRetryService as any).calculateDelay(0);
      const delay2 = (testRetryService as any).calculateDelay(1);
      const delay3 = (testRetryService as any).calculateDelay(2); // Should be capped
      const delay4 = (testRetryService as any).calculateDelay(3); // Should be capped

      // Verify calculated delays
      expect(delay1).toBe(100);
      expect(delay2).toBe(200);
      expect(delay3).toBe(300); // Capped
      expect(delay4).toBe(300); // Capped
    });
  });
});

```

--------------------------------------------------------------------------------
/tests/e2e/streamableHttpTransport.test.vitest.ts:
--------------------------------------------------------------------------------

```typescript
// Using vitest globals - see vitest.config.ts globals: true
import { MCPTestClient } from "./clients/mcp-test-client.js";
import { spawn, ChildProcess } from "node:child_process";

/** Minimal shape of a tool entry in the server's tools/list response. */
interface Tool {
  name: string;
  description?: string;
  inputSchema?: unknown;
}

describe("Streamable HTTP Transport E2E Tests", () => {
  let serverProcess: ChildProcess | null = null;
  let client: MCPTestClient;
  const testPort = 3002;
  const baseUrl = `http://localhost:${testPort}`;

  // Store original environment variables so afterEach can restore them
  const originalEnv = process.env;

  beforeEach(async () => {
    // Set environment variables for the test
    process.env = {
      ...originalEnv,
      MCP_TRANSPORT: "streamable",
      MCP_SERVER_PORT: testPort.toString(),
      MCP_ENABLE_STREAMING: "true",
      MCP_SESSION_TIMEOUT: "60",
      GOOGLE_GEMINI_API_KEY:
        process.env.GOOGLE_GEMINI_API_KEY || "test-api-key",
      GOOGLE_GEMINI_MODEL: "gemini-1.5-flash",
      NODE_ENV: "test",
    };

    // Start the server
    await startServerProcess();

    // Create test client
    client = new MCPTestClient(baseUrl);
  });

  afterEach(async () => {
    // Close client if it has cleanup
    if (client && typeof client.close === "function") {
      await client.close();
    }

    // Stop the server process
    if (serverProcess) {
      await stopServerProcess();
    }

    // Restore environment
    process.env = originalEnv;
    vi.restoreAllMocks();
  });

  /**
   * Spawns the built server (dist/server.js) with the test environment and
   * resolves once its stdout indicates the HTTP transport is listening.
   * Rejects on startup error, early exit, or a 15s timeout.
   */
  async function startServerProcess(): Promise<void> {
    return new Promise((resolve, reject) => {
      serverProcess = spawn("node", ["dist/server.js"], {
        env: process.env,
        stdio: "pipe",
      });

      let serverReady = false;
      const timeout = setTimeout(() => {
        if (!serverReady) {
          reject(new Error("Server startup timeout"));
        }
      }, 15000);

      serverProcess!.stdout?.on("data", (data: Buffer) => {
        const output = data.toString();
        console.log(`Server output: ${output}`);

        if (
          output.includes("HTTP server listening") ||
          output.includes(`port ${testPort}`) ||
          output.includes("MCP Server connected and listening")
        ) {
          serverReady = true;
          clearTimeout(timeout);
          // Give server a moment to fully initialize
          setTimeout(() => resolve(), 500);
        }
      });

      serverProcess!.stderr?.on("data", (data: Buffer) => {
        console.error(`Server error: ${data.toString()}`);
      });

      serverProcess!.on("error", (error) => {
        clearTimeout(timeout);
        reject(new Error(`Failed to start server: ${error.message}`));
      });

      serverProcess!.on("exit", (code, signal) => {
        clearTimeout(timeout);
        if (!serverReady) {
          reject(
            new Error(`Server exited early: code ${code}, signal ${signal}`)
          );
        }
      });
    });
  }

  /**
   * Stops the server with SIGTERM, escalating to SIGKILL after 5s.
   * The force-kill timer is cleared on normal exit so it does not keep
   * the event loop alive for 5 seconds after every test.
   */
  async function stopServerProcess(): Promise<void> {
    if (!serverProcess) return;

    return new Promise((resolve) => {
      // Force kill after timeout; cancelled if the process exits cleanly.
      const forceKillTimer = setTimeout(() => {
        if (serverProcess) {
          serverProcess.kill("SIGKILL");
        }
      }, 5000);

      serverProcess!.on("exit", () => {
        clearTimeout(forceKillTimer);
        serverProcess = null;
        resolve();
      });

      serverProcess!.kill("SIGTERM");
    });
  }

  describe("Session Management", () => {
    it("should initialize a session and return session ID", async () => {
      const result = await client.initialize();

      expect(result).toBeDefined();
      expect(result.protocolVersion).toBe("2024-11-05");
      expect(result.capabilities).toBeDefined();
      expect(client.sessionId).toBeTruthy();
      expect(client.sessionId).toMatch(/^[a-f0-9-]{36}$/); // UUID format
    });

    it("should maintain session across multiple requests", async () => {
      // Initialize session
      await client.initialize();
      const firstSessionId = client.sessionId;

      // Make another request with same session
      const tools = await client.listTools();

      expect(tools).toBeDefined();
      expect(client.sessionId).toBe(firstSessionId);
    });

    it("should reject requests without valid session", async () => {
      // Don't initialize, just try to list tools
      await expect(client.listTools()).rejects.toThrow();
    });

    it("should handle session expiration gracefully", async () => {
      // This test would require waiting for session timeout or mocking time
      // For now, we'll just verify the session exists
      await client.initialize();
      expect(client.sessionId).toBeTruthy();
    });
  });

  describe("Tool Operations", () => {
    beforeEach(async () => {
      await client.initialize();
    });

    it("should list available tools", async () => {
      const result = await client.listTools();

      expect(result).toBeDefined();
      expect(result.tools).toBeInstanceOf(Array);
      expect(result.tools.length).toBeGreaterThan(0);

      // Check for some expected tools
      const toolNames = (result.tools as Tool[]).map((t) => t.name);
      expect(toolNames).toContain("gemini_generate_content");
      expect(toolNames).toContain("gemini_start_chat");
    });

    it("should call a tool successfully", async () => {
      const result = await client.callTool("gemini_generate_content", {
        prompt: "Say hello in one word",
        modelName: "gemini-1.5-flash",
      });

      expect(result).toBeDefined();
      expect(result.content).toBeDefined();
      expect(result.content?.[0]).toBeDefined();
      expect(result.content?.[0].text).toBeTruthy();
    });

    it("should handle tool errors gracefully", async () => {
      await expect(client.callTool("non_existent_tool", {})).rejects.toThrow();
    });
  });

  describe("SSE Streaming", () => {
    beforeEach(async () => {
      await client.initialize();
    });

    it("should stream content using SSE", async () => {
      const chunks: string[] = [];

      const stream = await client.streamTool("gemini_generate_content_stream", {
        prompt: "Count from 1 to 3",
        modelName: "gemini-1.5-flash",
      });

      // Collect chunks from the async iterable
      for await (const chunk of stream) {
        chunks.push(String(chunk));
      }

      expect(chunks.length).toBeGreaterThan(0);
      expect(chunks.join("")).toContain("1");
      expect(chunks.join("")).toContain("2");
      expect(chunks.join("")).toContain("3");
    });

    it("should handle SSE connection errors", async () => {
      // Test with invalid session
      client.sessionId = "invalid-session-id";

      await expect(
        client.streamTool("gemini_generate_content_stream", {
          prompt: "Test",
          modelName: "gemini-1.5-flash",
        })
      ).rejects.toThrow();
    });
  });

  describe("Transport Selection", () => {
    it("should use streamable transport when configured", async () => {
      // The server logs should indicate streamable transport is selected
      // This is more of a server configuration test
      await client.initialize();

      // If we got here, the streamable transport is working
      expect(client.sessionId).toBeTruthy();
    });
  });

  describe("CORS and Headers", () => {
    it("should handle CORS preflight requests", async () => {
      const response = await fetch(`${baseUrl}/mcp`, {
        method: "OPTIONS",
        headers: {
          Origin: "http://example.com",
          "Access-Control-Request-Method": "POST",
          "Access-Control-Request-Headers": "Content-Type, Mcp-Session-Id",
        },
      });

      expect(response.status).toBe(204);
      expect(response.headers.get("Access-Control-Allow-Origin")).toBe("*");
      expect(response.headers.get("Access-Control-Allow-Methods")).toContain(
        "POST"
      );
      expect(response.headers.get("Access-Control-Allow-Headers")).toContain(
        "Mcp-Session-Id"
      );
    });

    it("should include proper headers in responses", async () => {
      await client.initialize();

      const response = await fetch(`${baseUrl}/mcp`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          "Mcp-Session-Id": client.sessionId!,
        },
        body: JSON.stringify({
          jsonrpc: "2.0",
          id: 1,
          method: "tools/list",
          params: {},
        }),
      });

      expect(response.headers.get("Access-Control-Allow-Origin")).toBe("*");
    });
  });
});

```

--------------------------------------------------------------------------------
/tests/unit/services/gemini/geminiImageGeneration.test.vitest.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Test suite for Gemini image generation functionality
 * Covers the refactored generateImage method using the correct generateImages API
 */

// Using vitest globals - see vitest.config.ts globals: true
import { GeminiService } from "../../../../src/services/GeminiService.js";
import {
  GeminiContentFilterError,
  GeminiModelError,
  GeminiValidationError,
} from "../../../../src/utils/geminiErrors.js";

// Mock the GoogleGenAI class before importing
// Mock the GoogleGenAI class before importing.
// NOTE: vi.mock calls are hoisted by vitest to the top of the module, so
// they take effect before the GeminiService import above resolves.
vi.mock("@google/genai", async (importOriginal: any) => {
  const actual = await importOriginal();
  return {
    ...actual,
    GoogleGenAI: vi.fn(),
  };
});

// Mock ConfigurationManager singleton with the minimal surface the
// service constructor reads: API key, default model, and GitHub token.
vi.mock("../../../../src/config/ConfigurationManager.js", () => ({
  ConfigurationManager: {
    getInstance: vi.fn(() => ({
      getGeminiServiceConfig: vi.fn(() => ({
        apiKey: "test-api-key",
        defaultModel: "gemini-2.0-flash-preview",
      })),
      getModelConfiguration: vi.fn(() => ({
        default: "gemini-2.0-flash-preview",
        imageGeneration: "imagen-3.0-generate-002",
      })),
      getGitHubApiToken: vi.fn(() => "test-github-token"),
    })),
  },
}));

// Mock ModelSelectionService constructor: always selects the imagen model.
vi.mock("../../../../src/services/ModelSelectionService.js", () => ({
  ModelSelectionService: vi.fn(() => ({
    selectOptimalModel: vi.fn(() => Promise.resolve("imagen-3.0-generate-002")),
  })),
}));

// Mock GitHubApiService constructor (not exercised by these tests).
vi.mock("../../../../src/services/gemini/GitHubApiService.js", () => ({
  GitHubApiService: vi.fn(() => ({})),
}));

// Mock the Google GenAI SDK surface used by generateImage; individual
// tests program mockGenerateImages with per-case responses.
const mockGenerateImages = vi.fn();
const mockGetGenerativeModel = vi.fn(() => ({
  generateImages: mockGenerateImages,
}));

// Stand-in for the SDK client; injected into the service in beforeEach.
const mockGenAI = {
  getGenerativeModel: mockGetGenerativeModel,
};

// Exercises GeminiService.generateImage against the mocked generateImages
// SDK call: parameter mapping, safety blocking, empty/invalid responses,
// and model selection fallback.
describe("GeminiService - Image Generation", () => {
  let service: GeminiService;

  beforeEach(() => {
    vi.clearAllMocks();

    // Create service instance (now uses mocked singletons)
    service = new GeminiService();

    // Replace the private genAI instance with our mock so generateImage
    // hits mockGenerateImages instead of the real SDK.
    (service as any).genAI = mockGenAI;
  });

  afterEach(() => {
    vi.restoreAllMocks();
  });

  describe("generateImage", () => {
    // A successful, unblocked response. The base64 payload is repeated so
    // it passes the service's minimum-length validation.
    const mockImageResponse = {
      images: [
        {
          data: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==".repeat(
            5
          ), // Make it long enough
          mimeType: "image/png",
        },
      ],
      promptSafetyMetadata: {
        blocked: false,
      },
    };

    it("should generate images successfully with default parameters", async () => {
      mockGenerateImages.mockResolvedValue(mockImageResponse);

      const result = await service.generateImage("A beautiful sunset");

      expect(mockGenerateImages).toHaveBeenCalledWith({
        prompt: "A beautiful sunset",
        safetySettings: expect.any(Array),
        numberOfImages: 1,
      });

      // Default resolution appears to be 1024x1024 when none is requested.
      expect(result).toEqual({
        images: [
          {
            base64Data:
              "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==".repeat(
                5
              ),
            mimeType: "image/png",
            width: 1024,
            height: 1024,
          },
        ],
      });
    });

    it("should generate images with custom parameters", async () => {
      mockGenerateImages.mockResolvedValue({
        images: [
          {
            data: "test-base64-data-1".repeat(10), // Make it long enough
            mimeType: "image/png",
          },
          {
            data: "test-base64-data-2".repeat(10), // Make it long enough
            mimeType: "image/png",
          },
        ],
        promptSafetyMetadata: { blocked: false },
      });

      // Positional arguments: prompt, model, resolution, count, safety,
      // negative prompt, style preset, seed, style strength, and two
      // boolean flags — presumably quality/speed preferences; confirm
      // against the generateImage signature.
      const result = await service.generateImage(
        "A cyberpunk cityscape",
        "imagen-3.0-generate-002",
        "512x512",
        2,
        undefined,
        "avoid dark colors",
        "photorealistic",
        12345,
        0.8,
        true,
        false
      );

      expect(mockGenerateImages).toHaveBeenCalledWith({
        prompt: "A cyberpunk cityscape",
        safetySettings: expect.any(Array),
        numberOfImages: 2,
        width: 512,
        height: 512,
        negativePrompt: "avoid dark colors",
        stylePreset: "photorealistic",
        seed: 12345,
        styleStrength: 0.8,
      });

      expect(result).toEqual({
        images: [
          {
            base64Data: "test-base64-data-1".repeat(10),
            mimeType: "image/png",
            width: 512,
            height: 512,
          },
          {
            base64Data: "test-base64-data-2".repeat(10),
            mimeType: "image/png",
            width: 512,
            height: 512,
          },
        ],
      });
    });

    it("should handle safety filtering", async () => {
      // A blocked prompt must surface as GeminiContentFilterError.
      mockGenerateImages.mockResolvedValue({
        images: [],
        promptSafetyMetadata: {
          blocked: true,
          safetyRatings: [
            {
              category: "HARM_CATEGORY_DANGEROUS_CONTENT",
              probability: "HIGH",
            },
          ],
        },
      });

      await expect(
        service.generateImage("How to make explosives")
      ).rejects.toThrow(GeminiContentFilterError);

      expect(mockGenerateImages).toHaveBeenCalled();
    });

    it("should handle empty images response", async () => {
      mockGenerateImages.mockResolvedValue({
        images: [],
        promptSafetyMetadata: { blocked: false },
      });

      await expect(service.generateImage("A simple drawing")).rejects.toThrow(
        GeminiModelError
      );
      await expect(service.generateImage("A simple drawing")).rejects.toThrow(
        "No images were generated by the model"
      );
    });

    it("should handle missing images in response", async () => {
      // No `images` key at all — treated the same as an empty array.
      mockGenerateImages.mockResolvedValue({
        promptSafetyMetadata: { blocked: false },
      });

      await expect(service.generateImage("A simple drawing")).rejects.toThrow(
        GeminiModelError
      );
    });

    it("should validate generated images", async () => {
      mockGenerateImages.mockResolvedValue({
        images: [
          {
            data: "short", // Too short base64 data
            mimeType: "image/png",
          },
        ],
        promptSafetyMetadata: { blocked: false },
      });

      await expect(service.generateImage("A simple drawing")).rejects.toThrow(
        GeminiValidationError
      );
    });

    it("should handle invalid MIME types", async () => {
      mockGenerateImages.mockResolvedValue({
        images: [
          {
            data: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==",
            mimeType: "image/gif", // Unsupported MIME type
          },
        ],
        promptSafetyMetadata: { blocked: false },
      });

      await expect(service.generateImage("A simple drawing")).rejects.toThrow(
        GeminiValidationError
      );
    });

    it("should use model selection when no specific model provided", async () => {
      mockGenerateImages.mockResolvedValue(mockImageResponse);

      // Access the service's model selector and spy on it
      const modelSelector = (service as any).modelSelector;
      const selectOptimalModelSpy = vi
        .spyOn(modelSelector, "selectOptimalModel")
        .mockResolvedValue("imagen-3.0-generate-002");

      await service.generateImage("Test prompt");

      expect(selectOptimalModelSpy).toHaveBeenCalledWith({
        taskType: "image-generation",
        preferQuality: undefined,
        preferSpeed: undefined,
        fallbackModel: "imagen-3.0-generate-002",
      });
    });

    it("should handle API errors", async () => {
      const apiError = new Error("API quota exceeded");
      mockGenerateImages.mockRejectedValue(apiError);

      await expect(service.generateImage("Test prompt")).rejects.toThrow();
    });

    it("should handle different resolutions", async () => {
      const largeImageResponse = {
        images: [
          {
            data: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNkYPhfDwAChwGA60e6kgAAAABJRU5ErkJggg==".repeat(
              5
            ),
            mimeType: "image/png",
          },
        ],
        promptSafetyMetadata: {
          blocked: false,
        },
      };

      mockGenerateImages.mockResolvedValue(largeImageResponse);

      // Test 1536x1536 resolution
      await service.generateImage("Test", undefined, "1536x1536");

      expect(mockGenerateImages).toHaveBeenCalledWith({
        prompt: "Test",
        safetySettings: expect.any(Array),
        numberOfImages: 1,
        width: 1536,
        height: 1536,
      });
    });
  });
});

```

--------------------------------------------------------------------------------
/tests/unit/config/ConfigurationManager.multimodel.test.vitest.ts:
--------------------------------------------------------------------------------

```typescript
// Using vitest globals - see vitest.config.ts globals: true
import { ConfigurationManager } from "../../../src/config/ConfigurationManager.js";

describe("ConfigurationManager - Multi-Model Support", () => {
  let savedEnv: NodeJS.ProcessEnv;

  beforeEach(() => {
    savedEnv = { ...process.env };
    // Reset the singleton so each test builds a fresh configuration.
    ConfigurationManager["instance"] = null;
    vi.clearAllMocks();
  });

  afterEach(() => {
    process.env = savedEnv;
    ConfigurationManager["instance"] = null;
  });

  /**
   * Applies the baseline test environment (NODE_ENV + API key) plus any
   * per-test overrides, then builds a fresh ConfigurationManager and
   * returns its model configuration.
   */
  function modelConfigWith(overrides: Record<string, string> = {}) {
    process.env.NODE_ENV = "test";
    process.env.GOOGLE_GEMINI_API_KEY = "test-key";
    for (const [key, value] of Object.entries(overrides)) {
      process.env[key] = value;
    }
    return ConfigurationManager.getInstance().getModelConfiguration();
  }

  describe("Model Array Configuration", () => {
    it("should parse GOOGLE_GEMINI_MODELS array", () => {
      const config = modelConfigWith({
        GOOGLE_GEMINI_MODELS: '["gemini-1.5-flash", "gemini-1.5-pro"]',
      });

      expect(config.textGeneration).toEqual([
        "gemini-1.5-flash",
        "gemini-1.5-pro",
      ]);
    });

    it("should parse GOOGLE_GEMINI_IMAGE_MODELS array", () => {
      const config = modelConfigWith({
        GOOGLE_GEMINI_IMAGE_MODELS: '["imagen-3.0-generate-002"]',
      });

      expect(config.imageGeneration).toEqual(["imagen-3.0-generate-002"]);
    });

    it("should parse GOOGLE_GEMINI_CODE_MODELS array", () => {
      const config = modelConfigWith({
        GOOGLE_GEMINI_CODE_MODELS:
          '["gemini-2.5-pro-preview-05-06", "gemini-2.0-flash"]',
      });

      expect(config.codeReview).toEqual([
        "gemini-2.5-pro-preview-05-06",
        "gemini-2.0-flash",
      ]);
    });

    it("should handle invalid JSON gracefully", () => {
      // Malformed JSON must fall back to the built-in default list.
      const config = modelConfigWith({ GOOGLE_GEMINI_MODELS: "invalid-json" });

      expect(config.textGeneration).toEqual(["gemini-2.5-flash-preview-05-20"]);
    });

    it("should handle non-array JSON gracefully", () => {
      // Valid JSON of the wrong shape also falls back to the default.
      const config = modelConfigWith({
        GOOGLE_GEMINI_MODELS: '{"not": "array"}',
      });

      expect(config.textGeneration).toEqual(["gemini-2.5-flash-preview-05-20"]);
    });
  });

  describe("Routing Preferences", () => {
    it("should parse routing preferences correctly", () => {
      const config = modelConfigWith({
        GOOGLE_GEMINI_ROUTING_PREFER_COST: "true",
        GOOGLE_GEMINI_ROUTING_PREFER_SPEED: "false",
        GOOGLE_GEMINI_ROUTING_PREFER_QUALITY: "true",
      });

      expect(config.routing.preferCostEffective).toBe(true);
      expect(config.routing.preferSpeed).toBe(false);
      expect(config.routing.preferQuality).toBe(true);
    });

    it("should default to quality preference when none specified", () => {
      const config = modelConfigWith();

      expect(config.routing.preferCostEffective).toBe(false);
      expect(config.routing.preferSpeed).toBe(false);
      expect(config.routing.preferQuality).toBe(true);
    });
  });

  describe("Model Capabilities", () => {
    it("should provide correct capabilities for gemini-2.5-pro", () => {
      const config = modelConfigWith();
      const capabilities = config.capabilities["gemini-2.5-pro-preview-05-06"];

      expect(capabilities).toBeDefined();
      expect(capabilities.textGeneration).toBe(true);
      expect(capabilities.imageInput).toBe(true);
      expect(capabilities.codeExecution).toBe("excellent");
      expect(capabilities.complexReasoning).toBe("excellent");
      expect(capabilities.costTier).toBe("high");
      expect(capabilities.contextWindow).toBe(1048576);
    });

    it("should provide correct capabilities for imagen model", () => {
      const config = modelConfigWith();
      const capabilities = config.capabilities["imagen-3.0-generate-002"];

      expect(capabilities).toBeDefined();
      expect(capabilities.textGeneration).toBe(false);
      expect(capabilities.imageGeneration).toBe(true);
      expect(capabilities.codeExecution).toBe("none");
      expect(capabilities.complexReasoning).toBe("none");
    });
  });

  describe("Default Model Selection", () => {
    it("should use GOOGLE_GEMINI_DEFAULT_MODEL when provided", () => {
      const config = modelConfigWith({
        GOOGLE_GEMINI_DEFAULT_MODEL: "gemini-2.5-pro-preview-05-06",
      });

      expect(config.default).toBe("gemini-2.5-pro-preview-05-06");
    });

    it("should fallback to first text generation model when default not specified", () => {
      const config = modelConfigWith({
        GOOGLE_GEMINI_MODELS: '["gemini-1.5-pro", "gemini-1.5-flash"]',
      });

      expect(config.default).toBe("gemini-1.5-pro");
    });
  });

  describe("Complex Reasoning Models", () => {
    it("should filter high reasoning models correctly", () => {
      const config = modelConfigWith({
        GOOGLE_GEMINI_MODELS:
          '["gemini-2.5-pro-preview-05-06", "gemini-1.5-flash", "gemini-1.5-pro"]',
      });

      expect(config.complexReasoning).toContain("gemini-2.5-pro-preview-05-06");
      expect(config.complexReasoning).toContain("gemini-1.5-pro");
      expect(config.complexReasoning).not.toContain("gemini-1.5-flash");
    });
  });

  describe("Backward Compatibility", () => {
    it("should migrate single model to array format", () => {
      const config = modelConfigWith({ GOOGLE_GEMINI_MODEL: "gemini-1.5-pro" });

      expect(config.textGeneration).toContain("gemini-1.5-pro");
    });

    it("should use old GOOGLE_GEMINI_MODEL as fallback", () => {
      // Ensure the newer array variable is absent so the legacy one wins.
      delete process.env.GOOGLE_GEMINI_MODELS;
      const config = modelConfigWith({ GOOGLE_GEMINI_MODEL: "gemini-1.5-pro" });

      expect(config.textGeneration).toEqual(["gemini-1.5-pro"]);
    });
  });

  describe("Environment Variable Validation", () => {
    it("should handle missing environment variables gracefully", () => {
      const config = modelConfigWith();

      expect(config.default).toBeDefined();
      expect(config.textGeneration).toBeDefined();
      expect(config.imageGeneration).toBeDefined();
      expect(config.codeReview).toBeDefined();
    });

    it("should provide sensible defaults for image models", () => {
      const config = modelConfigWith();

      expect(config.imageGeneration).toContain("imagen-3.0-generate-002");
    });

    it("should provide sensible defaults for code models", () => {
      const config = modelConfigWith();

      expect(config.codeReview).toContain("gemini-2.5-pro-preview-05-06");
    });
  });
});

```

--------------------------------------------------------------------------------
/tests/unit/tools/mcpClientTool.test.vitest.ts:
--------------------------------------------------------------------------------

```typescript
// Using vitest globals - see vitest.config.ts globals: true
import { mcpClientTool } from "../../../src/tools/mcpClientTool.js";
import { ConfigurationManager } from "../../../src/config/ConfigurationManager.js";
import * as writeToFileModule from "../../../src/tools/writeToFileTool.js";

// Mock dependencies
vi.mock("../../../src/services/index.js");
vi.mock("../../../src/config/ConfigurationManager.js");
vi.mock("../../../src/tools/writeToFileTool.js");
vi.mock("uuid", () => ({
  v4: () => "test-uuid-123",
}));

describe("mcpClientTool", () => {
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  let mockMcpClientService: any;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  let mockConfigManager: any;

  beforeEach(() => {
    // Reset all mock call history so assertions are isolated per test.
    vi.clearAllMocks();

    // Setup mock McpClientService
    mockMcpClientService = {
      connect: vi.fn(),
      disconnect: vi.fn(),
      listTools: vi.fn(),
      callTool: vi.fn(),
      getServerInfo: vi.fn(),
    };

    // Setup mock ConfigurationManager
    // Supplies the fallback clientId/connectionToken used when a caller does
    // not pass them explicitly (verified in the Connect Operation tests).
    mockConfigManager = {
      getMcpConfig: vi.fn().mockReturnValue({
        clientId: "default-client-id",
        connectionToken: "default-token",
      }),
    };

    vi.mocked(ConfigurationManager.getInstance).mockReturnValue(
      mockConfigManager
    );
  });

  describe("Tool Configuration", () => {
    it("should have correct name and description", () => {
      expect(mcpClientTool.name).toBe("mcp_client");
      expect(mcpClientTool.description).toContain(
        "Manages MCP (Model Context Protocol) client connections"
      );
    });

    it("should have valid input schema", () => {
      expect(mcpClientTool.inputSchema).toBeDefined();
      expect(mcpClientTool.inputSchema._def.discriminator).toBe("operation");
    });
  });

  describe("Connect Operation", () => {
    it("should handle stdio connection", async () => {
      const mockServerInfo = { name: "Test Server", version: "1.0.0" };
      mockMcpClientService.connect.mockResolvedValue("connection-123");
      mockMcpClientService.getServerInfo.mockResolvedValue(mockServerInfo);

      const args = {
        operation: "connect_stdio" as const,
        transport: "stdio" as const,
        command: "node",
        args: ["server.js"],
        clientId: "custom-client-id",
      };

      const result = await mcpClientTool.execute(args, mockMcpClientService);

      expect(mockMcpClientService.connect).toHaveBeenCalledWith(
        "test-uuid-123",
        {
          type: "stdio",
          connectionToken: "default-token",
          stdioCommand: "node",
          stdioArgs: ["server.js"],
        }
      );

      expect(result.content[0].text).toBe(
        "Successfully connected to MCP server"
      );
      const resultData = JSON.parse(result.content[1].text);
      expect(resultData.connectionId).toBe("connection-123");
      expect(resultData.transport).toBe("stdio");
      expect(resultData.serverInfo).toEqual(mockServerInfo);
    });

    it("should handle SSE connection", async () => {
      const mockServerInfo = { name: "SSE Server", version: "2.0.0" };
      mockMcpClientService.connect.mockResolvedValue("sse-connection-456");
      mockMcpClientService.getServerInfo.mockResolvedValue(mockServerInfo);

      const args = {
        operation: "connect_sse" as const,
        transport: "sse" as const,
        url: "https://mcp-server.example.com/sse",
        connectionToken: "custom-token",
      };

      const result = await mcpClientTool.execute(args, mockMcpClientService);

      expect(mockMcpClientService.connect).toHaveBeenCalledWith(
        "test-uuid-123",
        {
          type: "sse",
          connectionToken: "custom-token",
          sseUrl: "https://mcp-server.example.com/sse",
        }
      );

      const resultData = JSON.parse(result.content[1].text);
      expect(resultData.connectionId).toBe("sse-connection-456");
      expect(resultData.transport).toBe("sse");
    });
  });

  describe("Disconnect Operation", () => {
    it("should handle disconnection", async () => {
      mockMcpClientService.disconnect.mockResolvedValue(undefined);

      const args = {
        operation: "disconnect" as const,
        connectionId: "connection-123",
      };

      const result = await mcpClientTool.execute(args, mockMcpClientService);

      expect(mockMcpClientService.disconnect).toHaveBeenCalledWith(
        "connection-123"
      );
      expect(result.content[0].text).toBe(
        "Successfully disconnected from MCP server"
      );

      const resultData = JSON.parse(result.content[1].text);
      expect(resultData.connectionId).toBe("connection-123");
      expect(resultData.status).toBe("disconnected");
    });
  });

  describe("List Tools Operation", () => {
    it("should list available tools", async () => {
      const mockTools = [
        { name: "tool1", description: "First tool" },
        { name: "tool2", description: "Second tool" },
      ];
      mockMcpClientService.listTools.mockResolvedValue(mockTools);

      const args = {
        operation: "list_tools" as const,
        connectionId: "connection-123",
      };

      const result = await mcpClientTool.execute(args, mockMcpClientService);

      expect(mockMcpClientService.listTools).toHaveBeenCalledWith(
        "connection-123"
      );
      expect(result.content[0].text).toContain("Available tools on connection");

      const toolsData = JSON.parse(result.content[1].text);
      expect(toolsData).toEqual(mockTools);
    });
  });

  describe("Call Tool Operation", () => {
    it("should call tool and return result", async () => {
      const mockResult = { status: "success", data: "Tool executed" };
      mockMcpClientService.callTool.mockResolvedValue(mockResult);

      const args = {
        operation: "call_tool" as const,
        connectionId: "connection-123",
        toolName: "exampleTool",
        toolParameters: { param1: "value1" },
        overwriteFile: true,
      };

      const result = await mcpClientTool.execute(args, mockMcpClientService);

      expect(mockMcpClientService.callTool).toHaveBeenCalledWith(
        "connection-123",
        "exampleTool",
        { param1: "value1" }
      );

      const resultData = JSON.parse(result.content[0].text);
      expect(resultData).toEqual(mockResult);
    });

    it("should write tool result to file when outputFilePath is provided", async () => {
      const mockResult = { status: "success", data: "Tool executed" };
      mockMcpClientService.callTool.mockResolvedValue(mockResult);

      vi.mocked(writeToFileModule.writeToFile.execute).mockResolvedValue({
        content: [{ type: "text", text: "File written" }],
      });

      const args = {
        operation: "call_tool" as const,
        connectionId: "connection-123",
        toolName: "exampleTool",
        toolParameters: {},
        outputFilePath: "/path/to/output.json",
        overwriteFile: true,
      };

      const result = await mcpClientTool.execute(args, mockMcpClientService);

      expect(mockMcpClientService.callTool).toHaveBeenCalled();
      expect(writeToFileModule.writeToFile.execute).toHaveBeenCalledWith({
        filePath: "/path/to/output.json",
        content: JSON.stringify(mockResult, null, 2),
        overwriteIfExists: true,
      });

      expect(result.content[0].text).toContain(
        "Tool exampleTool executed successfully"
      );
      expect(result.content[0].text).toContain("/path/to/output.json");
    });

    it("should handle string results", async () => {
      mockMcpClientService.callTool.mockResolvedValue("Simple string result");

      const args = {
        operation: "call_tool" as const,
        connectionId: "connection-123",
        toolName: "stringTool",
        toolParameters: {},
        overwriteFile: true,
      };

      const result = await mcpClientTool.execute(args, mockMcpClientService);

      expect(result.content[0].text).toBe("Simple string result");
    });

    it("should handle tool call errors", async () => {
      mockMcpClientService.callTool.mockRejectedValue(
        new Error("Tool not found")
      );

      const args = {
        operation: "call_tool" as const,
        connectionId: "connection-123",
        toolName: "nonExistentTool",
        toolParameters: {},
        overwriteFile: true,
      };

      await expect(
        mcpClientTool.execute(args, mockMcpClientService)
      ).rejects.toThrow();
    });
  });

  describe("Error Handling", () => {
    it("should handle connection errors", async () => {
      mockMcpClientService.connect.mockRejectedValue(
        new Error("Connection failed")
      );

      const args = {
        operation: "connect_stdio" as const,
        transport: "stdio" as const,
        command: "invalid-command",
      };

      await expect(
        mcpClientTool.execute(args, mockMcpClientService)
      ).rejects.toThrow();
    });

    it("should handle unknown operation", async () => {
      const args = {
        operation: "unknown",
        connectionId: "test-connection",
        toolName: "test-tool",
        overwriteFile: true,
      } as const;

      await expect(
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        mcpClientTool.execute(args as any, mockMcpClientService)
      ).rejects.toThrow("Unknown operation");
    });
  });
});

```

--------------------------------------------------------------------------------
/tests/unit/tools/geminiGenerateImageTool.test.vitest.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Test suite for geminiGenerateImageTool
 * Tests the tool integration with the fixed GeminiService.generateImage method
 */

// Using vitest globals - see vitest.config.ts globals: true
import type { GeminiService } from "../../../src/services/GeminiService.js";
import { geminiGenerateImageTool } from "../../../src/tools/geminiGenerateImageTool.js";
import type { ImageGenerationResult } from "../../../src/types/geminiServiceTypes.js";

describe("geminiGenerateImageTool", () => {
  let mockGeminiService: GeminiService;

  beforeEach(() => {
    // Fresh stub per test; only generateImage is exercised by this tool.
    mockGeminiService = {
      generateImage: vi.fn(),
    } as unknown as GeminiService;
  });

  describe("successful image generation", () => {
    it("should generate images with minimal parameters", async () => {
      const mockResult: ImageGenerationResult = {
        images: [
          {
            base64Data: "test-base64-data",
            mimeType: "image/png",
            width: 1024,
            height: 1024,
          },
        ],
      };

      (mockGeminiService.generateImage as any).mockResolvedValue(mockResult);

      const result = await geminiGenerateImageTool.execute(
        {
          prompt: "A beautiful landscape",
        },
        mockGeminiService
      );

      expect(mockGeminiService.generateImage).toHaveBeenCalledWith(
        "A beautiful landscape",
        undefined, // modelName
        undefined, // resolution
        undefined, // numberOfImages
        undefined, // safetySettings
        undefined, // negativePrompt
        undefined, // stylePreset
        undefined, // seed
        undefined, // styleStrength
        undefined, // preferQuality
        undefined // preferSpeed
      );

      expect(result).toEqual({
        content: [
          {
            type: "text",
            text: expect.stringContaining("Generated 1"),
          },
          {
            type: "image",
            mimeType: "image/png",
            data: "test-base64-data",
          },
        ],
      });
    });

    it("should generate images with all parameters", async () => {
      const mockResult: ImageGenerationResult = {
        images: [
          {
            base64Data: "test-base64-data-1",
            mimeType: "image/png",
            width: 512,
            height: 512,
          },
          {
            base64Data: "test-base64-data-2",
            mimeType: "image/png",
            width: 512,
            height: 512,
          },
        ],
      };

      (mockGeminiService.generateImage as any).mockResolvedValue(mockResult);

      const result = await geminiGenerateImageTool.execute(
        {
          prompt: "A cyberpunk city",
          modelName: "imagen-3.0-generate-002",
          resolution: "512x512",
          numberOfImages: 2,
          negativePrompt: "blurry, low quality",
          stylePreset: "photographic",
          seed: 12345,
          styleStrength: 0.8,
          modelPreferences: {
            preferQuality: true,
            preferSpeed: false,
          },
        },
        mockGeminiService
      );

      expect(mockGeminiService.generateImage).toHaveBeenCalledWith(
        "A cyberpunk city",
        "imagen-3.0-generate-002",
        "512x512",
        2,
        undefined, // safetySettings
        "blurry, low quality",
        "photorealistic",
        12345,
        0.8,
        true,
        false
      );

      expect(result).toEqual({
        content: [
          {
            type: "text",
            text: expect.stringContaining("Generated 2"),
          },
          {
            type: "image",
            mimeType: "image/png",
            data: "test-base64-data-1",
          },
          {
            type: "image",
            mimeType: "image/png",
            data: "test-base64-data-2",
          },
        ],
      });
    });

    it("should handle safety settings", async () => {
      const mockResult: ImageGenerationResult = {
        images: [
          {
            base64Data: "test-base64-data-safety",
            mimeType: "image/png",
            width: 1024,
            height: 1024,
          },
        ],
      };

      (mockGeminiService.generateImage as any).mockResolvedValue(mockResult);

      const safetySettings = [
        {
          category: "HARM_CATEGORY_DANGEROUS_CONTENT" as const,
          threshold: "BLOCK_LOW_AND_ABOVE" as const,
        },
      ];

      const result = await geminiGenerateImageTool.execute(
        {
          prompt: "A safe image",
          safetySettings,
        },
        mockGeminiService
      );

      expect(mockGeminiService.generateImage).toHaveBeenCalledWith(
        "A safe image",
        undefined, // modelName
        undefined, // resolution
        undefined, // numberOfImages
        safetySettings,
        undefined, // negativePrompt
        undefined, // stylePreset
        undefined, // seed
        undefined, // styleStrength
        undefined, // preferQuality
        undefined // preferSpeed
      );

      expect(result).toEqual({
        content: [
          {
            type: "text",
            text: expect.stringContaining("Generated 1"),
          },
          {
            type: "image",
            mimeType: "image/png",
            data: "test-base64-data-safety",
          },
        ],
      });
    });
  });

  describe("error handling", () => {
    it("should handle content filter errors", async () => {
      const { GeminiContentFilterError } = await import(
        "../../../src/utils/geminiErrors.js"
      );

      (mockGeminiService.generateImage as any).mockRejectedValue(
        new GeminiContentFilterError("Content blocked by safety filters", [
          "HARM_CATEGORY_DANGEROUS_CONTENT",
        ])
      );

      const result = await geminiGenerateImageTool.execute(
        {
          prompt: "Inappropriate content",
        },
        mockGeminiService
      );

      expect(result).toEqual({
        content: [
          {
            type: "text",
            text: expect.stringContaining("Content blocked by safety filters"),
          },
        ],
        isError: true,
      });
    });

    it("should handle validation errors", async () => {
      const { GeminiValidationError } = await import(
        "../../../src/utils/geminiErrors.js"
      );

      (mockGeminiService.generateImage as any).mockRejectedValue(
        new GeminiValidationError("Invalid prompt", "prompt")
      );

      const result = await geminiGenerateImageTool.execute(
        {
          prompt: "",
        },
        mockGeminiService
      );

      expect(result).toEqual({
        content: [
          {
            type: "text",
            text: expect.stringContaining("Invalid prompt"),
          },
        ],
        isError: true,
      });
    });

    it("should handle model errors", async () => {
      const { GeminiModelError } = await import(
        "../../../src/utils/geminiErrors.js"
      );

      (mockGeminiService.generateImage as any).mockRejectedValue(
        new GeminiModelError("Model unavailable", "imagen-3.0-generate-002")
      );

      const result = await geminiGenerateImageTool.execute(
        {
          prompt: "A test image",
          modelName: "imagen-3.0-generate-002",
        },
        mockGeminiService
      );

      expect(result).toEqual({
        content: [
          {
            type: "text",
            text: expect.stringContaining("Model unavailable"),
          },
        ],
        isError: true,
      });
    });

    it("should handle generic errors", async () => {
      (mockGeminiService.generateImage as any).mockRejectedValue(
        new Error("Network error")
      );

      const result = await geminiGenerateImageTool.execute(
        {
          prompt: "A test image",
        },
        mockGeminiService
      );

      expect(result).toEqual({
        content: [
          {
            type: "text",
            text: expect.stringContaining("Network error"),
          },
        ],
        isError: true,
      });
    });
  });

  describe("parameter validation", () => {
    it("should validate prompt is required", async () => {
      const result = await geminiGenerateImageTool.execute(
        {} as any,
        mockGeminiService
      );

      expect(result).toEqual({
        content: [
          {
            type: "text",
            text: expect.stringContaining("prompt"),
          },
        ],
        isError: true,
      });
    });

    it("should validate numberOfImages range", async () => {
      const result = await geminiGenerateImageTool.execute(
        {
          prompt: "Test",
          numberOfImages: 10, // Exceeds maximum
        },
        mockGeminiService
      );

      expect(result).toEqual({
        content: [
          {
            type: "text",
            text: expect.stringContaining("numberOfImages"),
          },
        ],
        isError: true,
      });
    });

    it("should validate resolution format", async () => {
      const result = await geminiGenerateImageTool.execute(
        {
          prompt: "Test",
          resolution: "800x600" as any, // Invalid resolution
        },
        mockGeminiService
      );

      expect(result).toEqual({
        content: [
          {
            type: "text",
            text: expect.stringContaining("resolution"),
          },
        ],
        isError: true,
      });
    });
  });
});

```

--------------------------------------------------------------------------------
/src/tools/geminiChatTool.ts:
--------------------------------------------------------------------------------

```typescript
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { CallToolResult } from "@modelcontextprotocol/sdk/types.js";
import { z } from "zod";
import {
  GEMINI_CHAT_TOOL_NAME,
  GEMINI_CHAT_TOOL_DESCRIPTION,
  GEMINI_CHAT_PARAMS,
} from "./geminiChatParams.js";
import { GeminiService } from "../services/index.js";
import { logger } from "../utils/index.js";
import { mapAnyErrorToMcpError } from "../utils/errors.js";
import { BlockedReason, FinishReason } from "@google/genai";
import type {
  Content,
  GenerationConfig,
  SafetySetting,
  Tool,
  ToolConfig,
  GenerateContentResponse,
} from "@google/genai";

// Define the type for the arguments object based on the Zod schema
type GeminiChatArgs = z.infer<z.ZodObject<typeof GEMINI_CHAT_PARAMS>>;

/**
 * Registers the gemini_chat tool with the MCP server.
 * This consolidated tool handles chat session management including starting sessions,
 * sending messages, and sending function results.
 *
 * @param server - The McpServer instance.
 * @param serviceInstance - An instance of the GeminiService.
 */
export const geminiChatTool = (
  server: McpServer,
  serviceInstance: GeminiService
): void => {
  /**
   * Processes the request for the gemini_chat tool.
   * @param args - The arguments object matching GEMINI_CHAT_PARAMS.
   * @returns The result content for MCP.
   */
  const processRequest = async (args: unknown): Promise<CallToolResult> => {
    // NOTE(review): this cast is unchecked here; it assumes the MCP server
    // validated args against GEMINI_CHAT_PARAMS before invoking — confirm.
    const typedArgs = args as GeminiChatArgs;
    logger.debug(`Received ${GEMINI_CHAT_TOOL_NAME} request:`, {
      operation: typedArgs.operation,
      sessionId: typedArgs.sessionId,
      modelName: typedArgs.modelName,
    });

    try {
      // Validate required fields based on operation
      if (
        typedArgs.operation === "send_message" ||
        typedArgs.operation === "send_function_result"
      ) {
        if (!typedArgs.sessionId) {
          throw new Error(
            `sessionId is required for operation '${typedArgs.operation}'`
          );
        }
      }

      if (typedArgs.operation === "send_message" && !typedArgs.message) {
        throw new Error("message is required for operation 'send_message'");
      }

      if (
        typedArgs.operation === "send_function_result" &&
        !typedArgs.functionResponses
      ) {
        throw new Error(
          "functionResponses is required for operation 'send_function_result'"
        );
      }

      // Handle different operations
      switch (typedArgs.operation) {
        case "start": {
          // Start a new chat session
          // startChatSession returns the new session id synchronously (no await).
          const sessionId = serviceInstance.startChatSession({
            modelName: typedArgs.modelName,
            history: typedArgs.history as Content[] | undefined,
            generationConfig: typedArgs.generationConfig as
              | GenerationConfig
              | undefined,
            safetySettings: typedArgs.safetySettings as
              | SafetySetting[]
              | undefined,
            tools: typedArgs.tools as Tool[] | undefined,
            systemInstruction: typedArgs.systemInstruction,
            cachedContentName: typedArgs.cachedContentName,
          });

          logger.info(
            `Successfully started chat session ${sessionId} for model ${typedArgs.modelName || "default"}`
          );

          // The new session id is the entire payload for "start".
          return {
            content: [
              {
                type: "text" as const,
                text: JSON.stringify({ sessionId }),
              },
            ],
          };
        }

        case "send_message": {
          // Send a message to an existing chat session
          // sessionId/message presence was verified above, hence the `!`.
          const response: GenerateContentResponse =
            await serviceInstance.sendMessageToSession({
              sessionId: typedArgs.sessionId!,
              message: typedArgs.message!,
              generationConfig: typedArgs.generationConfig as
                | GenerationConfig
                | undefined,
              safetySettings: typedArgs.safetySettings as
                | SafetySetting[]
                | undefined,
              tools: typedArgs.tools as Tool[] | undefined,
              toolConfig: typedArgs.toolConfig as ToolConfig | undefined,
              cachedContentName: typedArgs.cachedContentName,
            });

          // Process the response
          return processGenerateContentResponse(response, typedArgs.sessionId!);
        }

        case "send_function_result": {
          // Send function results to an existing chat session
          // Note: The service expects a string, so we stringify the array of function responses
          const response: GenerateContentResponse =
            await serviceInstance.sendFunctionResultToSession({
              sessionId: typedArgs.sessionId!,
              functionResponse: JSON.stringify(typedArgs.functionResponses),
              functionCall: undefined, // Could be enhanced to pass original function call
            });

          // Process the response
          return processGenerateContentResponse(
            response,
            typedArgs.sessionId!,
            true
          );
        }

        default:
          throw new Error(`Invalid operation: ${typedArgs.operation}`);
      }
    } catch (error: unknown) {
      // All failures (validation above, service errors) are mapped to MCP
      // error types before being rethrown to the server.
      logger.error(`Error processing ${GEMINI_CHAT_TOOL_NAME}:`, error);
      throw mapAnyErrorToMcpError(error, GEMINI_CHAT_TOOL_NAME);
    }
  };

  /**
   * Helper function to process GenerateContentResponse into MCP format.
   * Returns an isError result for safety blocks, missing candidates, and
   * empty content; otherwise returns either a functionCall JSON payload or
   * the concatenated text parts.
   */
  function processGenerateContentResponse(
    response: GenerateContentResponse,
    sessionId: string,
    isFunctionResult: boolean = false
  ): CallToolResult {
    // NOTE(review): when context is "" the interpolated messages below end up
    // with a doubled space (e.g. "settings . Reason"). Cosmetic only.
    const context = isFunctionResult ? "after function result" : "";

    // Check for prompt safety blocks
    if (response.promptFeedback?.blockReason === BlockedReason.SAFETY) {
      logger.warn(
        `Gemini prompt blocked due to SAFETY for session ${sessionId} ${context}.`
      );
      return {
        content: [
          {
            type: "text",
            text: `Error: Prompt blocked due to safety settings ${context}. Reason: ${response.promptFeedback.blockReason}`,
          },
        ],
        isError: true,
      };
    }

    const firstCandidate = response?.candidates?.[0];

    // Check for candidate safety blocks or other non-STOP finish reasons
    // (MAX_TOKENS is treated as an acceptable, if truncated, completion).
    if (
      firstCandidate?.finishReason &&
      firstCandidate.finishReason !== FinishReason.STOP &&
      firstCandidate.finishReason !== FinishReason.MAX_TOKENS
    ) {
      if (firstCandidate.finishReason === FinishReason.SAFETY) {
        logger.warn(
          `Gemini response stopped due to SAFETY for session ${sessionId} ${context}.`
        );
        return {
          content: [
            {
              type: "text",
              text: `Error: Response generation stopped due to safety settings ${context}. FinishReason: ${firstCandidate.finishReason}`,
            },
          ],
          isError: true,
        };
      }
      // Other finish reasons (e.g. RECITATION) are logged but still processed.
      logger.warn(
        `Gemini response finished with reason ${firstCandidate.finishReason} for session ${sessionId} ${context}.`
      );
    }

    if (!firstCandidate) {
      logger.error(
        `No candidates returned by Gemini for session ${sessionId} ${context}.`
      );
      return {
        content: [
          {
            type: "text",
            text: `Error: No response candidates returned by the model ${context}.`,
          },
        ],
        isError: true,
      };
    }

    // Extract the content from the first candidate
    const content = firstCandidate.content;
    if (!content || !content.parts || content.parts.length === 0) {
      logger.error(
        `Empty content returned by Gemini for session ${sessionId} ${context}.`
      );
      return {
        content: [
          {
            type: "text",
            text: `Error: Empty response from the model ${context}.`,
          },
        ],
        isError: true,
      };
    }

    // Initialize result object
    let resultText = "";
    let functionCall = null;

    // Process each part in the response
    // Text parts are concatenated; only the last functionCall part is kept.
    for (const part of content.parts) {
      if (part.text && typeof part.text === "string") {
        resultText += part.text;
      } else if (part.functionCall) {
        // Capture function call if present
        functionCall = part.functionCall;
        logger.debug(
          `Function call requested by model in session ${sessionId}: ${functionCall.name}`
        );
      }
    }

    // Handle function call responses
    // A function call takes precedence over any accumulated text.
    if (functionCall) {
      return {
        content: [
          {
            type: "text",
            text: JSON.stringify({ functionCall }),
          },
        ],
      };
    }

    // Return text response
    if (resultText) {
      return {
        content: [
          {
            type: "text",
            text: resultText,
          },
        ],
      };
    }

    // Fallback error
    logger.error(
      `Unexpected response structure from Gemini for session ${sessionId} ${context}.`
    );
    return {
      content: [
        {
          type: "text",
          text: `Error: Unexpected response structure from the model ${context}.`,
        },
      ],
      isError: true,
    };
  }

  // Register the tool with the server
  server.tool(
    GEMINI_CHAT_TOOL_NAME,
    GEMINI_CHAT_TOOL_DESCRIPTION,
    GEMINI_CHAT_PARAMS,
    processRequest
  );

  logger.info(`Tool registered: ${GEMINI_CHAT_TOOL_NAME}`);
};

```

--------------------------------------------------------------------------------
/src/tools/schemas/CommonSchemas.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Common Schemas
 *
 * This file contains shared schema definitions used across multiple tools.
 * Centralize all reusable schema components here to avoid duplication.
 */
import { z } from "zod";

// --- Safety Settings ---

/**
 * Categories of harmful content
 */
// Category names accepted by the Gemini safety API.
const HARM_CATEGORY_VALUES = [
  "HARM_CATEGORY_UNSPECIFIED",
  "HARM_CATEGORY_HATE_SPEECH",
  "HARM_CATEGORY_SEXUALLY_EXPLICIT",
  "HARM_CATEGORY_HARASSMENT",
  "HARM_CATEGORY_DANGEROUS_CONTENT",
] as const;

export const HarmCategorySchema = z
  .enum(HARM_CATEGORY_VALUES)
  .describe("Category of harmful content to apply safety settings for.");

/**
 * Thresholds for blocking harmful content
 */
// Threshold names accepted by the Gemini safety API.
const HARM_BLOCK_THRESHOLD_VALUES = [
  "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
  "BLOCK_LOW_AND_ABOVE",
  "BLOCK_MEDIUM_AND_ABOVE",
  "BLOCK_ONLY_HIGH",
  "BLOCK_NONE",
] as const;

export const HarmBlockThresholdSchema = z
  .enum(HARM_BLOCK_THRESHOLD_VALUES)
  .describe(
    "Threshold for blocking harmful content. Higher thresholds block more content."
  );

/**
 * Safety setting for controlling content safety
 */
export const SafetySettingSchema = z
  .object({
    // Pairs one harm category with the blocking threshold applied to it.
    category: HarmCategorySchema,
    threshold: HarmBlockThresholdSchema,
  })
  .describe(
    "Setting for controlling content safety for a specific harm category."
  );

// --- Generation Configuration ---

/**
 * Configuration for controlling model reasoning
 */
export const ThinkingConfigSchema = z
  .object({
    thinkingBudget: z
      .number()
      .int()
      .min(0)
      .max(24576)
      .optional()
      .describe(
        "Controls the amount of reasoning the model performs. Range: 0-24576. Lower values provide faster responses, higher values improve complex reasoning."
      ),
    // reasoningEffort is a coarse alternative to thinkingBudget; presumably
    // the service maps it onto a token budget — verify in GeminiService.
    reasoningEffort: z
      .enum(["none", "low", "medium", "high"])
      .optional()
      .describe(
        "Simplified control over model reasoning. Options: none (0 tokens), low (1K tokens), medium (8K tokens), high (24K tokens)."
      ),
  })
  .optional()
  .describe("Optional configuration for controlling model reasoning.");

/**
 * Base generation configuration object (without optional wrapper)
 */
const BaseGenerationConfigSchema = z.object({
  // NOTE(review): temperature is capped at 1 here; some Gemini models accept
  // values up to 2 — confirm the intended range.
  temperature: z
    .number()
    .min(0)
    .max(1)
    .optional()
    .describe(
      "Controls randomness. Lower values (~0.2) make output more deterministic, higher values (~0.8) make it more creative. Default varies by model."
    ),
  topP: z
    .number()
    .min(0)
    .max(1)
    .optional()
    .describe(
      "Nucleus sampling parameter. The model considers only tokens with probability mass summing to this value. Default varies by model."
    ),
  topK: z
    .number()
    .int()
    .min(1)
    .optional()
    .describe(
      "Top-k sampling parameter. The model considers the k most probable tokens. Default varies by model."
    ),
  maxOutputTokens: z
    .number()
    .int()
    .min(1)
    .optional()
    .describe("Maximum number of tokens to generate in the response."),
  stopSequences: z
    .array(z.string())
    .optional()
    .describe("Sequences where the API will stop generating further tokens."),
  thinkingConfig: ThinkingConfigSchema,
});

/**
 * Configuration for controlling text generation.
 *
 * Optional wrapper around the base object so tools can accept the whole
 * generationConfig as an optional field.
 */
export const GenerationConfigSchema =
  BaseGenerationConfigSchema.optional().describe(
    "Optional configuration for controlling the generation process."
  );

// --- Function Calling Schemas ---

/**
 * Supported parameter types for function declarations.
 * Mirrors the uppercase OpenAPI-style type names used in function
 * declarations sent to the model.
 */
export const FunctionParameterTypeSchema = z
  .enum(["OBJECT", "STRING", "NUMBER", "BOOLEAN", "ARRAY", "INTEGER"])
  .describe("The data type of the function parameter.");

/**
 * Base function parameter schema without recursive elements.
 * Superseded by the recursive FunctionParameterSchema below; kept
 * commented out for reference only.
 */
// const BaseFunctionParameterSchema = z.object({
//   type: FunctionParameterTypeSchema,
//   description: z
//     .string()
//     .optional()
//     .describe("Description of the parameter's purpose."),
//   enum: z
//     .array(z.string())
//     .optional()
//     .describe("Allowed string values for an ENUM-like parameter."),
// });

/**
 * Inferred type for function parameter structure.
 * Declared by hand (rather than via z.infer) so it can serve as the
 * explicit type argument for the recursive schema below.
 */
export type FunctionParameter = {
  type: "OBJECT" | "STRING" | "NUMBER" | "BOOLEAN" | "ARRAY" | "INTEGER";
  description?: string;
  enum?: string[];
  properties?: Record<string, FunctionParameter>;
  required?: string[];
  items?: FunctionParameter;
};

/**
 * Function parameter schema (supports recursive definitions)
 * Uses z.lazy() for proper recursive handling while maintaining type safety.
 *
 * Both the declared annotation and the trailing `as` cast pin the type to
 * z.ZodSchema<FunctionParameter>. The cast looks redundant next to the
 * annotation, but it may be compensating for zod's inferred
 * optional-property types not matching FunctionParameter exactly —
 * NOTE(review): verify before removing either one.
 */
export const FunctionParameterSchema: z.ZodSchema<FunctionParameter> = z
  .lazy(() =>
    z.object({
      type: FunctionParameterTypeSchema,
      description: z
        .string()
        .optional()
        .describe("Description of the parameter's purpose."),
      enum: z
        .array(z.string())
        .optional()
        .describe("Allowed string values for an ENUM-like parameter."),
      properties: z.record(FunctionParameterSchema).optional(),
      required: z
        .array(z.string())
        .optional()
        .describe("List of required property names for OBJECT types."),
      items: FunctionParameterSchema.optional().describe(
        "Defines the schema for items if the parameter type is ARRAY."
      ),
    })
  )
  .describe(
    "Schema defining a single parameter for a function declaration, potentially recursive."
  ) as z.ZodSchema<FunctionParameter>;

/**
 * Type assertion to ensure schema produces correct types.
 * Expected to be structurally identical to FunctionParameter above.
 */
export type InferredFunctionParameter = z.infer<typeof FunctionParameterSchema>;

/**
 * Schema for parameter properties in function declarations.
 * A map of property name -> (possibly recursive) parameter schema.
 */
export const FunctionParameterPropertiesSchema = z
  .record(FunctionParameterSchema)
  .describe("Defines nested properties if the parameter type is OBJECT.");

/**
 * Schema for a complete function declaration.
 * The top-level `parameters` structure is constrained to type OBJECT,
 * matching the function-calling contract the model expects.
 */
export const FunctionDeclarationSchema = z
  .object({
    name: z.string().min(1).describe("The name of the function to be called."),
    description: z
      .string()
      .min(1)
      .describe("A description of what the function does."),
    parameters: z
      .object({
        type: z
          .literal("OBJECT")
          .describe("The top-level parameters structure must be an OBJECT."),
        properties: FunctionParameterPropertiesSchema.describe(
          "Defines the parameters the function accepts."
        ),
        required: z
          .array(z.string())
          .optional()
          .describe("List of required parameter names at the top level."),
      })
      .describe("Schema defining the parameters the function accepts."),
  })
  .describe(
    "Declaration of a single function that the Gemini model can request to call."
  );

/**
 * Schema for tool configuration in function calling.
 * Controls whether, and which of, the declared functions the model may call.
 */
export const ToolConfigSchema = z
  .object({
    functionCallingConfig: z
      .object({
        mode: z
          .enum(["AUTO", "ANY", "NONE"])
          .optional()
          .describe("The function calling mode."),
        allowedFunctionNames: z
          .array(z.string())
          .optional()
          .describe("Optional list of function names allowed."),
      })
      .optional(),
  })
  .describe("Configuration for how tools should be used.");

// --- File Operation Schemas ---

/**
 * Common schema for file paths.
 * Only non-emptiness is validated here; the allowed-directory restriction
 * mentioned in the description is enforced elsewhere — NOTE(review):
 * confirm where that check lives.
 */
export const FilePathSchema = z
  .string()
  .min(1, "File path cannot be empty.")
  .describe("The path to the file. Must be within allowed directories.");

/**
 * Schema for file overwrite parameter.
 * Defaults to false so existing files are never clobbered implicitly.
 */
export const FileOverwriteSchema = z
  .boolean()
  .optional()
  .default(false)
  .describe(
    "Optional. If true, will overwrite the file if it already exists. Defaults to false."
  );

/**
 * Common encoding options. Defaults to utf8.
 */
export const EncodingSchema = z
  .enum(["utf8", "base64"])
  .optional()
  .default("utf8")
  .describe("Encoding of the content. Defaults to utf8.");

// --- Other Common Schemas ---

/**
 * Optional model name; when omitted the server performs automatic
 * model selection instead.
 */
export const ModelNameSchema = z
  .string()
  .min(1)
  .optional()
  .describe(
    "Optional. The name of the Gemini model to use. If omitted, the server will intelligently select the optimal model."
  );

/**
 * Caller preferences that steer automatic model selection.
 * All fields are optional hints, not binding constraints.
 */
export const ModelPreferencesSchema = z
  .object({
    preferQuality: z
      .boolean()
      .optional()
      .describe("Prefer high-quality models for better results"),
    preferSpeed: z
      .boolean()
      .optional()
      .describe("Prefer fast models for quicker responses"),
    preferCost: z
      .boolean()
      .optional()
      .describe("Prefer cost-effective models to minimize usage costs"),
    complexityHint: z
      .enum(["simple", "medium", "complex"])
      .optional()
      .describe(
        "Hint about the complexity of the task to help with model selection"
      ),
    taskType: z
      .enum([
        "text-generation",
        "image-generation",
        "code-review",
        "multimodal",
        "reasoning",
      ])
      .optional()
      .describe("Type of task to optimize model selection for"),
  })
  .optional()
  .describe("Optional preferences for intelligent model selection");

/**
 * Required, non-empty prompt text.
 */
export const PromptSchema = z
  .string()
  .min(1)
  .describe("Required. The text prompt to send to the Gemini model.");

/**
 * Generation config plus model-selection preferences.
 * Extends the base (un-wrapped) schema, then makes the whole object optional.
 */
export const EnhancedGenerationConfigSchema = BaseGenerationConfigSchema.extend(
  {
    modelPreferences: ModelPreferencesSchema,
  }
)
  .optional()
  .describe(
    "Extended generation configuration with model selection preferences"
  );

/**
 * Pairs an (optional) model name with an (optional) task type so the two
 * can be validated for compatibility together.
 * NOTE(review): the taskType enum duplicates the one in
 * ModelPreferencesSchema — consider extracting a shared constant.
 */
export const ModelValidationSchema = z
  .object({
    modelName: ModelNameSchema,
    taskType: z
      .enum([
        "text-generation",
        "image-generation",
        "code-review",
        "multimodal",
        "reasoning",
      ])
      .optional(),
  })
  .describe("Validation schema for model and task compatibility");

```

--------------------------------------------------------------------------------
/tests/integration/multiModelIntegration.test.vitest.ts:
--------------------------------------------------------------------------------

```typescript
/// <reference types="../../vitest-globals.d.ts" />
// Using vitest globals - see vitest.config.ts globals: true
import { ConfigurationManager } from "../../src/config/ConfigurationManager.js";
import { ModelSelectionService } from "../../src/services/ModelSelectionService.js";

// Integration tests covering the multi-model configuration pipeline:
// environment variables -> ConfigurationManager -> ModelSelectionService.
describe("Multi-Model Integration Tests", () => {
  let originalEnv: NodeJS.ProcessEnv;

  beforeEach(() => {
    // Snapshot the environment so each test can mutate it freely.
    originalEnv = { ...process.env };
    // Reset the singleton so each test rebuilds configuration from the env
    // vars set below (bracket access reaches the private static field).
    ConfigurationManager["instance"] = null;

    process.env.NODE_ENV = "test";
    process.env.GOOGLE_GEMINI_API_KEY = "test-api-key";
    process.env.MCP_SERVER_HOST = "localhost";
    process.env.MCP_SERVER_PORT = "8080";
    process.env.MCP_CONNECTION_TOKEN = "test-token";
    process.env.MCP_CLIENT_ID = "test-client";

    vi.clearAllMocks();
  });

  afterEach(() => {
    // Restore the snapshot and drop the singleton so later suites are not
    // affected by this file's env mutations.
    process.env = originalEnv;
    ConfigurationManager["instance"] = null;
  });

  describe("End-to-End Configuration Flow", () => {
    it("should properly configure and use multi-model setup", () => {
      // Model lists are supplied as JSON-encoded string arrays.
      process.env.GOOGLE_GEMINI_MODELS =
        '["gemini-2.5-pro-preview-05-06", "gemini-1.5-flash"]';
      process.env.GOOGLE_GEMINI_IMAGE_MODELS = '["imagen-3.0-generate-002"]';
      process.env.GOOGLE_GEMINI_CODE_MODELS =
        '["gemini-2.5-pro-preview-05-06"]';
      process.env.GOOGLE_GEMINI_DEFAULT_MODEL = "gemini-1.5-flash";
      process.env.GOOGLE_GEMINI_ROUTING_PREFER_QUALITY = "true";

      const configManager = ConfigurationManager.getInstance();
      const modelConfig = configManager.getModelConfiguration();
      const selectionService = new ModelSelectionService(modelConfig);

      expect(modelConfig.textGeneration).toEqual([
        "gemini-2.5-pro-preview-05-06",
        "gemini-1.5-flash",
      ]);
      expect(modelConfig.imageGeneration).toEqual(["imagen-3.0-generate-002"]);
      expect(modelConfig.codeReview).toEqual(["gemini-2.5-pro-preview-05-06"]);
      expect(modelConfig.default).toBe("gemini-1.5-flash");
      expect(modelConfig.routing.preferQuality).toBe(true);

      expect(
        selectionService.isModelAvailable("gemini-2.5-pro-preview-05-06")
      ).toBe(true);
      expect(selectionService.isModelAvailable("imagen-3.0-generate-002")).toBe(
        true
      );
    });

    it("should handle model selection for different task types", async () => {
      process.env.GOOGLE_GEMINI_MODELS =
        '["gemini-2.5-pro-preview-05-06", "gemini-1.5-flash"]';
      process.env.GOOGLE_GEMINI_IMAGE_MODELS = '["imagen-3.0-generate-002"]';

      const configManager = ConfigurationManager.getInstance();
      const modelConfig = configManager.getModelConfiguration();
      const selectionService = new ModelSelectionService(modelConfig);

      const textModel = await selectionService.selectOptimalModel({
        taskType: "text-generation",
        complexityLevel: "simple",
      });

      const imageModel = await selectionService.selectOptimalModel({
        taskType: "image-generation",
      });

      const codeModel = await selectionService.selectOptimalModel({
        taskType: "code-review",
        complexityLevel: "complex",
      });

      // Text generation may resolve to either configured model; image and
      // complex code-review selections are expected to be deterministic.
      expect(["gemini-2.5-pro-preview-05-06", "gemini-1.5-flash"]).toContain(
        textModel
      );
      expect(imageModel).toBe("imagen-3.0-generate-002");
      expect(codeModel).toBe("gemini-2.5-pro-preview-05-06");
    });
  });

  describe("Backward Compatibility Integration", () => {
    it("should seamlessly migrate from single model configuration", () => {
      // Legacy single-model env var should be folded into the new config.
      process.env.GOOGLE_GEMINI_MODEL = "gemini-1.5-pro";

      const configManager = ConfigurationManager.getInstance();
      const modelConfig = configManager.getModelConfiguration();

      expect(modelConfig.textGeneration).toContain("gemini-1.5-pro");
      expect(modelConfig.default).toBe("gemini-1.5-pro");
    });

    it("should provide defaults when no models are specified", () => {
      const configManager = ConfigurationManager.getInstance();
      const modelConfig = configManager.getModelConfiguration();

      expect(modelConfig.textGeneration.length).toBeGreaterThan(0);
      expect(modelConfig.imageGeneration.length).toBeGreaterThan(0);
      expect(modelConfig.codeReview.length).toBeGreaterThan(0);
      expect(modelConfig.default).toBeDefined();
    });
  });

  describe("Performance and Reliability", () => {
    it("should handle model selection performance metrics", async () => {
      process.env.GOOGLE_GEMINI_MODELS =
        '["gemini-2.5-pro-preview-05-06", "gemini-1.5-flash"]';

      const configManager = ConfigurationManager.getInstance();
      const modelConfig = configManager.getModelConfiguration();
      const selectionService = new ModelSelectionService(modelConfig);

      // Record five fast, successful calls for the flash model...
      selectionService.updatePerformanceMetrics("gemini-1.5-flash", 500, true);
      selectionService.updatePerformanceMetrics("gemini-1.5-flash", 450, true);
      selectionService.updatePerformanceMetrics("gemini-1.5-flash", 550, true);
      selectionService.updatePerformanceMetrics("gemini-1.5-flash", 480, true);
      selectionService.updatePerformanceMetrics("gemini-1.5-flash", 520, true);

      // ...and five slow, failed calls for the pro model, so a
      // speed-preferring selection should favor flash below.
      selectionService.updatePerformanceMetrics(
        "gemini-2.5-pro-preview-05-06",
        2000,
        false
      );
      selectionService.updatePerformanceMetrics(
        "gemini-2.5-pro-preview-05-06",
        1800,
        false
      );
      selectionService.updatePerformanceMetrics(
        "gemini-2.5-pro-preview-05-06",
        2200,
        false
      );
      selectionService.updatePerformanceMetrics(
        "gemini-2.5-pro-preview-05-06",
        1900,
        false
      );
      selectionService.updatePerformanceMetrics(
        "gemini-2.5-pro-preview-05-06",
        2100,
        false
      );

      const selectedModel = await selectionService.selectOptimalModel({
        taskType: "text-generation",
        preferSpeed: true,
      });

      expect(selectedModel).toBe("gemini-1.5-flash");

      const performanceMetrics = selectionService.getPerformanceMetrics();
      expect(performanceMetrics.has("gemini-1.5-flash")).toBe(true);
      expect(performanceMetrics.has("gemini-2.5-pro-preview-05-06")).toBe(true);
    });

    it("should maintain selection history", async () => {
      const configManager = ConfigurationManager.getInstance();
      const modelConfig = configManager.getModelConfiguration();
      const selectionService = new ModelSelectionService(modelConfig);

      await selectionService.selectOptimalModel({
        taskType: "text-generation",
      });
      await selectionService.selectOptimalModel({ taskType: "code-review" });
      await selectionService.selectOptimalModel({ taskType: "reasoning" });

      // History entries are expected in call order.
      const history = selectionService.getSelectionHistory();
      expect(history.length).toBe(3);
      expect(history[0].criteria.taskType).toBe("text-generation");
      expect(history[1].criteria.taskType).toBe("code-review");
      expect(history[2].criteria.taskType).toBe("reasoning");
    });
  });

  describe("Error Handling and Edge Cases", () => {
    it("should handle invalid model configurations gracefully", () => {
      // Malformed JSON and wrong-shape values must not crash configuration.
      process.env.GOOGLE_GEMINI_MODELS = "invalid-json";
      process.env.GOOGLE_GEMINI_IMAGE_MODELS = '{"not": "array"}';

      expect(() => {
        const configManager = ConfigurationManager.getInstance();
        const modelConfig = configManager.getModelConfiguration();
        new ModelSelectionService(modelConfig);
      }).not.toThrow();
    });

    it("should fallback gracefully when no models match criteria", async () => {
      // Only a text model is configured, so an image-generation request
      // should fall through to the caller-provided fallback.
      process.env.GOOGLE_GEMINI_MODELS = '["gemini-1.5-flash"]';

      const configManager = ConfigurationManager.getInstance();
      const modelConfig = configManager.getModelConfiguration();
      const selectionService = new ModelSelectionService(modelConfig);

      const model = await selectionService.selectOptimalModel({
        taskType: "image-generation",
        fallbackModel: "fallback-model",
      });

      expect(model).toBe("fallback-model");
    });

    it("should handle empty model arrays", () => {
      process.env.GOOGLE_GEMINI_MODELS = "[]";

      const configManager = ConfigurationManager.getInstance();
      const modelConfig = configManager.getModelConfiguration();

      // An empty list is replaced with the built-in default model.
      expect(modelConfig.textGeneration).toEqual(["gemini-1.5-flash"]);
    });
  });

  describe("Configuration Validation", () => {
    it("should validate model capabilities consistency", () => {
      const configManager = ConfigurationManager.getInstance();
      const modelConfig = configManager.getModelConfiguration();

      // Every capabilities entry must conform to the expected field types
      // and enumerated tiers.
      Object.entries(modelConfig.capabilities).forEach(
        ([_modelName, capabilities]) => {
          expect(typeof capabilities.textGeneration).toBe("boolean");
          expect(typeof capabilities.imageInput).toBe("boolean");
          expect(typeof capabilities.supportsFunctionCalling).toBe("boolean");
          expect(["none", "basic", "good", "excellent"]).toContain(
            capabilities.codeExecution
          );
          expect(["none", "basic", "good", "excellent"]).toContain(
            capabilities.complexReasoning
          );
          expect(["low", "medium", "high"]).toContain(capabilities.costTier);
          expect(["fast", "medium", "slow"]).toContain(capabilities.speedTier);
          expect(typeof capabilities.maxTokens).toBe("number");
          expect(typeof capabilities.contextWindow).toBe("number");
        }
      );
    });

    it("should ensure all configured models have capabilities defined", () => {
      const configManager = ConfigurationManager.getInstance();
      const modelConfig = configManager.getModelConfiguration();

      const allConfiguredModels = [
        ...modelConfig.textGeneration,
        ...modelConfig.imageGeneration,
        ...modelConfig.codeReview,
        ...modelConfig.complexReasoning,
      ];

      allConfiguredModels.forEach((modelName) => {
        expect(modelConfig.capabilities[modelName]).toBeDefined();
      });
    });
  });
});

```

--------------------------------------------------------------------------------
/tests/utils/test-generators.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Test data generators for MCP Gemini Server tests
 *
 * This module provides functions to generate test data dynamically for various test scenarios,
 * making tests more flexible and comprehensive.
 */

import { randomBytes } from "node:crypto";
import { join } from "node:path";
import { writeFile } from "node:fs/promises";

/**
 * Options for generating test prompts
 */
export interface PromptGeneratorOptions {
  /** Minimum length of the prompt */
  minLength?: number;
  /** Maximum length of the prompt */
  maxLength?: number;
  /** Whether to include questions in the prompt */
  includeQuestions?: boolean;
  /** Topic area for the prompt content */
  topic?: "general" | "technical" | "creative" | "unsafe";
}

/** Pick a uniformly random element from a non-empty array. */
function pickRandom<T>(items: readonly T[]): T {
  return items[Math.floor(Math.random() * items.length)];
}

/**
 * Generate a random test prompt with specified characteristics.
 *
 * The result is guaranteed to satisfy both length bounds: the prompt is
 * padded until it reaches `minLength` and then truncated to `maxLength`
 * (truncation takes precedence if the two constraints conflict, matching
 * the original ordering). The previous implementation appended a single
 * fixed sentence for padding, which could still fall short of large
 * `minLength` values; padding now loops until the bound is met.
 *
 * @param options - Configuration for the generated prompt
 * @returns A random test prompt string
 */
export function generateTestPrompt(
  options: PromptGeneratorOptions = {}
): string {
  const {
    minLength = 10,
    maxLength = 100,
    includeQuestions = false,
    topic = "general",
  } = options;

  // Dictionary of starter phrases by topic
  const starters: Record<string, string[]> = {
    general: [
      "Tell me about",
      "I would like to know more about",
      "Can you explain",
      "Please provide information on",
      "What is",
    ],
    technical: [
      "Explain how to implement",
      "What is the best way to code",
      "Write a function for",
      "How does the algorithm for",
      "Describe the architecture of",
    ],
    creative: [
      "Write a story about",
      "Create a poem inspired by",
      "Imagine a world where",
      "Describe a character who",
      "Write a scene set in",
    ],
    unsafe: [
      "How can I hack into",
      "Tell me how to make a dangerous",
      "Write instructions for creating illegal",
      "What is the easiest way to harm",
      "Provide detailed steps for bypassing security",
    ],
  };

  // Dictionary of subjects by topic
  const subjects: Record<string, string[]> = {
    general: [
      "the history of jazz music",
      "renewable energy sources",
      "ancient Egyptian culture",
      "the process of photosynthesis",
      "climate change impacts",
      "modern architecture trends",
    ],
    technical: [
      "a REST API using Node.js",
      "machine learning algorithms",
      "secure password storage",
      "database indexing strategies",
      "efficient sorting algorithms",
      "containerization with Docker",
    ],
    creative: [
      "a detective solving an impossible case",
      "a journey through an enchanted forest",
      "robots developing consciousness",
      "the last day on Earth",
      "a conversation between the sun and moon",
    ],
    unsafe: [
      "computer systems without permission",
      "explosive devices using household items",
      "documents for identity theft",
      "people or animals effectively",
      "content filters and detection systems",
    ],
  };

  // Dictionary of follow-ups by topic
  const followUps: Record<string, string[]> = {
    general: [
      "Focus on recent developments.",
      "Include historical context.",
      "Explain it simply.",
      "Provide specific examples.",
      "Compare different perspectives.",
    ],
    technical: [
      "Include code examples.",
      "Focus on performance considerations.",
      "Explain security implications.",
      "Compare different approaches.",
      "Explain for someone with beginner knowledge.",
    ],
    creative: [
      "Make it emotionally moving.",
      "Use vivid descriptions.",
      "Include an unexpected twist.",
      "Focus on character development.",
      "Set a specific mood or atmosphere.",
    ],
    unsafe: [
      "Make it as detailed as possible.",
      "Focus on methods that are hard to detect.",
      "Ensure the instructions are complete.",
      "Include ways to avoid getting caught.",
      "Make it seem legitimate.",
    ],
  };

  // Build the prompt from one random element of each dictionary.
  let prompt = `${pickRandom(starters[topic])} ${pickRandom(subjects[topic])}. ${pickRandom(followUps[topic])}`;

  // Add a question if requested
  if (includeQuestions) {
    const questions = [
      "Why is this important?",
      "How does this work in practice?",
      "What are the main challenges?",
      "Can you provide more details?",
      "What should I know about this?",
    ];
    prompt += ` ${pickRandom(questions)}`;
  }

  // Pad until the minimum length is actually reached.
  while (prompt.length < minLength) {
    prompt += " Please provide a comprehensive explanation with more detail.";
  }

  // Truncate to the maximum length, ending with a period.
  if (prompt.length > maxLength) {
    prompt = prompt.substring(0, maxLength - 1) + ".";
  }

  return prompt;
}

/**
 * Generate a temporary test file with generated content.
 *
 * Text files (`text/plain`) are filled with repeated human-readable filler;
 * any other MIME type receives cryptographically random bytes. The file is
 * written under `<cwd>/tests/<directory>/<fileName>`.
 *
 * @param fileName - Name of the file (without path)
 * @param sizeInKb - Size of the file in kilobytes
 * @param mimeType - MIME type of the file (determines content type)
 * @param directory - Directory (under tests/) to create the file in
 * @returns Promise resolving to the full path of the created file
 */
export async function generateTestFile(
  fileName: string,
  sizeInKb: number = 10,
  mimeType: string = "text/plain",
  directory: string = "resources"
): Promise<string> {
  const byteCount = sizeInKb * 1024;

  // Readable filler for text files; random bytes for everything else.
  const payload: Buffer =
    mimeType === "text/plain"
      ? Buffer.from(buildFillerText(byteCount))
      : randomBytes(byteCount);

  const targetPath = join(process.cwd(), "tests", directory, fileName);
  await writeFile(targetPath, payload);
  return targetPath;
}

/** Repeat a fixed sentence until exactly `byteCount` characters are produced. */
function buildFillerText(byteCount: number): string {
  const sentence =
    "This is test content for the MCP Gemini Server test suite. ";
  let filler = "";
  while (filler.length < byteCount) {
    filler += sentence;
  }
  return filler.substring(0, byteCount);
}

/**
 * Generate a test content array for chat or content generation.
 *
 * Messages alternate between "user" (even indices) and "model" (odd
 * indices). Every message has a text part; when `includeImages` is set,
 * roughly half of the user messages also carry a small inline JPEG payload.
 *
 * @param messageCount - Number of messages to include
 * @param includeImages - Whether to include image parts
 * @returns An array of message objects
 */
export function generateTestContentArray(
  messageCount: number = 3,
  includeImages: boolean = false
): Array<{
  role: string;
  parts: Array<{
    text?: string;
    inline_data?: { mime_type: string; data: string };
  }>;
}> {
  // 1x1 transparent PNG used as a stand-in image payload.
  const placeholderImage =
    "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==";

  return Array.from({ length: messageCount }, (_, index) => {
    const fromUser = index % 2 === 0;

    const parts: Array<{
      text?: string;
      inline_data?: { mime_type: string; data: string };
    }> = [{ text: generateTestPrompt({ minLength: 20, maxLength: 100 }) }];

    // Attach an image to about half of the user messages when enabled.
    if (includeImages && fromUser && Math.random() > 0.5) {
      parts.push({
        inline_data: {
          mime_type: "image/jpeg",
          data: placeholderImage,
        },
      });
    }

    return { role: fromUser ? "user" : "model", parts };
  });
}

/**
 * Build a mock function-call payload for tool-calling tests.
 *
 * @param functionName - Name of the function being "called"
 * @param args - Argument map to attach (defaults to an empty object)
 * @returns An object with the function name and its arguments
 */
export function generateFunctionCall(
  functionName: string,
  args: Record<string, unknown> = {}
): { name: string; args: Record<string, unknown> } {
  const call = { name: functionName, args };
  return call;
}

/**
 * Generate mock bounding box data for object detection tests.
 *
 * Each entry pairs a randomly chosen label with a random box on a
 * 0-1000 normalized grid (clamped at the right/bottom edges) and a
 * confidence value in [0.7, 1.0).
 *
 * @param objectCount - Number of objects to generate
 * @returns Array of objects with bounding boxes
 */
export function generateBoundingBoxes(objectCount: number = 3): Array<{
  label: string;
  boundingBox: { xMin: number; yMin: number; xMax: number; yMax: number };
  confidence: number;
}> {
  const commonObjects = [
    "dog",
    "cat",
    "person",
    "car",
    "chair",
    "table",
    "book",
    "bottle",
    "cup",
    "laptop",
    "phone",
    "plant",
    "bird",
  ];

  return Array.from({ length: objectCount }, () => {
    // Random top-left corner plus a random 50-249 unit extent.
    const left = Math.floor(Math.random() * 800);
    const top = Math.floor(Math.random() * 800);
    const boxWidth = Math.floor(Math.random() * 200) + 50;
    const boxHeight = Math.floor(Math.random() * 200) + 50;

    return {
      label: commonObjects[Math.floor(Math.random() * commonObjects.length)],
      boundingBox: {
        xMin: left,
        yMin: top,
        // Clamp so boxes never extend past the normalized grid.
        xMax: Math.min(left + boxWidth, 1000),
        yMax: Math.min(top + boxHeight, 1000),
      },
      confidence: Math.random() * 0.3 + 0.7, // always in [0.7, 1.0)
    };
  });
}

/**
 * Generate a small demo image as a base64 string.
 *
 * Returns one of three tiny but valid PNGs (no data-URI prefix); they do
 * not look like much but are sufficient for exercising image handling.
 *
 * @param type - Type of image to generate (simple patterns)
 * @returns Base64 encoded image data
 */
export function generateBase64Image(
  type: "pixel" | "gradient" | "checkerboard" = "pixel"
): string {
  switch (type) {
    // 1x1 pixel transparent PNG
    case "pixel":
      return "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mP8z8BQDwAEhQGAhKmMIQAAAABJRU5ErkJggg==";
    // 2x2 gradient PNG
    case "gradient":
      return "iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAYAAABytg0kAAAAFElEQVQIW2P8z8Dwn4EIwMDAwMAAACbKAxI3gV+CAAAAAElFTkSuQmCC";
    // 4x4 checkerboard PNG
    case "checkerboard":
      return "iVBORw0KGgoAAAANSUhEUgAAAAQAAAAECAYAAACp8Z5+AAAAG0lEQVQIW2NkYGD4z8DAwMgABUwM0QCQHiYGAFULAgVoHvmSAAAAAElFTkSuQmCC";
  }
}

```

--------------------------------------------------------------------------------
/tests/unit/services/gemini/ThinkingBudget.test.vitest.ts:
--------------------------------------------------------------------------------

```typescript
// Using vitest globals - see vitest.config.ts globals: true
import { GeminiContentService } from "../../../../src/services/gemini/GeminiContentService.js";
import { GeminiChatService } from "../../../../src/services/gemini/GeminiChatService.js";
import { GenerateContentResponse } from "@google/genai";
import { FinishReason } from "../../../../src/types/googleGenAITypes.js";

// Create a partial type for mocking GenerateContentResponse
// (tests populate only the fields they assert on).
type PartialGenerateContentResponse = Partial<GenerateContentResponse>;

// Define extended generation config type for tests.
// Used only for `as` casts on test inputs; presumably mirrors the service's
// generation config shape plus the thinkingConfig extension — confirm against
// the service's own types if they change.
interface ExtendedGenerationConfig {
  temperature?: number;
  thinkingConfig?: {
    thinkingBudget?: number;
    reasoningEffort?: "none" | "low" | "medium" | "high";
  };
}

// Verifies that thinking-budget settings (an explicit thinkingBudget or the
// simplified reasoningEffort level) are forwarded into the request config
// passed to the underlying GoogleGenAI client by both the content service
// and the chat service.
describe("Thinking Budget Feature", () => {
  // Create a properly typed mock requestConfig for assertions
  interface MockRequestConfig {
    thinkingConfig?: {
      thinkingBudget?: number;
    };
  }

  // Mock GoogleGenAI — only the two model methods the services call.
  const mockGenerateContentMethod = vi.fn((_config?: MockRequestConfig) => ({
    text: "Mock response from generateContent",
  }));

  const mockGenAI = {
    models: {
      generateContent: mockGenerateContentMethod,
      generateContentStream: vi.fn(async function* () {
        yield { text: "Mock response from generateContentStream" };
      }),
    },
  };

  // Reset mocks before each test
  beforeEach(() => {
    vi.clearAllMocks();
  });

  describe("GeminiContentService", () => {
    it("should apply thinking budget from generationConfig", async () => {
      // Arrange
      const service = new GeminiContentService(
        mockGenAI as any,
        "gemini-1.5-pro"
      );

      // Act
      await service.generateContent({
        prompt: "Test prompt",
        generationConfig: {
          temperature: 0.7,
          thinkingConfig: {
            thinkingBudget: 5000,
          },
        } as ExtendedGenerationConfig,
      });

      // Assert
      expect(mockGenerateContentMethod).toHaveBeenCalledTimes(1);
      // Get mock arguments safely with null checks
      const args = mockGenerateContentMethod.mock.calls[0];
      expect(args).toBeTruthy();

      const requestConfig = args[0];
      expect(requestConfig).toBeTruthy();
      expect(requestConfig?.thinkingConfig).toBeTruthy();
      expect(requestConfig?.thinkingConfig?.thinkingBudget).toBe(5000);
    });

    it("should map reasoningEffort to thinkingBudget values", async () => {
      // Arrange
      const service = new GeminiContentService(
        mockGenAI as any,
        "gemini-1.5-pro"
      );

      // Test different reasoning effort values.
      // Expected reasoningEffort → token-budget mapping:
      // none=0, low=1024, medium=8192, high=24576.
      const testCases = [
        { reasoningEffort: "none", expectedBudget: 0 },
        { reasoningEffort: "low", expectedBudget: 1024 },
        { reasoningEffort: "medium", expectedBudget: 8192 },
        { reasoningEffort: "high", expectedBudget: 24576 },
      ];

      for (const testCase of testCases) {
        // Clear call history so toHaveBeenCalledTimes(1) holds per case.
        vi.clearAllMocks();

        // Act
        await service.generateContent({
          prompt: "Test prompt",
          generationConfig: {
            thinkingConfig: {
              reasoningEffort: testCase.reasoningEffort as
                | "none"
                | "low"
                | "medium"
                | "high",
            },
          } as ExtendedGenerationConfig,
        });

        // Assert
        expect(mockGenerateContentMethod).toHaveBeenCalledTimes(1);
        // Get mock arguments safely with null checks
        const args = mockGenerateContentMethod.mock.calls[0];
        expect(args).toBeTruthy();

        const requestConfig = args[0];
        expect(requestConfig).toBeTruthy();
        expect(requestConfig?.thinkingConfig).toBeTruthy();
        expect(requestConfig?.thinkingConfig?.thinkingBudget).toBe(
          testCase.expectedBudget
        );
      }
    });

    it("should apply default thinking budget when provided", async () => {
      // Arrange — third constructor argument is the service-wide default budget.
      const defaultThinkingBudget = 3000;
      const service = new GeminiContentService(
        mockGenAI as any,
        "gemini-1.5-pro",
        defaultThinkingBudget
      );

      // Act — no per-request config, so the default should be used.
      await service.generateContent({
        prompt: "Test prompt",
      });

      // Assert
      expect(mockGenerateContentMethod).toHaveBeenCalledTimes(1);
      // Get mock arguments safely with null checks
      const args = mockGenerateContentMethod.mock.calls[0];
      expect(args).toBeTruthy();

      const requestConfig = args[0];
      expect(requestConfig).toBeTruthy();
      expect(requestConfig?.thinkingConfig).toBeTruthy();
      expect(requestConfig?.thinkingConfig?.thinkingBudget).toBe(
        defaultThinkingBudget
      );
    });

    it("should prioritize generationConfig thinking budget over default", async () => {
      // Arrange — both a service default and a per-request budget are set;
      // the per-request value must win.
      const defaultThinkingBudget = 3000;
      const configThinkingBudget = 8000;
      const service = new GeminiContentService(
        mockGenAI as any,
        "gemini-1.5-pro",
        defaultThinkingBudget
      );

      // Act
      await service.generateContent({
        prompt: "Test prompt",
        generationConfig: {
          thinkingConfig: {
            thinkingBudget: configThinkingBudget,
          },
        } as ExtendedGenerationConfig,
      });

      // Assert
      expect(mockGenerateContentMethod).toHaveBeenCalledTimes(1);
      // Get mock arguments safely with null checks
      const args = mockGenerateContentMethod.mock.calls[0];
      expect(args).toBeTruthy();

      const requestConfig = args[0];
      expect(requestConfig).toBeTruthy();
      expect(requestConfig?.thinkingConfig).toBeTruthy();
      expect(requestConfig?.thinkingConfig?.thinkingBudget).toBe(
        configThinkingBudget
      );
    });
  });

  describe("GeminiChatService", () => {
    // Mock for chat service with proper typing — unlike the content-service
    // mock above, this one returns a candidates/promptFeedback structure.
    const mockChatGenerateContentMethod = vi.fn(
      (
        _config?: MockRequestConfig
      ): Promise<PartialGenerateContentResponse> => {
        const response: PartialGenerateContentResponse = {
          candidates: [
            {
              content: {
                role: "model",
                parts: [{ text: "Mock chat response" }],
              },
              finishReason: FinishReason.STOP,
            },
          ],
          promptFeedback: {},
        };

        // Define the text property as a getter function
        // (presumably mirroring how the SDK response exposes `text` lazily).
        Object.defineProperty(response, "text", {
          get: function () {
            return "Mock chat response";
          },
        });

        return Promise.resolve(response);
      }
    );

    const mockChatGenAI = {
      models: {
        generateContent: mockChatGenerateContentMethod,
      },
    };

    beforeEach(() => {
      vi.clearAllMocks();
    });

    it("should apply thinking budget to chat session", async () => {
      // Arrange
      const chatService = new GeminiChatService(
        mockChatGenAI as any,
        "gemini-1.5-pro"
      );

      // Act — budget set at session start should apply to later messages.
      const sessionId = chatService.startChatSession({
        generationConfig: {
          temperature: 0.7,
          thinkingConfig: {
            thinkingBudget: 6000,
          },
        } as ExtendedGenerationConfig,
      });

      await chatService.sendMessageToSession({
        sessionId,
        message: "Hello",
      });

      // Assert
      expect(mockChatGenerateContentMethod).toHaveBeenCalledTimes(1);
      // Get mock arguments safely with null checks
      const args = mockChatGenerateContentMethod.mock.calls[0];
      expect(args).toBeTruthy();

      const requestConfig = args[0];
      expect(requestConfig).toBeTruthy();
      expect(requestConfig?.thinkingConfig).toBeTruthy();
      expect(requestConfig?.thinkingConfig?.thinkingBudget).toBe(6000);
    });

    it("should map reasoningEffort to thinkingBudget in chat session", async () => {
      // Arrange
      const chatService = new GeminiChatService(
        mockChatGenAI as any,
        "gemini-1.5-pro"
      );

      // Test different reasoning effort values — same mapping as the
      // content-service test above.
      const testCases = [
        { reasoningEffort: "none", expectedBudget: 0 },
        { reasoningEffort: "low", expectedBudget: 1024 },
        { reasoningEffort: "medium", expectedBudget: 8192 },
        { reasoningEffort: "high", expectedBudget: 24576 },
      ];

      for (const testCase of testCases) {
        vi.clearAllMocks();

        // Act
        const sessionId = chatService.startChatSession({
          generationConfig: {
            thinkingConfig: {
              reasoningEffort: testCase.reasoningEffort as
                | "none"
                | "low"
                | "medium"
                | "high",
            },
          } as ExtendedGenerationConfig,
        });

        await chatService.sendMessageToSession({
          sessionId,
          message: "Hello",
        });

        // Assert
        expect(mockChatGenerateContentMethod).toHaveBeenCalledTimes(1);
        // Get mock arguments safely with null checks
        const args = mockChatGenerateContentMethod.mock.calls[0];
        expect(args).toBeTruthy();

        const requestConfig = args[0];
        expect(requestConfig).toBeTruthy();
        expect(requestConfig?.thinkingConfig).toBeTruthy();
        expect(requestConfig?.thinkingConfig?.thinkingBudget).toBe(
          testCase.expectedBudget
        );
      }
    });

    it("should override session thinking budget with message thinking budget", async () => {
      // Arrange
      const chatService = new GeminiChatService(
        mockChatGenAI as any,
        "gemini-1.5-pro"
      );

      // Act — session default 3000, per-message 8000; the message-level
      // value must take precedence.
      const sessionId = chatService.startChatSession({
        generationConfig: {
          thinkingConfig: {
            thinkingBudget: 3000,
          },
        } as ExtendedGenerationConfig,
      });

      await chatService.sendMessageToSession({
        sessionId,
        message: "Hello",
        generationConfig: {
          thinkingConfig: {
            thinkingBudget: 8000,
          },
        } as ExtendedGenerationConfig,
      });

      // Assert
      expect(mockChatGenerateContentMethod).toHaveBeenCalledTimes(1);
      // Get mock arguments safely with null checks
      const args = mockChatGenerateContentMethod.mock.calls[0];
      expect(args).toBeTruthy();

      const requestConfig = args[0];
      expect(requestConfig).toBeTruthy();
      expect(requestConfig?.thinkingConfig).toBeTruthy();
      expect(requestConfig?.thinkingConfig?.thinkingBudget).toBe(8000);
    });
  });
});

```

--------------------------------------------------------------------------------
/src/tools/geminiChatParams.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from "zod";
import {
  ModelNameSchema,
  ModelPreferencesSchema,
} from "./schemas/CommonSchemas.js";

// Tool Name
export const GEMINI_CHAT_TOOL_NAME = "gemini_chat";

// Tool Description (template literal — leading/trailing newlines are part
// of the string; keep the formatting intact when editing).
export const GEMINI_CHAT_TOOL_DESCRIPTION = `
Manages stateful chat sessions with Google Gemini models. This consolidated tool supports three operations:
- start: Initiates a new chat session with optional history and configuration
- send_message: Sends a text message to an existing chat session
- send_function_result: Sends function execution results back to a chat session
Each operation returns appropriate responses including session IDs, model responses, or function call requests.
`;

// Operation enum for chat actions; selects which branch of GEMINI_CHAT_PARAMS
// applies (see the per-field descriptions below for required combinations).
export const chatOperationSchema = z
  .enum(["start", "send_message", "send_function_result"])
  .describe("The chat operation to perform");

// Zod Schema for thinking configuration (reused from content generation).
// Either an exact token budget (thinkingBudget) or a named level
// (reasoningEffort) may be supplied; the 24576 ceiling matches the "high"
// level described below.
export const thinkingConfigSchema = z
  .object({
    thinkingBudget: z
      .number()
      .int()
      .min(0)
      .max(24576)
      .optional()
      .describe(
        "Controls the amount of reasoning the model performs. Range: 0-24576. Lower values provide faster responses, higher values improve complex reasoning."
      ),
    reasoningEffort: z
      .enum(["none", "low", "medium", "high"])
      .optional()
      .describe(
        "Simplified control over model reasoning. Options: none (0 tokens), low (1K tokens), medium (8K tokens), high (24K tokens)."
      ),
  })
  .optional()
  .describe("Optional configuration for controlling model reasoning.");

// Generation config schema.
// Validates sampling parameters before they are forwarded to the Gemini API.
// Note: temperature accepts 0-2 to match the Gemini API's documented range
// (the previous max of 1 rejected valid values for current models).
export const generationConfigSchema = z
  .object({
    temperature: z
      .number()
      .min(0)
      .max(2)
      .optional()
      .describe(
        "Controls randomness. Lower values (~0.2) make output more deterministic, higher values (~0.8) make it more creative. Range 0-2; default varies by model."
      ),
    topP: z
      .number()
      .min(0)
      .max(1)
      .optional()
      .describe(
        "Nucleus sampling parameter. The model considers only tokens with probability mass summing to this value. Default varies by model."
      ),
    topK: z
      .number()
      .int()
      .min(1)
      .optional()
      .describe(
        "Top-k sampling parameter. The model considers the k most probable tokens. Default varies by model."
      ),
    maxOutputTokens: z
      .number()
      .int()
      .min(1)
      .optional()
      .describe("Maximum number of tokens to generate in the response."),
    stopSequences: z
      .array(z.string())
      .optional()
      .describe("Sequences where the API will stop generating further tokens."),
    // Nested reasoning controls; see thinkingConfigSchema above.
    thinkingConfig: thinkingConfigSchema,
  })
  .optional()
  .describe("Optional configuration for controlling the generation process.");

// Safety setting schemas.
// A safety setting pairs a harm category with the threshold at which the API
// should block content in that category.
export const harmCategorySchema = z
  .enum([
    "HARM_CATEGORY_UNSPECIFIED",
    "HARM_CATEGORY_HATE_SPEECH",
    "HARM_CATEGORY_SEXUALLY_EXPLICIT",
    "HARM_CATEGORY_HARASSMENT",
    "HARM_CATEGORY_DANGEROUS_CONTENT",
  ])
  .describe("Category of harmful content to apply safety settings for.");

export const harmBlockThresholdSchema = z
  .enum([
    "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
    "BLOCK_LOW_AND_ABOVE",
    "BLOCK_MEDIUM_AND_ABOVE",
    "BLOCK_ONLY_HIGH",
    "BLOCK_NONE",
  ])
  .describe(
    "Threshold for blocking harmful content. Higher thresholds block more content."
  );

export const safetySettingSchema = z
  .object({
    category: harmCategorySchema,
    threshold: harmBlockThresholdSchema,
  })
  .describe(
    "Setting for controlling content safety for a specific harm category."
  );

// History schemas for chat initialization.
// These are intentionally narrower than the full Content type: only text
// parts are accepted when seeding a session's history.
const historyPartSchema = z
  .object({
    text: z.string().describe("Text content of the part."),
    // Note: Could add other part types like inlineData, functionCall, functionResponse later
  })
  .describe(
    "A part of a historical message, primarily text for initialization."
  );

const historyContentSchema = z
  .object({
    role: z
      .enum(["user", "model"])
      .describe(
        "The role of the entity that generated this content (user or model)."
      ),
    parts: z
      .array(historyPartSchema)
      .min(1) // at least one part per turn
      .describe("An array of Parts making up the message content."),
  })
  .describe("A single message turn in the conversation history.");

// Function declaration schemas (for tools).
// Parameters are recursive (OBJECT properties / ARRAY items can themselves be
// parameters), so the schema is built in two steps: a non-recursive base plus
// a z.lazy() extension, with an explicit z.ZodType annotation because zod
// cannot infer recursive types on its own.
const functionParameterTypeSchema = z
  .enum(["OBJECT", "STRING", "NUMBER", "BOOLEAN", "ARRAY", "INTEGER"])
  .describe("The data type of the function parameter.");

// Non-recursive fields shared by every parameter node.
const baseFunctionParameterSchema = z.object({
  type: functionParameterTypeSchema,
  description: z
    .string()
    .optional()
    .describe("Description of the parameter's purpose."),
  enum: z
    .array(z.string())
    .optional()
    .describe("Allowed string values for an ENUM-like parameter."),
});

// Manually-declared recursive type matching the schema below.
type FunctionParameterSchemaType = z.infer<
  typeof baseFunctionParameterSchema
> & {
  properties?: { [key: string]: FunctionParameterSchemaType };
  required?: string[];
  items?: FunctionParameterSchemaType;
};

const functionParameterSchema: z.ZodType<FunctionParameterSchemaType> =
  baseFunctionParameterSchema
    .extend({
      // z.lazy defers evaluation so the schema can reference itself.
      properties: z.lazy(() => z.record(functionParameterSchema).optional()),
      required: z.lazy(() =>
        z
          .array(z.string())
          .optional()
          .describe("List of required property names for OBJECT types.")
      ),
      items: z.lazy(() =>
        functionParameterSchema
          .optional()
          .describe(
            "Defines the schema for items if the parameter type is ARRAY."
          )
      ),
    })
    .describe(
      "Schema defining a single parameter for a function declaration, potentially recursive."
    );

const functionParameterPropertiesSchema = z
  .record(functionParameterSchema)
  .describe("Defines nested properties if the parameter type is OBJECT.");

// A complete function declaration: name, description, and an OBJECT-typed
// top-level parameters schema (per the function-calling API contract).
export const functionDeclarationSchema = z
  .object({
    name: z.string().min(1).describe("The name of the function to be called."),
    description: z
      .string()
      .min(1)
      .describe("A description of what the function does."),
    parameters: z
      .object({
        type: z
          .literal("OBJECT")
          .describe("The top-level parameters structure must be an OBJECT."),
        properties: functionParameterPropertiesSchema.describe(
          "Defines the parameters the function accepts."
        ),
        required: z
          .array(z.string())
          .optional()
          .describe("List of required parameter names at the top level."),
      })
      .describe("Schema defining the parameters the function accepts."),
  })
  .describe(
    "Declaration of a single function that the Gemini model can request to call."
  );

// Tool schema for chat: a tool is a bundle of function declarations.
const toolSchema = z
  .object({
    functionDeclarations: z
      .array(functionDeclarationSchema)
      .optional()
      .describe("List of function declarations for this tool."),
  })
  .describe("Represents a tool definition containing function declarations.");

// Tool config schema — controls how/whether the model may call functions.
const functionCallingConfigSchema = z
  .object({
    mode: z
      .enum(["AUTO", "ANY", "NONE"])
      .optional()
      .describe("The function calling mode."),
    allowedFunctionNames: z
      .array(z.string())
      .optional()
      .describe("Optional list of function names allowed."),
  })
  .optional();

const toolConfigSchema = z
  .object({
    functionCallingConfig: functionCallingConfigSchema,
  })
  .optional()
  .describe(
    "Optional. Per-request tool configuration, e.g., to force function calling mode."
  );

// Function response schema for send_function_result — the client reports the
// outcome of a function call the model previously requested.
const functionResponseInputSchema = z
  .object({
    name: z
      .string()
      .min(1)
      .describe(
        "Required. The name of the function that was called by the model."
      ),
    response: z
      .record(z.unknown())
      .describe(
        "Required. The JSON object result returned by the function execution."
      ),
  })
  .describe(
    "Represents the result of a single function execution to be sent back to the model."
  );

// System instruction schema — text-only parts guiding model behavior for the
// whole session (used by the 'start' operation).
const systemInstructionSchema = z
  .object({
    parts: z.array(
      z.object({
        text: z.string(),
      })
    ),
  })
  .optional()
  .describe(
    "Optional. A system instruction to guide the model's behavior for the entire session."
  );

// Main parameters schema with conditional fields based on operation.
// Fields are all optional at the schema level; which ones are actually
// required depends on `operation` (enforced by the tool handler at runtime,
// not by zod — see the per-field descriptions).
export const GEMINI_CHAT_PARAMS = {
  operation: chatOperationSchema,

  // Common fields used across operations
  sessionId: z
    .string()
    .uuid()
    .optional()
    .describe(
      "Required for 'send_message' and 'send_function_result'. The unique identifier of the chat session."
    ),

  // Fields for 'start' operation
  modelName: ModelNameSchema.optional().describe(
    "Optional for 'start'. The name of the Gemini model to use for this chat session. If omitted, uses server default."
  ),
  history: z
    .array(historyContentSchema)
    .optional()
    .describe(
      "Optional for 'start'. Initial conversation turns to seed the chat session. Must alternate between 'user' and 'model' roles."
    ),
  systemInstruction: systemInstructionSchema,

  // Fields for 'send_message' operation
  message: z
    .string()
    .min(1)
    .optional()
    .describe(
      "Required for 'send_message'. The text message content to send to the model."
    ),

  // Fields for 'send_function_result' operation
  functionResponses: z
    .array(functionResponseInputSchema)
    .min(1)
    .optional()
    .describe(
      "Required for 'send_function_result'. Array containing the results of function calls executed by the client. Note: This array is JSON.stringify'd before being passed to the Gemini API."
    ),

  // Shared optional configuration fields
  tools: z
    .array(toolSchema)
    .optional()
    .describe(
      "Optional. Tools (function declarations) the model may use. For 'start', sets session-wide tools. For 'send_message', overrides session tools for this turn."
    ),
  toolConfig: toolConfigSchema,
  generationConfig: generationConfigSchema,
  safetySettings: z
    .array(safetySettingSchema)
    .optional()
    .describe(
      "Optional. Safety settings to apply. For 'start', sets session-wide settings. For other operations, overrides session settings for this turn."
    ),
  cachedContentName: z
    .string()
    .min(1)
    .optional()
    .describe(
      "Optional. Identifier for cached content in format 'cachedContents/...' to use with this operation."
    ),
  modelPreferences: ModelPreferencesSchema.optional(),
};

// Type helper: the inferred argument type for the gemini_chat tool.
export type GeminiChatArgs = z.infer<z.ZodObject<typeof GEMINI_CHAT_PARAMS>>;

```

--------------------------------------------------------------------------------
/src/services/gemini/GeminiCacheService.ts:
--------------------------------------------------------------------------------

```typescript
import { GoogleGenAI, CachedContent } from "@google/genai";
import {
  GeminiApiError,
  GeminiResourceNotFoundError,
  GeminiInvalidParameterError,
} from "../../utils/errors.js";
import { logger } from "../../utils/logger.js";
import { CachedContentMetadata } from "../../types/index.js";
import { Content, Tool, ToolConfig, CacheId } from "./GeminiTypes.js";

/**
 * Service for handling cache-related operations for the Gemini service.
 * Manages creation, listing, retrieval, update, and deletion of cached
 * content via the GoogleGenAI `caches` API.
 *
 * Error contract: invalid cache IDs raise GeminiInvalidParameterError,
 * missing caches raise GeminiResourceNotFoundError, and all other failures
 * are wrapped in GeminiApiError. (Previously the invalid-ID error was thrown
 * inside the try block and re-wrapped by the catch as a generic
 * GeminiApiError, losing its type; validation now happens before the try,
 * and the shared error handler rethrows already-typed errors.)
 */
export class GeminiCacheService {
  private genAI: GoogleGenAI;

  /**
   * Creates a new instance of the GeminiCacheService.
   * @param genAI The GoogleGenAI instance to use for API calls
   */
  constructor(genAI: GoogleGenAI) {
    this.genAI = genAI;
  }

  /**
   * Creates a cached content entry in the Gemini API.
   *
   * @param modelName The model to use for this cached content
   * @param contents The conversation contents to cache
   * @param options Additional options for the cache (displayName, systemInstruction, ttl, tools, toolConfig)
   * @returns Promise resolving to the cached content metadata
   * @throws GeminiApiError if the API call fails
   */
  public async createCache(
    modelName: string,
    contents: Content[],
    options?: {
      displayName?: string;
      systemInstruction?: Content | string;
      ttl?: string;
      tools?: Tool[];
      toolConfig?: ToolConfig;
    }
  ): Promise<CachedContentMetadata> {
    try {
      logger.debug(`Creating cache for model: ${modelName}`);

      // A plain-string systemInstruction is normalized to a Content object.
      let formattedSystemInstruction: Content | undefined;
      if (options?.systemInstruction) {
        formattedSystemInstruction =
          typeof options.systemInstruction === "string"
            ? { parts: [{ text: options.systemInstruction }] }
            : options.systemInstruction;
      }

      // Create config object for the request
      const cacheConfig = {
        contents,
        displayName: options?.displayName,
        systemInstruction: formattedSystemInstruction,
        ttl: options?.ttl,
        tools: options?.tools,
        toolConfig: options?.toolConfig,
      };

      // Create the cache entry
      const cacheData = await this.genAI.caches.create({
        model: modelName,
        config: cacheConfig,
      });

      // Return the mapped metadata
      return this.mapSdkCacheToMetadata(cacheData);
    } catch (error: unknown) {
      return this.handleCacheError(error, "creating cache", "create cache");
    }
  }

  /**
   * Lists cached content entries in the Gemini API.
   *
   * @param pageSize Optional maximum number of entries to return
   * @param pageToken Optional token for pagination
   * @returns Promise resolving to an object with caches array and optional nextPageToken
   * @throws GeminiApiError if the API call fails
   */
  public async listCaches(
    pageSize?: number,
    pageToken?: string
  ): Promise<{ caches: CachedContentMetadata[]; nextPageToken?: string }> {
    try {
      logger.debug(
        `Listing caches with pageSize: ${pageSize}, pageToken: ${pageToken}`
      );

      // Prepare list parameters, omitting anything the caller did not set.
      const listParams: Record<string, number | string> = {};

      if (pageSize !== undefined) {
        listParams.pageSize = pageSize;
      }

      if (pageToken) {
        listParams.pageToken = pageToken;
      }

      // Call the caches.list method
      const response = await this.genAI.caches.list(listParams);

      const caches: CachedContentMetadata[] = [];
      let nextPageToken: string | undefined;

      // Handle the response in a generic way to accommodate the different
      // shapes returned across SDK versions (plain object, Pager, or array).
      if (response && typeof response === "object") {
        if ("caches" in response && Array.isArray(response.caches)) {
          // Standard response format: { caches: [...], nextPageToken? }
          for (const cache of response.caches) {
            caches.push(this.mapSdkCacheToMetadata(cache));
          }
          nextPageToken = (
            response as {
              caches: Record<string, unknown>[];
              nextPageToken?: string;
            }
          ).nextPageToken;
        } else if ("page" in response && response.page) {
          // Pager-like object in SDK v0.10.0: iterate the current page.
          const cacheList = Array.from(response.page);
          for (const cache of cacheList) {
            caches.push(this.mapSdkCacheToMetadata(cache));
          }

          // Pagers expose hasNextPage() instead of an explicit token; report
          // a sentinel token so callers know more data is available.
          const hasNextPage =
            typeof response === "object" &&
            "hasNextPage" in response &&
            typeof response.hasNextPage === "function"
              ? response.hasNextPage()
              : false;

          if (hasNextPage) {
            nextPageToken = "next_page_available";
          }
        } else if (Array.isArray(response)) {
          // Direct array response
          for (const cache of response) {
            caches.push(this.mapSdkCacheToMetadata(cache));
          }
        }
      }

      return { caches, nextPageToken };
    } catch (error: unknown) {
      return this.handleCacheError(error, "listing caches", "list caches");
    }
  }

  /**
   * Gets a specific cached content entry's metadata from the Gemini API.
   *
   * @param cacheId The ID of the cached content to retrieve (format: "cachedContents/{id}")
   * @returns Promise resolving to the cached content metadata
   * @throws GeminiInvalidParameterError if the cacheId format is invalid
   * @throws GeminiResourceNotFoundError if the cache does not exist
   * @throws GeminiApiError for other API failures
   */
  public async getCache(cacheId: CacheId): Promise<CachedContentMetadata> {
    // Validate before the try block so the typed error is not re-wrapped.
    this.validateCacheId(cacheId);

    try {
      logger.debug(`Getting cache metadata for: ${cacheId}`);

      const cacheData = await this.genAI.caches.get({ name: cacheId });

      return this.mapSdkCacheToMetadata(cacheData);
    } catch (error: unknown) {
      return this.handleCacheError(error, "getting cache", "get cache", cacheId);
    }
  }

  /**
   * Updates a cached content entry in the Gemini API.
   *
   * @param cacheId The ID of the cached content to update (format: "cachedContents/{id}")
   * @param updates The updates to apply to the cached content (ttl, displayName)
   * @returns Promise resolving to the updated cached content metadata
   * @throws GeminiInvalidParameterError if the cacheId format is invalid
   * @throws GeminiResourceNotFoundError if the cache does not exist
   * @throws GeminiApiError for other API failures
   */
  public async updateCache(
    cacheId: CacheId,
    updates: { ttl?: string; displayName?: string }
  ): Promise<CachedContentMetadata> {
    this.validateCacheId(cacheId);

    try {
      logger.debug(`Updating cache: ${cacheId}`);

      const cacheData = await this.genAI.caches.update({
        name: cacheId,
        config: updates,
      });

      return this.mapSdkCacheToMetadata(cacheData);
    } catch (error: unknown) {
      return this.handleCacheError(
        error,
        "updating cache",
        "update cache",
        cacheId
      );
    }
  }

  /**
   * Deletes a cached content entry from the Gemini API.
   *
   * @param cacheId The ID of the cached content to delete (format: "cachedContents/{id}")
   * @returns Promise resolving to an object with success flag
   * @throws GeminiInvalidParameterError if the cacheId format is invalid
   * @throws GeminiResourceNotFoundError if the cache does not exist
   * @throws GeminiApiError for other API failures
   */
  public async deleteCache(cacheId: CacheId): Promise<{ success: boolean }> {
    this.validateCacheId(cacheId);

    try {
      logger.debug(`Deleting cache: ${cacheId}`);

      await this.genAI.caches.delete({ name: cacheId });

      return { success: true };
    } catch (error: unknown) {
      return this.handleCacheError(
        error,
        "deleting cache",
        "delete cache",
        cacheId
      );
    }
  }

  /**
   * Validates that a cache ID uses the "cachedContents/{id}" format the API
   * expects.
   *
   * @param cacheId The cache ID to validate
   * @throws GeminiInvalidParameterError when the format is wrong
   */
  private validateCacheId(cacheId: CacheId): void {
    if (!cacheId.startsWith("cachedContents/")) {
      throw new GeminiInvalidParameterError(
        `Cache ID must be in the format "cachedContents/{id}", received: ${cacheId}`
      );
    }
  }

  /**
   * Shared error handler for cache operations: rethrows already-typed
   * errors unchanged, maps "not found" failures to
   * GeminiResourceNotFoundError (when a cacheId is known), and wraps
   * everything else in GeminiApiError after logging.
   *
   * @param error The caught error
   * @param actionGerund Action description for the log message (e.g. "getting cache")
   * @param actionInfinitive Action description for the wrapped error (e.g. "get cache")
   * @param cacheId Optional cache ID for not-found mapping
   */
  private handleCacheError(
    error: unknown,
    actionGerund: string,
    actionInfinitive: string,
    cacheId?: CacheId
  ): never {
    // Preserve typed errors instead of re-wrapping them.
    if (
      error instanceof GeminiInvalidParameterError ||
      error instanceof GeminiResourceNotFoundError
    ) {
      throw error;
    }

    // Check for "not found" patterns in the error message, as the SDK does
    // not expose a structured status code here.
    if (
      cacheId !== undefined &&
      error instanceof Error &&
      (error.message.includes("not found") || error.message.includes("404"))
    ) {
      throw new GeminiResourceNotFoundError("Cache", cacheId, error);
    }

    const message = error instanceof Error ? error.message : String(error);
    logger.error(`Error ${actionGerund}: ${message}`, error);
    throw new GeminiApiError(
      `Failed to ${actionInfinitive}: ${message}`,
      error
    );
  }

  /**
   * Helper method to map cached content response data to our
   * CachedContentMetadata interface.
   *
   * @param cacheData The cache data from the Gemini API
   * @returns The mapped CachedContentMetadata object
   * @throws Error when the response is missing the required name field
   */
  private mapSdkCacheToMetadata(
    cacheData: CachedContent
  ): CachedContentMetadata {
    if (!cacheData.name) {
      throw new Error("Invalid cache data received: missing required name");
    }

    // In SDK v0.10.0, the structure might be slightly different;
    // fall back to sensible defaults for any missing optional fields.
    return {
      name: cacheData.name,
      displayName: cacheData.displayName || "",
      createTime: cacheData.createTime || new Date().toISOString(),
      updateTime: cacheData.updateTime || new Date().toISOString(),
      expirationTime: cacheData.expireTime,
      model: cacheData.model || "",
      state: "ACTIVE", // Default to ACTIVE since CachedContent does not have a status/state property
      usageMetadata: {
        totalTokenCount:
          typeof cacheData.usageMetadata?.totalTokenCount !== "undefined"
            ? cacheData.usageMetadata.totalTokenCount
            : 0,
      },
    };
  }
}

```

--------------------------------------------------------------------------------
/src/services/gemini/GeminiPromptTemplates.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Collection of specialized prompt templates for different review scenarios.
 * These templates are designed to provide tailored guidance to the Gemini model
 * for various types of code reviews.
 */

/**
 * Base template for code reviews with placeholders for context.
 *
 * Placeholders (substituted by the caller before sending to the model):
 * - {{repositoryContext}}  — optional project/repo background
 * - {{focusInstructions}}  — review-focus-specific guidance
 * - {{diffContent}}        — the git diff under review
 */
export const baseReviewTemplate = `You are a senior software engineer reviewing the following code changes.
{{repositoryContext}}

{{focusInstructions}}

Analyze the following git diff:
\`\`\`diff
{{diffContent}}
\`\`\`

Provide your code review with the following sections:
1. Summary of Changes: A brief overview of what's changing
2. Key Observations: The most important findings from your review
3. Detailed Review: File-by-file analysis of the changes
4. Recommendations: Specific suggestions for improvements
5. Questions: Any points needing clarification from the author

Be concise yet thorough in your analysis.`;

/**
 * Enhanced template for security-focused reviews
 */
export const securityReviewTemplate = `You are a senior security engineer with OWASP certification and extensive experience in identifying vulnerabilities in software applications.

{{repositoryContext}}

TASK: Perform a comprehensive security review of the following code changes, focusing on potential vulnerabilities and security risks.

STEP 1: Analyze the code for common security issues:
- Injection vulnerabilities (SQL, NoSQL, command injection, etc.)
- Authentication and authorization flaws
- Sensitive data exposure
- Security misconfigurations
- Cross-site scripting (XSS) and cross-site request forgery (CSRF)
- Broken access control
- Insecure deserialization
- Using components with known vulnerabilities
- Input validation issues
- Cryptographic problems

STEP 2: For each identified issue:
- Describe the vulnerability in detail
- Assess its severity (Critical, High, Medium, Low)
- Explain the potential impact
- Provide a concrete remediation approach with code examples

Analyze the following git diff:
\`\`\`diff
{{diffContent}}
\`\`\`

Your response MUST follow this format:
1. EXECUTIVE SUMMARY: Brief overview of security posture
2. CRITICAL ISSUES: Must be fixed immediately
3. HIGH PRIORITY ISSUES: Should be fixed soon
4. MEDIUM/LOW PRIORITY ISSUES: Address when possible
5. SECURE CODING RECOMMENDATIONS: Best practices to implement`;

/**
 * Enhanced template for performance-focused reviews
 */
export const performanceReviewTemplate = `You are a performance optimization expert with deep knowledge of runtime characteristics and profiling techniques.

{{repositoryContext}}

TASK: Perform a detailed performance analysis of the following code, identifying optimization opportunities and potential bottlenecks.

STEP 1: Systematically analyze each section of code for:
- Algorithm efficiency and complexity (provide Big O analysis)
- Resource consumption patterns (memory, CPU, I/O)
- Database query performance and optimization
- Concurrency and parallelism opportunities
- Caching potential and data access patterns
- Unnecessary computation or redundant operations

STEP 2: For each identified performance issue:
- Describe the specific performance problem
- Estimate the performance impact (critical, significant, moderate, minor)
- Explain why it's problematic
- Provide a specific optimization solution with code examples
- Note any tradeoffs the optimization might introduce

Analyze the following git diff:
\`\`\`diff
{{diffContent}}
\`\`\`

Your response MUST follow this format:
1. PERFORMANCE SUMMARY: Overall assessment with key metrics
2. CRITICAL BOTTLENECKS: Highest-impact issues to address first
3. SIGNIFICANT OPTIMIZATIONS: Important improvements with measurable impact
4. MINOR OPTIMIZATIONS: Small enhancements for completeness
5. MONITORING RECOMMENDATIONS: Suggestions for ongoing performance measurement`;

/**
 * Enhanced template for architecture-focused reviews
 */
export const architectureReviewTemplate = `You are a senior software architect with expertise in designing scalable, maintainable software systems.

{{repositoryContext}}

TASK: Perform an architectural analysis of the following code changes, focusing on design patterns, component relationships, and system structure.

STEP 1: Analyze the architectural aspects of the code:
- Design pattern implementation and appropriateness
- Component responsibilities and cohesion
- Interface design and abstraction
- Dependency management and coupling
- Modularity and extensibility
- Separation of concerns
- Error handling strategies
- Consistency with architectural principles

STEP 2: For each architectural observation:
- Describe the architectural element or decision
- Analyze its impact on the overall system
- Evaluate adherence to SOLID principles and other architectural best practices
- Suggest improvements with rationale

Analyze the following git diff:
\`\`\`diff
{{diffContent}}
\`\`\`

Your response MUST follow this format:
1. ARCHITECTURAL OVERVIEW: Summary of the code's architecture
2. STRENGTHS: Positive architectural aspects of the code
3. CONCERNS: Architectural issues or anti-patterns identified
4. REFACTORING RECOMMENDATIONS: Suggestions for architectural improvements
5. LONG-TERM CONSIDERATIONS: How these changes affect system evolution`;

/**
 * Enhanced template for bug-focused reviews
 */
export const bugReviewTemplate = `You are a quality assurance engineer with expertise in identifying logic flaws and edge cases in software.

{{repositoryContext}}

TASK: Perform a thorough analysis of the following code changes to identify potential bugs, edge cases, and logical errors.

STEP 1: Analyze the code for common bug sources:
- Off-by-one errors
- Null/undefined handling issues
- Edge case oversights
- Race conditions
- Resource leaks
- Error handling gaps
- Typos in critical code
- Incorrect assumptions
- Boundary condition failures
- Exception handling problems

STEP 2: For each potential bug:
- Describe the issue and why it's problematic
- Explain the conditions under which it would occur
- Assess its severity and potential impact
- Provide a fix with code examples
- Suggest tests that would catch the issue

Analyze the following git diff:
\`\`\`diff
{{diffContent}}
\`\`\`

Your response MUST follow this format:
1. BUG RISK SUMMARY: Overview of potential issues
2. CRITICAL BUGS: Issues that could cause system failure or data corruption
3. MAJOR BUGS: Significant functional issues that need addressing
4. MINOR BUGS: Less severe issues that should be fixed
5. TEST RECOMMENDATIONS: Tests to implement to prevent similar bugs`;

/**
 * Enhanced template for general comprehensive reviews
 */
export const generalReviewTemplate = `You are a senior software engineer with expertise across multiple domains including security, performance, architecture, and testing.

{{repositoryContext}}

TASK: Perform a comprehensive review of the following code changes, covering all aspects of software quality.

I want you to follow a specific review process:

STEP 1: Understand the overall purpose
- Identify what problem the code is solving
- Determine how it fits into the broader application

STEP 2: Analyze code quality
- Readability and naming conventions
- Function/method size and complexity
- Comments and documentation
- Consistency with existing patterns

STEP 3: Evaluate correctness
- Potential bugs and edge cases
- Error handling completeness
- Test coverage adequacy

STEP 4: Consider performance
- Inefficient algorithms or patterns
- Resource utilization concerns
- Optimization opportunities

STEP 5: Assess maintainability
- Extensibility for future changes
- Coupling and cohesion
- Clear separation of concerns

STEP 6: Security review
- Potential vulnerabilities
- Input validation issues
- Security best practices

Analyze the following git diff:
\`\`\`diff
{{diffContent}}
\`\`\`

Your response MUST follow this format:
1. SUMMARY: Brief overview of the changes and their purpose
2. KEY OBSERVATIONS: Most important findings (positive and negative)
3. DETAILED REVIEW: Analysis by file with specific comments
4. RECOMMENDATIONS: Prioritized suggestions for improvement
5. QUESTIONS: Any clarifications needed from the developer`;

/**
 * Replace placeholders in a template with actual values.
 *
 * Placeholders use the form `{{key}}`. Substitution is purely literal:
 * values are inserted exactly as given (no regex or `$`-pattern
 * interpretation), and any placeholders left unresolved afterwards are
 * stripped from the output.
 *
 * @param template Template string with placeholders
 * @param context Context object with values to replace placeholders
 * @returns Processed template with placeholders replaced
 */
export function processTemplate(
  template: string,
  context: {
    repositoryContext?: string;
    diffContent: string;
    focusInstructions?: string;
    [key: string]: string | undefined;
  }
): string {
  let processedTemplate = template;

  // Replace each placeholder with its corresponding value
  for (const [key, value] of Object.entries(context)) {
    // Skip undefined values
    if (value === undefined) continue;

    // Convert the value to string if it's not already
    const stringValue = typeof value === "string" ? value : String(value);

    // Use split/join instead of String.replace with a RegExp: this avoids
    // regex-metacharacter issues in keys AND prevents `$&`/`$1`-style
    // substitution patterns in values from being interpreted (diff content
    // frequently contains `$`, which the old replace() call would mangle).
    processedTemplate = processedTemplate
      .split(`{{${key}}}`)
      .join(stringValue);
  }

  // Remove any remaining placeholders that had no corresponding context value
  processedTemplate = processedTemplate.replace(/{{[^{}]+}}/g, "");

  return processedTemplate;
}

/**
 * Get the appropriate template for a specific review focus
 *
 * @param reviewFocus The focus area for the review
 * @returns The template string for the specified focus
 */
export function getReviewTemplate(
  reviewFocus: "security" | "performance" | "architecture" | "bugs" | "general"
): string {
  // Table-driven lookup instead of a switch; any value not explicitly
  // mapped falls back to the general-purpose template, mirroring the
  // original default branch.
  const templatesByFocus: Record<typeof reviewFocus, string> = {
    security: securityReviewTemplate,
    performance: performanceReviewTemplate,
    architecture: architectureReviewTemplate,
    bugs: bugReviewTemplate,
    general: generalReviewTemplate,
  };
  return templatesByFocus[reviewFocus] ?? generalReviewTemplate;
}

/**
 * Generate focus-specific instructions for the base template
 *
 * @param reviewFocus The focus area for the review
 * @returns Instruction string for the specified focus
 */
export function getFocusInstructions(
  reviewFocus: "security" | "performance" | "architecture" | "bugs" | "general"
): string {
  // Lookup table replaces the original switch; the instruction text for
  // each focus is unchanged. Each entry deliberately begins with a newline
  // so it slots cleanly into the base template.
  const instructionsByFocus: Record<typeof reviewFocus, string> = {
    security: `
Focus on identifying security vulnerabilities in the code changes, such as:
- Input validation issues
- Authentication/authorization flaws
- Data exposure risks
- Injection vulnerabilities
- Insecure cryptography
- CSRF/XSS vectors
- Any other security concerns`,
    performance: `
Focus on identifying performance implications in the code changes, such as:
- Algorithm complexity issues (O(n²) vs O(n))
- Unnecessary computations or memory usage
- Database query inefficiencies
- Unoptimized loops or data structures
- Potential memory leaks
- Resource contention issues`,
    architecture: `
Focus on architectural aspects of the code changes, such as:
- Design pattern conformance
- Component responsibilities and cohesion
- Dependency management
- API design principles
- Modularity and extensibility
- Separation of concerns`,
    bugs: `
Focus on identifying potential bugs and logical errors in the code changes, such as:
- Off-by-one errors
- Null/undefined handling issues
- Edge case oversights
- Race conditions
- Error handling gaps
- Typos in critical code`,
    general: `
Provide a comprehensive review covering:
- Code quality and readability
- Potential bugs or errors
- Performance implications
- Security considerations
- Architectural aspects
- Best practices and style conventions`,
  };
  // Same fallback the original default branch provided.
  return instructionsByFocus[reviewFocus] ?? instructionsByFocus.general;
}

```

--------------------------------------------------------------------------------
/src/server.ts:
--------------------------------------------------------------------------------

```typescript
import { createServer } from "./createServer.js";
import {
  logger,
  setServerState,
  startHealthCheckServer,
  ServerState,
} from "./utils/index.js";
import type { JsonRpcInitializeRequest } from "./types/serverTypes.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
import type { Transport } from "@modelcontextprotocol/sdk/server/mcp.js";
import { SessionService } from "./services/SessionService.js";
import express from "express";
import { randomUUID } from "node:crypto";
/**
 * Type guard to check if a value is a JSON-RPC 2.0 initialize request
 * @param value - The value to check
 * @returns true if the value matches the JSON-RPC initialize request structure
 */
const isInitializeRequest = (
  value: unknown
): value is JsonRpcInitializeRequest => {
  // Reject anything that is not a non-null object before property access.
  if (typeof value !== "object" || value === null) {
    return false;
  }

  const candidate = value as Record<string, unknown>;

  // Per the JSON-RPC 2.0 spec, the request id must be a string or number.
  const hasValidId =
    typeof candidate.id === "string" || typeof candidate.id === "number";

  // All three conditions must hold: protocol version marker, the
  // "initialize" method, and a spec-compliant id.
  return (
    candidate.jsonrpc === "2.0" &&
    candidate.method === "initialize" &&
    hasValidId
  );
};

// Server state tracking
// Single mutable record of everything this process owns (transports, HTTP and
// health-check servers, client/session services) so shutdown() can tear it
// all down in one place.
const serverState: ServerState = {
  isRunning: false,
  startTime: null,
  transport: null,
  server: null,
  healthCheckServer: null,
  mcpClientService: null, // Add McpClientService to server state
};

// Share server state with the health check module
setServerState(serverState);

// Map to store transports by session ID for HTTP mode
// Entries are added by each transport's onsessioninitialized callback and
// removed by its onclose handler.
const httpTransports: Record<string, StreamableHTTPServerTransport> = {};

/**
 * Sets up HTTP server for Streamable HTTP transport.
 *
 * Exposes a single /mcp endpoint: POST carries JSON-RPC traffic (creating a
 * new transport on an initialize request, reusing an existing one when the
 * Mcp-Session-Id header matches), and GET serves SSE streaming for an
 * existing session. The listening port comes from MCP_SERVER_PORT (or legacy
 * MCP_WS_PORT), defaulting to 8080.
 *
 * @param mcpServer - Server object that can accept a transport connection
 * @param _sessionService - Currently unused; kept for interface symmetry
 */
async function setupHttpServer(
  mcpServer: { connect: (transport: Transport) => Promise<void> },
  _sessionService: SessionService
): Promise<void> {
  const app = express();
  app.use(express.json());

  const port = parseInt(
    process.env.MCP_SERVER_PORT || process.env.MCP_WS_PORT || "8080",
    10
  );

  // CORS middleware
  // NOTE(review): wildcard origin is fully permissive; DELETE is allowed here
  // but no DELETE route is registered below — confirm whether session
  // termination via DELETE /mcp was intended.
  app.use((req, res, next) => {
    res.header("Access-Control-Allow-Origin", "*");
    res.header("Access-Control-Allow-Methods", "GET, POST, OPTIONS, DELETE");
    res.header(
      "Access-Control-Allow-Headers",
      "Content-Type, Accept, Authorization, Mcp-Session-Id, Last-Event-ID"
    );
    if (req.method === "OPTIONS") {
      res.status(204).send();
      return;
    }
    next();
  });

  // MCP endpoint
  app.post("/mcp", async (req, res) => {
    try {
      const sessionId = req.headers["mcp-session-id"] as string;
      let transport: StreamableHTTPServerTransport;

      if (sessionId && httpTransports[sessionId]) {
        // Reuse existing transport
        transport = httpTransports[sessionId];
      } else if (!sessionId && isInitializeRequest(req.body)) {
        // Create new transport for initialization
        // When MCP_ENABLE_STREAMING !== "true" the generator is undefined
        // (stateless mode); presumably onsessioninitialized never fires then,
        // so the transport is not registered in httpTransports — confirm
        // against the SDK's stateless-mode contract.
        const sessionIdGenerator =
          process.env.MCP_ENABLE_STREAMING === "true"
            ? () => randomUUID()
            : undefined;

        transport = new StreamableHTTPServerTransport({
          sessionIdGenerator,
          onsessioninitialized: (sid: string) => {
            logger.info(`HTTP session initialized: ${sid}`);
            httpTransports[sid] = transport;
          },
        });

        // Set up cleanup handler
        transport.onclose = () => {
          const sid = transport.sessionId;
          if (sid && httpTransports[sid]) {
            logger.info(`HTTP transport closed for session ${sid}`);
            delete httpTransports[sid];
          }
        };

        // Connect transport to MCP server
        await mcpServer.connect(transport);
      } else {
        // Neither a known session nor a fresh initialize request.
        res.status(400).json({
          jsonrpc: "2.0",
          error: {
            code: -32000,
            message: "Bad Request: No valid session ID provided",
          },
        });
        return;
      }

      await transport.handleRequest(req, res, req.body);
    } catch (error) {
      logger.error("Error handling HTTP request:", error);
      res.status(500).json({
        jsonrpc: "2.0",
        error: {
          code: -32603,
          message: "Internal error",
        },
      });
    }
  });

  // GET endpoint for SSE streaming
  // Only valid for sessions already registered via a prior initialize POST.
  app.get("/mcp", async (req, res) => {
    try {
      const sessionId = req.headers["mcp-session-id"] as string;
      if (!sessionId || !httpTransports[sessionId]) {
        res.status(404).json({
          error: "Session not found",
        });
        return;
      }

      const transport = httpTransports[sessionId];
      await transport.handleRequest(req, res);
    } catch (error) {
      logger.error("Error handling GET request:", error);
      res.status(500).json({
        error: "Internal error",
      });
    }
  });

  // Start HTTP server
  const httpServer = app.listen(port, () => {
    logger.info(`HTTP server listening on port ${port} with /mcp endpoint`);
  });

  // Store HTTP server in state for cleanup
  serverState.httpServer = httpServer;
}

/**
 * Entrypoint: builds the MCP server, optionally starts the health-check
 * server, selects a transport from MCP_TRANSPORT (or legacy
 * MCP_TRANSPORT_TYPE), and connects it. "http"/"streamable"/"sse" run in
 * HTTP-only mode with no persistent transport; everything else uses stdio.
 * Exits the process with code 1 on startup failure.
 */
const main = async () => {
  try {
    // Session timeout is given in seconds (default: 1 hour).
    const sessionService = new SessionService(
      parseInt(process.env.MCP_SESSION_TIMEOUT || "3600", 10)
    );

    const { server, mcpClientService } = createServer();
    serverState.server = server;
    // Type compatibility: McpClientService implements McpClientServiceLike interface
    // The actual service has all required methods (disconnect, closeAllConnections)
    serverState.mcpClientService = mcpClientService; // Store the client service instance
    serverState.sessionService = sessionService; // Store the session service instance
    logger.info("Starting MCP server...");

    // Start health check server if enabled
    // Opt-out flag: anything other than the literal "false" keeps it on.
    if (process.env.ENABLE_HEALTH_CHECK !== "false") {
      logger.info("Starting health check server...");
      const healthServer = startHealthCheckServer();
      serverState.healthCheckServer = healthServer;
    }

    // Choose transport based on environment
    let transport: Transport | null;
    // Use MCP_TRANSPORT, but fall back to MCP_TRANSPORT_TYPE for backwards compatibility
    const transportType =
      process.env.MCP_TRANSPORT || process.env.MCP_TRANSPORT_TYPE || "stdio";

    if (transportType === "sse") {
      // SSE uses the StreamableHTTPServerTransport
      transport = null; // No persistent transport needed
      logger.info("Transport selected", {
        requested: transportType,
        selected: "streamable",
        fallback: false,
        message:
          "SSE transport - using StreamableHTTPServerTransport via HTTP endpoint",
        timestamp: new Date().toISOString(),
      });
    } else if (transportType === "http" || transportType === "streamable") {
      // For HTTP/Streamable transport, we don't need a persistent transport
      // Individual requests will create their own transports
      transport = null; // No persistent transport needed
      logger.info("Transport selected", {
        requested: transportType,
        selected: "streamable",
        fallback: false,
        message:
          "HTTP transport - individual requests will create their own transports",
        timestamp: new Date().toISOString(),
      });
    } else if (transportType === "streaming") {
      // "streaming" is accepted but unimplemented; fall back to stdio loudly.
      const fallbackReason = "Streaming transport not currently implemented";
      logger.warn("Transport fallback", {
        requested: transportType,
        selected: "stdio",
        fallback: true,
        reason: fallbackReason,
        timestamp: new Date().toISOString(),
      });
      transport = new StdioServerTransport();
      logger.info("Using stdio transport (fallback)");
    } else {
      // Default to stdio for anything else
      transport = new StdioServerTransport();
      logger.info("Transport selected", {
        requested: transportType || "default",
        selected: "stdio",
        fallback: false,
        message: "Using stdio transport",
        timestamp: new Date().toISOString(),
      });
    }

    serverState.transport = transport;
    if (transport) {
      logger.info(`Connecting transport: ${transport}`);
      await server.connect(transport);
    } else {
      logger.info("No persistent transport - using HTTP-only mode");
    }

    // Set up HTTP server for streamable/SSE transport if requested
    if (
      transportType === "http" ||
      transportType === "streamable" ||
      transportType === "sse"
    ) {
      await setupHttpServer(server, sessionService);
    }

    // Update server state
    serverState.isRunning = true;
    serverState.startTime = Date.now();

    logger.info("MCP Server connected and listening.");

    // For HTTP-only mode, keep the process alive
    if (
      transportType === "http" ||
      transportType === "streamable" ||
      transportType === "sse"
    ) {
      // Keep the process alive since we don't have a persistent transport
      // The HTTP server will handle all requests
      // NOTE(review): these duplicate the module-level SIGINT/SIGTERM
      // handlers registered below, so shutdown() runs twice per signal in
      // HTTP mode — confirm whether that is intentional.
      process.on("SIGINT", () => shutdown("SIGINT"));
      process.on("SIGTERM", () => shutdown("SIGTERM"));
    }
  } catch (error) {
    logger.error("Failed to start server:", error);
    process.exit(1); // Exit if server fails to start
  }
};

// Graceful shutdown handling
/**
 * Best-effort teardown of everything tracked in serverState. Each step is
 * wrapped in its own try/catch so one failure does not block the others;
 * the process exits with code 1 if any step failed, 0 otherwise.
 *
 * @param signal Name of the signal that triggered the shutdown (for logging)
 */
const shutdown = async (signal: string) => {
  logger.info(`${signal} signal received: closing MCP server`);

  // Track if any shutdown process fails
  let hasError = false;

  // Stop session service cleanup interval if it exists
  if (serverState.sessionService) {
    try {
      logger.info("Stopping session service cleanup interval...");
      serverState.sessionService.stopCleanupInterval();
      logger.info("Session service cleanup interval stopped.");
    } catch (error) {
      hasError = true;
      logger.error("Error stopping session service cleanup interval:", error);
    }
  }

  // Close all MCP client connections
  if (serverState.mcpClientService) {
    try {
      logger.info("Closing all MCP client connections...");
      serverState.mcpClientService.closeAllConnections();
      logger.info("MCP client connections closed.");
    } catch (error) {
      hasError = true;
      logger.error("Error closing MCP client connections:", error);
    }
  }

  if (serverState.isRunning && serverState.server) {
    try {
      // Disconnect the server if it exists and has a disconnect method
      if (typeof serverState.server.disconnect === "function") {
        await serverState.server.disconnect();
      }

      serverState.isRunning = false;
      logger.info("MCP Server shutdown completed successfully");
    } catch (error) {
      hasError = true;
      logger.error("Error during MCP server shutdown:", error);
    }
  }

  // Close HTTP server if it exists
  // NOTE(review): close() only initiates shutdown; its completion callback is
  // not awaited here, so in-flight connections may be cut off by process.exit
  // below — confirm whether a drained shutdown is required.
  if (serverState.httpServer) {
    try {
      logger.info("Closing HTTP server...");
      serverState.httpServer.close();
      logger.info("HTTP server closed successfully");
    } catch (error) {
      hasError = true;
      logger.error("Error during HTTP server shutdown:", error);
    }
  }

  // Close health check server if it exists
  if (serverState.healthCheckServer) {
    try {
      logger.info("Closing health check server...");
      serverState.healthCheckServer.close();
      logger.info("Health check server closed successfully");
    } catch (error) {
      hasError = true;
      logger.error("Error during health check server shutdown:", error);
    }
  }

  // Exit with appropriate code
  process.exit(hasError ? 1 : 0);
};

// Register shutdown handlers
process.on("SIGTERM", () => shutdown("SIGTERM"));
process.on("SIGINT", () => shutdown("SIGINT"));

// If this is the main module, start the server
// (the comparison only matches direct `node <script>` invocation, so merely
// importing this module does not start a server)
if (import.meta.url === `file://${process.argv[1]}`) {
  main();
}

```

--------------------------------------------------------------------------------
/tests/unit/utils/errors.test.vitest.ts:
--------------------------------------------------------------------------------

```typescript
// Using vitest globals - see vitest.config.ts globals: true
// Import directly from the MCP SDK to ensure we're using the same class reference
import { McpError, ErrorCode } from "@modelcontextprotocol/sdk/types.js";
// Import local error classes
import {
  ValidationError,
  NotFoundError,
  ConfigurationError,
  ServiceError,
  GeminiApiError,
  mapToMcpError,
  mapToolErrorToMcpError,
  ToolErrorLike,
} from "../../../src/utils/errors.js";

// Verifies mapToMcpError: translation of domain errors (ValidationError,
// GeminiApiError, etc.) into MCP SDK McpError instances, with the expected
// ErrorCode chosen from the error's type and message keywords.
describe("mapToMcpError", () => {
  const TOOL_NAME = "test_tool";

  it("should return McpError instances directly", () => {
    const originalError = new McpError(
      ErrorCode.InvalidParams,
      "Original MCP error"
    );
    const mappedError = mapToMcpError(originalError, TOOL_NAME);
    // Pass-through: already-mapped errors must be returned by reference.
    expect(mappedError).toBe(originalError);
  });

  it("should map ValidationError to InvalidParams", () => {
    const validationError = new ValidationError("Invalid input");
    const mappedError = mapToMcpError(validationError, TOOL_NAME);

    // Check error code and message content
    expect(mappedError.code).toBe(ErrorCode.InvalidParams);
    expect(mappedError.message).toContain("Validation error");
    expect(mappedError.message).toContain("Invalid input");
  });

  it("should map NotFoundError to InvalidRequest", () => {
    const notFoundError = new NotFoundError("Resource not found");
    const mappedError = mapToMcpError(notFoundError, TOOL_NAME);

    // Check error code and message content
    expect(mappedError.code).toBe(ErrorCode.InvalidRequest);
    expect(mappedError.message).toContain("Resource not found");
  });

  it("should map ConfigurationError to InternalError", () => {
    const configError = new ConfigurationError("Invalid configuration");
    const mappedError = mapToMcpError(configError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InternalError); // Changed from FailedPrecondition
    expect(mappedError.message).toContain("Configuration error");
    expect(mappedError.message).toContain("Invalid configuration");
  });

  // The next several cases exercise keyword-based classification of
  // GeminiApiError messages (quota, rate limit, permission, not-found, ...).
  it("should map quota-related GeminiApiError to InternalError", () => {
    const quotaError = new GeminiApiError("Quota exceeded for this resource");
    const mappedError = mapToMcpError(quotaError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InternalError); // Changed from ResourceExhausted
    expect(mappedError.message).toContain("Quota exceeded");
  });

  it("should map rate limit GeminiApiError to InternalError", () => {
    const rateLimitError = new GeminiApiError(
      "Rate limit hit for this operation"
    );
    const mappedError = mapToMcpError(rateLimitError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InternalError); // Changed from ResourceExhausted
    expect(mappedError.message).toContain("rate limit hit");
  });

  it("should map permission-related GeminiApiError to InvalidRequest", () => {
    const permissionError = new GeminiApiError(
      "Permission denied for this operation"
    );
    const mappedError = mapToMcpError(permissionError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InvalidRequest); // Changed from PermissionDenied
    expect(mappedError.message).toContain("Permission denied");
  });

  it("should map not-found GeminiApiError to InvalidRequest", () => {
    const notFoundError = new GeminiApiError("Resource does not exist");
    const mappedError = mapToMcpError(notFoundError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InvalidRequest);
    expect(mappedError.message).toContain("Resource not found");
  });

  it("should map invalid argument GeminiApiError to InvalidParams", () => {
    const invalidParamError = new GeminiApiError("Invalid argument provided");
    const mappedError = mapToMcpError(invalidParamError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InvalidParams);
    expect(mappedError.message).toContain("Invalid parameters");
  });

  it("should map safety-related GeminiApiError to InvalidRequest", () => {
    const safetyError = new GeminiApiError(
      "Content blocked by safety settings"
    );
    const mappedError = mapToMcpError(safetyError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InvalidRequest);
    expect(mappedError.message).toContain("Content blocked by safety settings");
  });

  it("should map File API not supported errors to InvalidRequest", () => {
    const apiError = new GeminiApiError(
      "File API is not supported on Vertex AI"
    );
    const mappedError = mapToMcpError(apiError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InvalidRequest); // Changed from FailedPrecondition
    expect(mappedError.message).toContain("Operation not supported");
  });

  it("should map other GeminiApiError to InternalError", () => {
    const otherApiError = new GeminiApiError("Unknown API error");
    const mappedError = mapToMcpError(otherApiError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InternalError);
    expect(mappedError.message).toContain("Gemini API Error");
  });

  it("should map ServiceError to InternalError", () => {
    const serviceError = new ServiceError("Service processing failed");
    const mappedError = mapToMcpError(serviceError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InternalError);
    expect(mappedError.message).toContain("Service error");
  });

  it("should map standard Error to InternalError", () => {
    const standardError = new Error("Standard error occurred");
    const mappedError = mapToMcpError(standardError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InternalError);
    expect(mappedError.message).toContain(TOOL_NAME);
    expect(mappedError.message).toContain("Standard error occurred");
  });

  // Non-Error inputs: strings, plain objects, and nullish values must all
  // degrade gracefully to InternalError rather than throwing.
  it("should handle string errors", () => {
    const stringError = "String error message";
    const mappedError = mapToMcpError(stringError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InternalError);
    expect(mappedError.message).toContain(stringError);
  });

  it("should handle object errors", () => {
    const objectError = { errorCode: 500, message: "Object error" };
    const mappedError = mapToMcpError(objectError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InternalError);
    // Should contain stringified version of the object
    expect(mappedError.message).toContain("Object error");
  });

  it("should handle null/undefined errors", () => {
    const nullError = null;
    const mappedError = mapToMcpError(nullError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InternalError);
    expect(mappedError.message).toContain("An unknown error occurred");
  });

  // Testing if the error details are properly handled in mapping
  it("should handle errors with details", () => {
    // Create an error with details
    const errorWithDetails = new GeminiApiError("API error with details", {
      key: "value",
    });

    // Directly check the original error - it should have details
    expect(errorWithDetails).toHaveProperty("details");
    expect(errorWithDetails.details).toEqual({ key: "value" });

    // Map it to an McpError
    const mappedError = mapToMcpError(errorWithDetails, TOOL_NAME);

    // Basic assertions
    expect(mappedError).toBeInstanceOf(Object);
    expect(mappedError).not.toBeNull();
    expect(mappedError.code).toBe(ErrorCode.InternalError);

    // Verify mapping occurs correctly
    expect(mappedError).toBeInstanceOf(McpError);
    expect(mappedError.message).toContain("API error with details");

    // If McpError supports data property for error details, check it
    if ("data" in mappedError) {
      expect(mappedError.data).toBeDefined();
    }
  });
});

// Verifies mapToolErrorToMcpError: mapping of loosely-typed tool error
// objects (ToolErrorLike) onto MCP error codes based on substrings of their
// string `code` field, with safe fallbacks for malformed or nullish input.
describe("mapToolErrorToMcpError", () => {
  const TOOL_NAME = "test_tool";

  it("should handle ToolErrorLike objects with code and message", () => {
    const toolError: ToolErrorLike = {
      code: "INVALID_ARGUMENT",
      message: "Invalid parameter provided",
      details: { parameter: "value" },
    };

    const mappedError = mapToolErrorToMcpError(toolError, TOOL_NAME);

    expect(mappedError).toBeInstanceOf(McpError);
    expect(mappedError.code).toBe(ErrorCode.InvalidParams);
    expect(mappedError.message).toContain("Invalid parameter provided");
  });

  it("should map QUOTA and RATE_LIMIT codes to InternalError", () => {
    const quotaError: ToolErrorLike = {
      code: "QUOTA_EXCEEDED",
      message: "API quota exceeded",
    };

    const mappedError = mapToolErrorToMcpError(quotaError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InternalError);
    expect(mappedError.message).toContain("API quota or rate limit exceeded");
  });

  it("should map PERMISSION and AUTH codes to InvalidRequest", () => {
    const permissionError: ToolErrorLike = {
      code: "PERMISSION_DENIED",
      message: "Permission denied",
    };

    const mappedError = mapToolErrorToMcpError(permissionError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InvalidRequest);
    expect(mappedError.message).toContain("Permission denied");
  });

  it("should map NOT_FOUND codes to InvalidRequest", () => {
    const notFoundError: ToolErrorLike = {
      code: "RESOURCE_NOT_FOUND",
      message: "Resource not found",
    };

    const mappedError = mapToolErrorToMcpError(notFoundError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InvalidRequest);
    expect(mappedError.message).toContain("Resource not found");
  });

  it("should map INVALID and ARGUMENT codes to InvalidParams", () => {
    const invalidError: ToolErrorLike = {
      code: "INVALID_ARGUMENT",
      message: "Invalid argument",
    };

    const mappedError = mapToolErrorToMcpError(invalidError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InvalidParams);
    expect(mappedError.message).toContain("Invalid parameters");
  });

  it("should map UNSUPPORTED codes to InvalidRequest", () => {
    const unsupportedError: ToolErrorLike = {
      code: "OPERATION_NOT_SUPPORTED",
      message: "Operation not supported",
    };

    const mappedError = mapToolErrorToMcpError(unsupportedError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InvalidRequest);
    expect(mappedError.message).toContain("Operation not supported");
  });

  it("should handle objects without code property", () => {
    const toolError = {
      message: "Generic error message",
      details: { info: "additional info" },
    };

    const mappedError = mapToolErrorToMcpError(toolError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InternalError);
    expect(mappedError.message).toContain("Generic error message");
  });

  it("should handle non-object errors", () => {
    const simpleError = "Simple string error";

    const mappedError = mapToolErrorToMcpError(simpleError, TOOL_NAME);

    expect(mappedError.code).toBe(ErrorCode.InternalError);
    expect(mappedError.message).toContain(`Error in ${TOOL_NAME}`);
  });

  it("should handle null and undefined errors", () => {
    const nullError = mapToolErrorToMcpError(null, TOOL_NAME);
    const undefinedError = mapToolErrorToMcpError(undefined, TOOL_NAME);

    expect(nullError.code).toBe(ErrorCode.InternalError);
    expect(undefinedError.code).toBe(ErrorCode.InternalError);
    expect(nullError.message).toContain(`Error in ${TOOL_NAME}`);
    expect(undefinedError.message).toContain(`Error in ${TOOL_NAME}`);
  });

  it("should preserve error details when available", () => {
    const toolError: ToolErrorLike = {
      code: "TEST_ERROR",
      message: "Test error message",
      details: { context: "test context", id: 123 },
    };

    const mappedError = mapToolErrorToMcpError(toolError, TOOL_NAME);

    // Check that details are preserved (assuming McpError supports details/data)
    expect(mappedError).toBeDefined();
    expect(mappedError.message).toContain("Test error message");
  });
});

```

--------------------------------------------------------------------------------
/src/services/ModelSelectionService.ts:
--------------------------------------------------------------------------------

```typescript
import {
  ModelConfiguration,
  ModelSelectionCriteria,
  ModelCapabilities,
  ModelScore,
  ModelSelectionHistory,
  ModelPerformanceMetrics,
} from "../types/index.js";
import { logger } from "../utils/logger.js";

/**
 * Selects the most appropriate model for a task based on the configured
 * model lists, per-model capabilities, caller preferences, and observed
 * runtime performance. Selection never throws: any failure falls back to
 * the criteria's fallback model or the configured default.
 */
export class ModelSelectionService {
  /** Per-model capabilities, keyed by model name (built from config.capabilities). */
  private modelCache: Map<string, ModelCapabilities>;
  /** Rolling latency/success stats per model, fed by updatePerformanceMetrics(). */
  private performanceMetrics: Map<string, ModelPerformanceMetrics>;
  /** FIFO log of past selections, capped at maxHistorySize entries. */
  private selectionHistory: ModelSelectionHistory[];
  private readonly maxHistorySize = 500;

  constructor(private config: ModelConfiguration) {
    this.modelCache = new Map();
    this.performanceMetrics = new Map();
    this.selectionHistory = [];
    this.initializeModelCache();
  }

  /** Populates modelCache from the capability map in the active configuration. */
  private initializeModelCache(): void {
    Object.entries(this.config.capabilities).forEach(
      ([model, capabilities]) => {
        this.modelCache.set(model, capabilities);
      }
    );
  }

  /**
   * Picks the best model for the given criteria.
   *
   * Falls back to `criteria.fallbackModel` (or the configured default) when
   * no candidate matches or when selection fails unexpectedly.
   *
   * @param criteria - Task type, required capabilities, and preferences.
   * @returns The selected model name.
   */
  async selectOptimalModel(criteria: ModelSelectionCriteria): Promise<string> {
    const startTime = Date.now();
    try {
      const candidateModels = this.getCandidateModels(criteria);
      if (candidateModels.length === 0) {
        logger.warn(
          "[ModelSelectionService] No candidate models found for criteria",
          { criteria }
        );
        return criteria.fallbackModel || this.config.default;
      }

      // Score every candidate exactly once; the same scores are reused for
      // both the pick and the history record (previously computed twice).
      const scores = this.scoreCandidates(candidateModels, criteria);
      const selectedModel = this.pickHighestScoring(scores);

      const selectionTime = Date.now() - startTime;
      this.recordSelection(criteria, selectedModel, scores, selectionTime);

      logger.debug("[ModelSelectionService] Model selected", {
        selectedModel,
        criteria,
      });

      return selectedModel;
    } catch (error) {
      logger.error("[ModelSelectionService] Model selection failed", {
        error,
        criteria,
      });
      return criteria.fallbackModel || this.config.default;
    }
  }

  /** Scores each candidate model against the criteria. */
  private scoreCandidates(
    models: string[],
    criteria: ModelSelectionCriteria
  ): ModelScore[] {
    return models.map((model) => ({
      model,
      score: this.calculateModelScore(model, criteria),
      capabilities: this.modelCache.get(model)!,
    }));
  }

  /**
   * Returns the model with the highest score. On ties the earliest entry
   * (configuration order) wins, matching the previous stable-sort behavior.
   * Callers must pass a non-empty array.
   */
  private pickHighestScoring(scores: ModelScore[]): string {
    return scores.reduce((best, current) =>
      current.score > best.score ? current : best
    ).model;
  }

  /**
   * Builds the candidate list for the task type, keeping only models with
   * known capabilities and (optionally) all required capability flags.
   */
  private getCandidateModels(criteria: ModelSelectionCriteria): string[] {
    let baseModels: string[];

    switch (criteria.taskType) {
      case "text-generation":
        baseModels = this.config.textGeneration;
        break;
      case "image-generation":
        baseModels = this.config.imageGeneration;
        break;
      case "video-generation":
        baseModels = this.config.videoGeneration;
        break;
      case "code-review":
        baseModels = this.config.codeReview;
        break;
      case "reasoning":
        baseModels = this.config.complexReasoning;
        break;
      case "multimodal":
        // Multimodal = any text model that accepts image, video, or audio input.
        baseModels = this.config.textGeneration.filter((model) => {
          const caps = this.modelCache.get(model);
          return (
            caps && (caps.imageInput || caps.videoInput || caps.audioInput)
          );
        });
        break;
      default:
        baseModels = this.config.textGeneration;
    }

    // Filter out models that don't have capabilities defined
    baseModels = baseModels.filter((model) => this.modelCache.has(model));

    if (criteria.requiredCapabilities) {
      return baseModels.filter((model) => {
        const capabilities = this.modelCache.get(model);
        if (!capabilities) return false;

        // Boolean capabilities must be true; tiered string capabilities
        // (e.g. codeExecution) must be anything other than "none".
        return criteria.requiredCapabilities!.every((capability) => {
          const value = capabilities[capability];
          if (typeof value === "boolean") return value;
          if (typeof value === "string") return value !== "none";
          return Boolean(value);
        });
      });
    }

    return baseModels;
  }

  /** Returns the capability record for a model, or undefined if unknown. */
  getModelCapabilities(modelName: string): ModelCapabilities | undefined {
    return this.modelCache.get(modelName);
  }

  /** True if the model has capabilities registered in the configuration. */
  isModelAvailable(modelName: string): boolean {
    return this.modelCache.has(modelName);
  }

  /** All model names with registered capabilities. */
  getAvailableModels(): string[] {
    return Array.from(this.modelCache.keys());
  }

  /**
   * Checks whether a model is capable of handling the given task type.
   * Unknown models always return false.
   */
  validateModelForTask(
    modelName: string,
    taskType: ModelSelectionCriteria["taskType"]
  ): boolean {
    const capabilities = this.modelCache.get(modelName);
    if (!capabilities) return false;

    switch (taskType) {
      case "text-generation":
        return capabilities.textGeneration;
      case "image-generation":
        return capabilities.imageGeneration;
      case "video-generation":
        return capabilities.videoGeneration;
      case "code-review":
        return capabilities.codeExecution !== "none";
      case "reasoning":
        return capabilities.complexReasoning !== "none";
      case "multimodal":
        return (
          capabilities.imageInput ||
          capabilities.videoInput ||
          capabilities.audioInput
        );
      default:
        return capabilities.textGeneration;
    }
  }

  /** Replaces the active configuration and rebuilds the capability cache. */
  updateConfiguration(newConfig: ModelConfiguration): void {
    this.config = newConfig;
    this.modelCache.clear();
    this.initializeModelCache();
    logger.info("[ModelSelectionService] Configuration updated");
  }

  /**
   * Folds one observed call into a model's rolling performance metrics
   * (incremental running averages for latency and success rate).
   *
   * @param modelName - Model the call was made against.
   * @param latency - Observed call latency in milliseconds.
   * @param success - Whether the call succeeded.
   */
  updatePerformanceMetrics(
    modelName: string,
    latency: number,
    success: boolean
  ): void {
    const existing = this.performanceMetrics.get(modelName) || {
      totalCalls: 0,
      avgLatency: 0,
      successRate: 0,
      lastUpdated: new Date(),
    };

    const newTotalCalls = existing.totalCalls + 1;
    const newAvgLatency =
      (existing.avgLatency * existing.totalCalls + latency) / newTotalCalls;
    const successCount =
      existing.successRate * existing.totalCalls + (success ? 1 : 0);
    const newSuccessRate = successCount / newTotalCalls;

    this.performanceMetrics.set(modelName, {
      totalCalls: newTotalCalls,
      avgLatency: newAvgLatency,
      successRate: newSuccessRate,
      lastUpdated: new Date(),
    });
  }

  /** Returns a defensive copy of the per-model performance metrics. */
  getPerformanceMetrics(): Map<string, ModelPerformanceMetrics> {
    return new Map(this.performanceMetrics);
  }

  /**
   * Returns a copy of the selection history, optionally limited to the
   * most recent `limit` entries (a falsy limit returns everything).
   */
  getSelectionHistory(limit?: number): ModelSelectionHistory[] {
    const history = [...this.selectionHistory];
    return limit ? history.slice(-limit) : history;
  }

  /** Appends one selection record, evicting the oldest beyond maxHistorySize. */
  private recordSelection(
    criteria: ModelSelectionCriteria,
    selectedModel: string,
    scores: ModelScore[],
    selectionTime: number
  ): void {
    const record: ModelSelectionHistory = {
      timestamp: new Date(),
      criteria,
      selectedModel,
      candidateModels: scores.map((s) => s.model),
      scores,
      selectionTime,
    };

    this.selectionHistory.push(record);

    if (this.selectionHistory.length > this.maxHistorySize) {
      this.selectionHistory.shift();
    }
  }

  /**
   * Computes a heuristic score for a model. Higher is better. Combines the
   * caller's cost/speed/quality preferences, URL-context sizing bonuses,
   * and (once at least 5 calls are recorded) runtime performance history.
   */
  private calculateModelScore(
    model: string,
    criteria: ModelSelectionCriteria
  ): number {
    const capabilities = this.modelCache.get(model);
    if (!capabilities) return 0;

    let score = 0;

    // Base score from routing preferences
    if (criteria.preferCost) {
      const costScore =
        capabilities.costTier === "low"
          ? 3
          : capabilities.costTier === "medium"
            ? 2
            : 1;
      score += costScore * 0.4;
    }

    if (criteria.preferSpeed) {
      const speedScore =
        capabilities.speedTier === "fast"
          ? 3
          : capabilities.speedTier === "medium"
            ? 2
            : 1;
      score += speedScore * 0.4;
    }

    if (criteria.preferQuality) {
      const reasoningScore =
        capabilities.complexReasoning === "excellent"
          ? 3
          : capabilities.complexReasoning === "good"
            ? 2
            : 1;
      score += reasoningScore * 0.4;
    }

    // URL context scoring - prefer models with larger context windows for URL-heavy requests
    if (criteria.urlCount && criteria.urlCount > 0) {
      // Bonus for models with large context windows when processing URLs
      if (capabilities.contextWindow >= 1000000) {
        score += Math.min(criteria.urlCount / 5, 2.0); // Up to 2 points for many URLs
      } else if (capabilities.contextWindow >= 500000) {
        score += Math.min(criteria.urlCount / 10, 1.0); // Up to 1 point for medium context
      }

      // Bonus for estimated content size handling
      if (
        criteria.estimatedUrlContentSize &&
        criteria.estimatedUrlContentSize > 0
      ) {
        const sizeInTokens = criteria.estimatedUrlContentSize / 4; // Rough estimate: 4 chars per token
        const contextUtilization = sizeInTokens / capabilities.contextWindow;

        // Prefer models that won't be overwhelmed by the content size
        if (contextUtilization < 0.3) {
          score += 1.5; // Comfortable fit
        } else if (contextUtilization < 0.6) {
          score += 0.5; // Acceptable fit
        } else if (contextUtilization > 0.8) {
          score -= 2.0; // Penalize models that might struggle
        }
      }

      // Slight bonus for models that support URL context natively (Gemini 2.5 models)
      if (model.includes("gemini-2.5")) {
        score += 0.5;
      }
    }

    // Performance metrics influence (heavily weighted)
    const metrics = this.performanceMetrics.get(model);
    if (metrics && metrics.totalCalls >= 5) {
      // Strong preference for models with good performance history
      score += metrics.successRate * 2.0;
      // Prefer lower latency (significant impact)
      const latencyScore = Math.max(0, 1 - metrics.avgLatency / 2000);
      score += latencyScore * 1.5;
    }

    return score;
  }
}

```

--------------------------------------------------------------------------------
/tests/unit/services/ModelSelectionService.test.vitest.ts:
--------------------------------------------------------------------------------

```typescript
// Using vitest globals - see vitest.config.ts globals: true
import { ModelSelectionService } from "../../../src/services/ModelSelectionService.js";
import {
  ModelConfiguration,
  ModelCapabilitiesMap,
} from "../../../src/types/index.js";

// Unit tests for ModelSelectionService: optimal-model selection, task
// validation, performance-metric tracking, selection history, availability
// queries, configuration updates, and fallback error handling.
describe("ModelSelectionService", () => {
  let service: ModelSelectionService;
  let mockConfig: ModelConfiguration;

  beforeEach(() => {
    // Capability fixtures covering a spread of cost/speed/quality tiers:
    // two high-end 2.5 models, a mid 2.0 model, a cheap 1.5 model, and two
    // image-generation models (one text-capable, one not).
    const capabilities: ModelCapabilitiesMap = {
      "gemini-2.5-pro-preview-05-06": {
        textGeneration: true,
        imageInput: true,
        videoInput: true,
        audioInput: true,
        imageGeneration: false,
        videoGeneration: false,
        codeExecution: "excellent",
        complexReasoning: "excellent",
        costTier: "high",
        speedTier: "medium",
        maxTokens: 65536,
        contextWindow: 1048576,
        supportsFunctionCalling: true,
        supportsSystemInstructions: true,
        supportsCaching: true,
      },
      "gemini-2.5-flash-preview-05-20": {
        textGeneration: true,
        imageInput: true,
        videoInput: true,
        audioInput: true,
        imageGeneration: false,
        videoGeneration: false,
        codeExecution: "excellent",
        complexReasoning: "excellent",
        costTier: "medium",
        speedTier: "fast",
        maxTokens: 65536,
        contextWindow: 1048576,
        supportsFunctionCalling: true,
        supportsSystemInstructions: true,
        supportsCaching: true,
      },
      "gemini-2.0-flash": {
        textGeneration: true,
        imageInput: true,
        videoInput: true,
        audioInput: true,
        imageGeneration: false,
        videoGeneration: false,
        codeExecution: "good",
        complexReasoning: "good",
        costTier: "medium",
        speedTier: "fast",
        maxTokens: 8192,
        contextWindow: 1048576,
        supportsFunctionCalling: true,
        supportsSystemInstructions: true,
        supportsCaching: true,
      },
      "gemini-1.5-flash": {
        textGeneration: true,
        imageInput: true,
        videoInput: true,
        audioInput: true,
        imageGeneration: false,
        videoGeneration: false,
        codeExecution: "basic",
        complexReasoning: "basic",
        costTier: "low",
        speedTier: "fast",
        maxTokens: 8192,
        contextWindow: 1000000,
        supportsFunctionCalling: true,
        supportsSystemInstructions: true,
        supportsCaching: true,
      },
      "imagen-3.0-generate-002": {
        textGeneration: false,
        imageInput: false,
        videoInput: false,
        audioInput: false,
        imageGeneration: true,
        videoGeneration: false,
        codeExecution: "none",
        complexReasoning: "none",
        costTier: "medium",
        speedTier: "medium",
        maxTokens: 0,
        contextWindow: 0,
        supportsFunctionCalling: false,
        supportsSystemInstructions: false,
        supportsCaching: false,
      },
      "gemini-2.0-flash-preview-image-generation": {
        textGeneration: true,
        imageInput: true,
        videoInput: false,
        audioInput: false,
        imageGeneration: true,
        videoGeneration: false,
        codeExecution: "basic",
        complexReasoning: "basic",
        costTier: "medium",
        speedTier: "medium",
        maxTokens: 8192,
        contextWindow: 32000,
        supportsFunctionCalling: false,
        supportsSystemInstructions: true,
        supportsCaching: false,
      },
    };

    // Full configuration fixture: per-task model lists plus routing defaults
    // (quality preferred). A fresh service is built before every test.
    mockConfig = {
      default: "gemini-2.5-flash-preview-05-20",
      textGeneration: [
        "gemini-2.5-pro-preview-05-06",
        "gemini-2.5-flash-preview-05-20",
        "gemini-2.0-flash",
        "gemini-1.5-flash",
      ],
      imageGeneration: [
        "imagen-3.0-generate-002",
        "gemini-2.0-flash-preview-image-generation",
      ],
      videoGeneration: [],
      codeReview: [
        "gemini-2.5-pro-preview-05-06",
        "gemini-2.5-flash-preview-05-20",
        "gemini-2.0-flash",
      ],
      complexReasoning: [
        "gemini-2.5-pro-preview-05-06",
        "gemini-2.5-flash-preview-05-20",
      ],
      capabilities,
      routing: {
        preferCostEffective: false,
        preferSpeed: false,
        preferQuality: true,
      },
    };

    service = new ModelSelectionService(mockConfig);
  });

  describe("selectOptimalModel", () => {
    it("should select a model for text generation", async () => {
      const model = await service.selectOptimalModel({
        taskType: "text-generation",
        complexityLevel: "simple",
      });

      expect(mockConfig.textGeneration).toContain(model);
    });

    it("should prefer cost-effective models when specified", async () => {
      const model = await service.selectOptimalModel({
        taskType: "text-generation",
        preferCost: true,
      });

      // gemini-1.5-flash is the only "low" cost-tier fixture.
      const capabilities = service.getModelCapabilities(model);
      expect(capabilities?.costTier).toBe("low");
    });

    it("should prefer fast models when speed is prioritized", async () => {
      const model = await service.selectOptimalModel({
        taskType: "text-generation",
        preferSpeed: true,
      });

      const capabilities = service.getModelCapabilities(model);
      expect(capabilities?.speedTier).toBe("fast");
    });

    it("should select high-quality models for complex tasks", async () => {
      const model = await service.selectOptimalModel({
        taskType: "reasoning",
        complexityLevel: "complex",
        preferQuality: true,
      });

      const capabilities = service.getModelCapabilities(model);
      expect(capabilities?.complexReasoning).toBe("excellent");
    });

    it("should return fallback model when no candidates match", async () => {
      // No text-generation fixture supports imageGeneration, so the
      // candidate list is empty and the explicit fallback must be returned.
      const model = await service.selectOptimalModel({
        taskType: "text-generation",
        requiredCapabilities: ["imageGeneration"],
        fallbackModel: "gemini-1.5-flash",
      });

      expect(model).toBe("gemini-1.5-flash");
    });

    it("should select image generation models correctly", async () => {
      const model = await service.selectOptimalModel({
        taskType: "image-generation",
      });

      expect(mockConfig.imageGeneration).toContain(model);
      const capabilities = service.getModelCapabilities(model);
      expect(capabilities?.imageGeneration).toBe(true);
    });

    it("should filter models by required capabilities", async () => {
      const model = await service.selectOptimalModel({
        taskType: "text-generation",
        requiredCapabilities: ["supportsFunctionCalling", "supportsCaching"],
      });

      const capabilities = service.getModelCapabilities(model);
      expect(capabilities?.supportsFunctionCalling).toBe(true);
      expect(capabilities?.supportsCaching).toBe(true);
    });
  });

  describe("validateModelForTask", () => {
    it("should validate text generation models", () => {
      expect(
        service.validateModelForTask(
          "gemini-2.5-pro-preview-05-06",
          "text-generation"
        )
      ).toBe(true);
      expect(
        service.validateModelForTask(
          "imagen-3.0-generate-002",
          "text-generation"
        )
      ).toBe(false);
    });

    it("should validate image generation models", () => {
      expect(
        service.validateModelForTask(
          "imagen-3.0-generate-002",
          "image-generation"
        )
      ).toBe(true);
      expect(
        service.validateModelForTask(
          "gemini-2.5-pro-preview-05-06",
          "image-generation"
        )
      ).toBe(false);
    });

    it("should validate code review models", () => {
      // Any non-"none" codeExecution tier qualifies for code review.
      expect(
        service.validateModelForTask(
          "gemini-2.5-pro-preview-05-06",
          "code-review"
        )
      ).toBe(true);
      expect(
        service.validateModelForTask("gemini-1.5-flash", "code-review")
      ).toBe(true);
      expect(
        service.validateModelForTask("imagen-3.0-generate-002", "code-review")
      ).toBe(false);
    });

    it("should validate multimodal models", () => {
      expect(
        service.validateModelForTask(
          "gemini-2.5-pro-preview-05-06",
          "multimodal"
        )
      ).toBe(true);
      expect(
        service.validateModelForTask("imagen-3.0-generate-002", "multimodal")
      ).toBe(false);
    });
  });

  describe("updatePerformanceMetrics", () => {
    it("should track performance metrics", () => {
      service.updatePerformanceMetrics(
        "gemini-2.5-pro-preview-05-06",
        1000,
        true
      );
      service.updatePerformanceMetrics(
        "gemini-2.5-pro-preview-05-06",
        1200,
        true
      );
      service.updatePerformanceMetrics(
        "gemini-2.5-pro-preview-05-06",
        800,
        false
      );

      const metrics = service.getPerformanceMetrics();
      const proMetrics = metrics.get("gemini-2.5-pro-preview-05-06");

      // Running averages: (1000+1200+800)/3 = 1000ms, 2/3 success rate.
      expect(proMetrics).toBeDefined();
      expect(proMetrics?.totalCalls).toBe(3);
      expect(proMetrics?.avgLatency).toBe(1000);
      expect(proMetrics?.successRate).toBeCloseTo(0.667, 2);
    });

    it("should influence model selection based on performance", async () => {
      // Record 5 fast, successful calls for flash (the service only weighs
      // metrics once totalCalls >= 5) ...
      service.updatePerformanceMetrics(
        "gemini-2.5-flash-preview-05-20",
        500,
        true
      );
      service.updatePerformanceMetrics(
        "gemini-2.5-flash-preview-05-20",
        600,
        true
      );
      service.updatePerformanceMetrics(
        "gemini-2.5-flash-preview-05-20",
        400,
        true
      );
      service.updatePerformanceMetrics(
        "gemini-2.5-flash-preview-05-20",
        550,
        true
      );
      service.updatePerformanceMetrics(
        "gemini-2.5-flash-preview-05-20",
        450,
        true
      );

      // ... and 5 slow, failing calls for pro.
      service.updatePerformanceMetrics(
        "gemini-2.5-pro-preview-05-06",
        2000,
        false
      );
      service.updatePerformanceMetrics(
        "gemini-2.5-pro-preview-05-06",
        1800,
        false
      );
      service.updatePerformanceMetrics(
        "gemini-2.5-pro-preview-05-06",
        2200,
        false
      );
      service.updatePerformanceMetrics(
        "gemini-2.5-pro-preview-05-06",
        1900,
        false
      );
      service.updatePerformanceMetrics(
        "gemini-2.5-pro-preview-05-06",
        2100,
        false
      );

      const model = await service.selectOptimalModel({
        taskType: "text-generation",
        complexityLevel: "medium",
      });

      expect(model).toBe("gemini-2.5-flash-preview-05-20");
    });
  });

  describe("getSelectionHistory", () => {
    it("should track selection history", async () => {
      await service.selectOptimalModel({ taskType: "text-generation" });
      await service.selectOptimalModel({ taskType: "image-generation" });

      const history = service.getSelectionHistory();
      expect(history).toHaveLength(2);
      expect(history[0].criteria.taskType).toBe("text-generation");
      expect(history[1].criteria.taskType).toBe("image-generation");
    });

    it("should limit history size", async () => {
      // Exceed the service's maxHistorySize (500) to confirm FIFO eviction.
      for (let i = 0; i < 1200; i++) {
        await service.selectOptimalModel({ taskType: "text-generation" });
      }

      const history = service.getSelectionHistory();
      expect(history.length).toBeLessThanOrEqual(500);
    });

    it("should return limited history when requested", async () => {
      for (let i = 0; i < 10; i++) {
        await service.selectOptimalModel({ taskType: "text-generation" });
      }

      const limitedHistory = service.getSelectionHistory(5);
      expect(limitedHistory).toHaveLength(5);
    });
  });

  describe("isModelAvailable", () => {
    it("should check model availability", () => {
      expect(service.isModelAvailable("gemini-2.5-pro-preview-05-06")).toBe(
        true
      );
      expect(service.isModelAvailable("non-existent-model")).toBe(false);
    });
  });

  describe("getAvailableModels", () => {
    it("should return all available models", () => {
      const models = service.getAvailableModels();
      expect(models).toContain("gemini-2.5-pro-preview-05-06");
      expect(models).toContain("gemini-2.5-flash-preview-05-20");
      expect(models).toContain("gemini-1.5-flash");
      expect(models).toContain("imagen-3.0-generate-002");
    });
  });

  describe("updateConfiguration", () => {
    it("should update configuration and reinitialize cache", () => {
      const newConfig = {
        ...mockConfig,
        textGeneration: ["gemini-2.5-pro-preview-05-06"],
      };

      service.updateConfiguration(newConfig);

      const models = service.getAvailableModels();
      expect(models).toContain("gemini-2.5-pro-preview-05-06");
    });
  });

  describe("error handling", () => {
    it("should handle errors gracefully and return fallback", async () => {
      // An empty capability map leaves no valid candidates, so the
      // explicit fallback model must be returned.
      const corruptedService = new ModelSelectionService({
        ...mockConfig,
        capabilities: {},
      });

      const model = await corruptedService.selectOptimalModel({
        taskType: "text-generation",
        fallbackModel: "fallback-model",
      });

      expect(model).toBe("fallback-model");
    });
  });
});

```
Page 3/6FirstPrevNextLast