This is page 7 of 8. Use http://codebase.md/bsmi021/mcp-gemini-server?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .env.example
├── .eslintignore
├── .eslintrc.json
├── .gitignore
├── .prettierrc.json
├── Dockerfile
├── LICENSE
├── package-lock.json
├── package.json
├── README.md
├── review-prompt.txt
├── scripts
│ ├── gemini-review.sh
│ └── run-with-health-check.sh
├── smithery.yaml
├── src
│ ├── config
│ │ └── ConfigurationManager.ts
│ ├── createServer.ts
│ ├── index.ts
│ ├── resources
│ │ └── system-prompt.md
│ ├── server.ts
│ ├── services
│ │ ├── ExampleService.ts
│ │ ├── gemini
│ │ │ ├── GeminiCacheService.ts
│ │ │ ├── GeminiChatService.ts
│ │ │ ├── GeminiContentService.ts
│ │ │ ├── GeminiGitDiffService.ts
│ │ │ ├── GeminiPromptTemplates.ts
│ │ │ ├── GeminiTypes.ts
│ │ │ ├── GeminiUrlContextService.ts
│ │ │ ├── GeminiValidationSchemas.ts
│ │ │ ├── GitHubApiService.ts
│ │ │ ├── GitHubUrlParser.ts
│ │ │ └── ModelMigrationService.ts
│ │ ├── GeminiService.ts
│ │ ├── index.ts
│ │ ├── mcp
│ │ │ ├── index.ts
│ │ │ └── McpClientService.ts
│ │ ├── ModelSelectionService.ts
│ │ ├── session
│ │ │ ├── index.ts
│ │ │ ├── InMemorySessionStore.ts
│ │ │ ├── SessionStore.ts
│ │ │ └── SQLiteSessionStore.ts
│ │ └── SessionService.ts
│ ├── tools
│ │ ├── exampleToolParams.ts
│ │ ├── geminiCacheParams.ts
│ │ ├── geminiCacheTool.ts
│ │ ├── geminiChatParams.ts
│ │ ├── geminiChatTool.ts
│ │ ├── geminiCodeReviewParams.ts
│ │ ├── geminiCodeReviewTool.ts
│ │ ├── geminiGenerateContentConsolidatedParams.ts
│ │ ├── geminiGenerateContentConsolidatedTool.ts
│ │ ├── geminiGenerateImageParams.ts
│ │ ├── geminiGenerateImageTool.ts
│ │ ├── geminiGenericParamSchemas.ts
│ │ ├── geminiRouteMessageParams.ts
│ │ ├── geminiRouteMessageTool.ts
│ │ ├── geminiUrlAnalysisTool.ts
│ │ ├── index.ts
│ │ ├── mcpClientParams.ts
│ │ ├── mcpClientTool.ts
│ │ ├── registration
│ │ │ ├── index.ts
│ │ │ ├── registerAllTools.ts
│ │ │ ├── ToolAdapter.ts
│ │ │ └── ToolRegistry.ts
│ │ ├── schemas
│ │ │ ├── BaseToolSchema.ts
│ │ │ ├── CommonSchemas.ts
│ │ │ ├── index.ts
│ │ │ ├── ToolSchemas.ts
│ │ │ └── writeToFileParams.ts
│ │ └── writeToFileTool.ts
│ ├── types
│ │ ├── exampleServiceTypes.ts
│ │ ├── geminiServiceTypes.ts
│ │ ├── gitdiff-parser.d.ts
│ │ ├── googleGenAI.d.ts
│ │ ├── googleGenAITypes.ts
│ │ ├── index.ts
│ │ ├── micromatch.d.ts
│ │ ├── modelcontextprotocol-sdk.d.ts
│ │ ├── node-fetch.d.ts
│ │ └── serverTypes.ts
│ └── utils
│ ├── errors.ts
│ ├── filePathSecurity.ts
│ ├── FileSecurityService.ts
│ ├── geminiErrors.ts
│ ├── healthCheck.ts
│ ├── index.ts
│ ├── logger.ts
│ ├── RetryService.ts
│ ├── ToolError.ts
│ └── UrlSecurityService.ts
├── tests
│ ├── .env.test.example
│ ├── basic-router.test.vitest.ts
│ ├── e2e
│ │ ├── clients
│ │ │ └── mcp-test-client.ts
│ │ ├── README.md
│ │ └── streamableHttpTransport.test.vitest.ts
│ ├── integration
│ │ ├── dummyMcpServerSse.ts
│ │ ├── dummyMcpServerStdio.ts
│ │ ├── geminiRouterIntegration.test.vitest.ts
│ │ ├── mcpClientIntegration.test.vitest.ts
│ │ ├── multiModelIntegration.test.vitest.ts
│ │ └── urlContextIntegration.test.vitest.ts
│ ├── tsconfig.test.json
│ ├── unit
│ │ ├── config
│ │ │ └── ConfigurationManager.multimodel.test.vitest.ts
│ │ ├── server
│ │ │ └── transportLogic.test.vitest.ts
│ │ ├── services
│ │ │ ├── gemini
│ │ │ │ ├── GeminiChatService.test.vitest.ts
│ │ │ │ ├── GeminiGitDiffService.test.vitest.ts
│ │ │ │ ├── geminiImageGeneration.test.vitest.ts
│ │ │ │ ├── GeminiPromptTemplates.test.vitest.ts
│ │ │ │ ├── GeminiUrlContextService.test.vitest.ts
│ │ │ │ ├── GeminiValidationSchemas.test.vitest.ts
│ │ │ │ ├── GitHubApiService.test.vitest.ts
│ │ │ │ ├── GitHubUrlParser.test.vitest.ts
│ │ │ │ └── ThinkingBudget.test.vitest.ts
│ │ │ ├── mcp
│ │ │ │ └── McpClientService.test.vitest.ts
│ │ │ ├── ModelSelectionService.test.vitest.ts
│ │ │ └── session
│ │ │ └── SQLiteSessionStore.test.vitest.ts
│ │ ├── tools
│ │ │ ├── geminiCacheTool.test.vitest.ts
│ │ │ ├── geminiChatTool.test.vitest.ts
│ │ │ ├── geminiCodeReviewTool.test.vitest.ts
│ │ │ ├── geminiGenerateContentConsolidatedTool.test.vitest.ts
│ │ │ ├── geminiGenerateImageTool.test.vitest.ts
│ │ │ ├── geminiRouteMessageTool.test.vitest.ts
│ │ │ ├── mcpClientTool.test.vitest.ts
│ │ │ ├── mcpToolsTests.test.vitest.ts
│ │ │ └── schemas
│ │ │ ├── BaseToolSchema.test.vitest.ts
│ │ │ ├── ToolParamSchemas.test.vitest.ts
│ │ │ └── ToolSchemas.test.vitest.ts
│ │ └── utils
│ │ ├── errors.test.vitest.ts
│ │ ├── FileSecurityService.test.vitest.ts
│ │ ├── FileSecurityService.vitest.ts
│ │ ├── FileSecurityServiceBasics.test.vitest.ts
│ │ ├── healthCheck.test.vitest.ts
│ │ ├── RetryService.test.vitest.ts
│ │ └── UrlSecurityService.test.vitest.ts
│ └── utils
│ ├── assertions.ts
│ ├── debug-error.ts
│ ├── env-check.ts
│ ├── environment.ts
│ ├── error-helpers.ts
│ ├── express-mocks.ts
│ ├── integration-types.ts
│ ├── mock-types.ts
│ ├── test-fixtures.ts
│ ├── test-generators.ts
│ ├── test-setup.ts
│ └── vitest.d.ts
├── tsconfig.json
├── tsconfig.test.json
├── vitest-globals.d.ts
├── vitest.config.ts
└── vitest.setup.ts
```
# Files
--------------------------------------------------------------------------------
/src/services/gemini/GeminiChatService.ts:
--------------------------------------------------------------------------------
```typescript
1 | import {
2 | GoogleGenAI,
3 | GenerateContentResponse,
4 | HarmCategory,
5 | HarmBlockThreshold,
6 | } from "@google/genai";
7 | import { v4 as uuidv4 } from "uuid";
8 | import {
9 | GeminiApiError,
10 | ValidationError as GeminiValidationError,
11 | } from "../../utils/errors.js";
12 | import { logger } from "../../utils/logger.js";
13 | import {
14 | Content,
15 | GenerationConfig,
16 | SafetySetting,
17 | Tool,
18 | ToolConfig,
19 | FunctionCall,
20 | ChatSession,
21 | ThinkingConfig,
22 | } from "./GeminiTypes.js";
23 | import { RouteMessageParams } from "../GeminiService.js";
24 | import { validateRouteMessageParams } from "./GeminiValidationSchemas.js";
25 | import { ZodError } from "zod";
26 |
27 | /**
28 | * Maps reasoningEffort string values to token budgets
29 | */
30 | const REASONING_EFFORT_MAP: Record<string, number> = {
31 | none: 0,
32 | low: 1024, // 1K tokens
33 | medium: 8192, // 8K tokens
34 | high: 24576, // 24K tokens
35 | };
36 |
37 | /**
38 | * Helper function to process thinkingConfig, mapping reasoningEffort to thinkingBudget if needed
39 | * @param thinkingConfig The thinking configuration object to process
40 | * @returns Processed thinking configuration
41 | */
42 | function processThinkingConfig(
43 | thinkingConfig?: ThinkingConfig
44 | ): ThinkingConfig | undefined {
45 | if (!thinkingConfig) return undefined;
46 |
47 | const processedConfig = { ...thinkingConfig };
48 |
49 | // Map reasoningEffort to thinkingBudget if provided
50 | if (
51 | processedConfig.reasoningEffort &&
52 | REASONING_EFFORT_MAP[processedConfig.reasoningEffort] !== undefined
53 | ) {
54 | processedConfig.thinkingBudget =
55 | REASONING_EFFORT_MAP[processedConfig.reasoningEffort];
56 | logger.debug(
57 | `Mapped reasoning effort '${processedConfig.reasoningEffort}' to thinking budget: ${processedConfig.thinkingBudget} tokens`
58 | );
59 | }
60 |
61 | return processedConfig;
62 | }
63 |
64 | /**
65 | * Helper function to transform validated safety settings to use actual enum values
66 | * @param safetySettings The validated safety settings from Zod
67 | * @returns Safety settings with actual enum values
68 | */
69 | function transformSafetySettings(
70 | safetySettings?: Array<{ category: string; threshold: string }>
71 | ): SafetySetting[] | undefined {
72 | if (!safetySettings) return undefined;
73 |
74 | return safetySettings.map((setting) => ({
75 | category: HarmCategory[setting.category as keyof typeof HarmCategory],
76 | threshold:
77 | HarmBlockThreshold[setting.threshold as keyof typeof HarmBlockThreshold],
78 | }));
79 | }
80 |
/**
 * Interface for the parameters of the startChatSession method
 */
export interface StartChatParams {
  // Model to use; falls back to the service-wide default when omitted.
  modelName?: string;
  // Prior conversation turns used to seed the new session's history.
  history?: Content[];
  // Generation options (may carry a nested thinkingConfig).
  generationConfig?: GenerationConfig;
  // Safety settings stored on the session and reused for its messages.
  safetySettings?: SafetySetting[];
  // Tools (e.g. function declarations) made available to the model.
  tools?: Tool[];
  // System prompt; a plain string is wrapped into a Content object.
  systemInstruction?: Content | string;
  // Name of a cached content entry to attach to the session.
  cachedContentName?: string;
}
93 |
/**
 * Interface for the parameters of the sendMessageToSession method
 */
export interface SendMessageParams {
  // ID of the chat session, as returned by startChatSession.
  sessionId: string;
  // User message text appended to the session history.
  message: string;
  // Per-message generation options; when set, overrides the session's config.
  generationConfig?: GenerationConfig;
  // Per-message safety settings; when set, overrides the session's settings.
  safetySettings?: SafetySetting[];
  // Per-message tools; when set, overrides the session's tools.
  tools?: Tool[];
  // Tool invocation configuration for this message only.
  toolConfig?: ToolConfig;
  // Cached content name; when set, overrides the session's cached content.
  cachedContentName?: string;
}
106 |
/**
 * Interface for the parameters of the sendFunctionResultToSession method
 */
export interface SendFunctionResultParams {
  // ID of the chat session the function result belongs to.
  sessionId: string;
  // Result payload of the executed function, serialized as a string.
  functionResponse: string;
  // Original function call; its name is echoed back to the model
  // (falls back to "function" when absent).
  functionCall?: FunctionCall;
}
115 |
116 | /**
117 | * Service for handling chat session related operations for the Gemini service.
118 | * Manages chat sessions, sending messages, and handling function responses.
119 | */
120 | export class GeminiChatService {
121 | private genAI: GoogleGenAI;
122 | private defaultModelName?: string;
123 | private chatSessions: Map<string, ChatSession> = new Map();
124 |
125 | /**
126 | * Creates a new instance of the GeminiChatService.
127 | * @param genAI The GoogleGenAI instance to use for API calls
128 | * @param defaultModelName Optional default model name to use if not specified in method calls
129 | */
130 | constructor(genAI: GoogleGenAI, defaultModelName?: string) {
131 | this.genAI = genAI;
132 | this.defaultModelName = defaultModelName;
133 | }
134 |
135 | /**
136 | * Starts a new stateful chat session with the Gemini model.
137 | *
138 | * @param params Parameters for starting a chat session
139 | * @returns A unique session ID to identify this chat session
140 | */
141 | public startChatSession(params: StartChatParams = {}): string {
142 | const {
143 | modelName,
144 | history,
145 | generationConfig,
146 | safetySettings,
147 | tools,
148 | systemInstruction,
149 | cachedContentName,
150 | } = params;
151 |
152 | const effectiveModelName = modelName ?? this.defaultModelName;
153 | if (!effectiveModelName) {
154 | throw new GeminiApiError(
155 | "Model name must be provided either as a parameter or via the GOOGLE_GEMINI_MODEL environment variable."
156 | );
157 | }
158 |
159 | // Process systemInstruction if it's a string
160 | let formattedSystemInstruction: Content | undefined;
161 | if (systemInstruction) {
162 | if (typeof systemInstruction === "string") {
163 | formattedSystemInstruction = {
164 | parts: [{ text: systemInstruction }],
165 | };
166 | } else {
167 | formattedSystemInstruction = systemInstruction;
168 | }
169 | }
170 |
171 | try {
172 | // Create the chat session using the models API
173 | logger.debug(`Creating chat session with model: ${effectiveModelName}`);
174 |
175 | // Create chat configuration for v0.10.0
176 | const chatConfig: {
177 | history?: Content[];
178 | generationConfig?: GenerationConfig;
179 | safetySettings?: SafetySetting[];
180 | tools?: Tool[];
181 | systemInstruction?: Content;
182 | cachedContent?: string;
183 | thinkingConfig?: ThinkingConfig;
184 | } = {};
185 |
186 | // Add optional parameters if provided
187 | if (history && Array.isArray(history)) {
188 | chatConfig.history = history;
189 | }
190 | if (generationConfig) {
191 | chatConfig.generationConfig = generationConfig;
192 |
193 | // Extract thinking config if it exists within generation config
194 | if (generationConfig.thinkingConfig) {
195 | chatConfig.thinkingConfig = processThinkingConfig(
196 | generationConfig.thinkingConfig
197 | );
198 | }
199 | }
200 | if (safetySettings && Array.isArray(safetySettings)) {
201 | chatConfig.safetySettings = safetySettings;
202 | }
203 | if (tools && Array.isArray(tools)) {
204 | chatConfig.tools = tools;
205 | }
206 | if (formattedSystemInstruction) {
207 | chatConfig.systemInstruction = formattedSystemInstruction;
208 | }
209 | if (cachedContentName) {
210 | chatConfig.cachedContent = cachedContentName;
211 | }
212 |
213 | // Generate a unique session ID
214 | const sessionId = uuidv4();
215 |
216 | // Create a mock chat session for storing configuration
217 | // In v0.10.0, we don't have direct chat session objects,
218 | // but we'll store the configuration to use for future messages
219 | this.chatSessions.set(sessionId, {
220 | model: effectiveModelName,
221 | config: chatConfig,
222 | history: history || [],
223 | });
224 |
225 | logger.info(
226 | `Chat session created: ${sessionId} using model ${effectiveModelName}`
227 | );
228 |
229 | return sessionId;
230 | } catch (error: unknown) {
231 | logger.error("Error creating chat session:", error);
232 | throw new GeminiApiError(
233 | `Failed to create chat session: ${(error as Error).message}`,
234 | error
235 | );
236 | }
237 | }
238 |
239 | /**
240 | * Sends a message to an existing chat session.
241 | * Uses the generated content API directly since we're managing chat state ourselves.
242 | *
243 | * @param params Parameters for sending a message
244 | * @returns Promise resolving to the chat response
245 | */
246 | public async sendMessageToSession(
247 | params: SendMessageParams
248 | ): Promise<GenerateContentResponse> {
249 | const {
250 | sessionId,
251 | message,
252 | generationConfig,
253 | safetySettings,
254 | tools,
255 | toolConfig,
256 | cachedContentName,
257 | } = params;
258 |
259 | // Get the chat session
260 | const session = this.chatSessions.get(sessionId);
261 | if (!session) {
262 | throw new GeminiApiError(`Chat session not found: ${sessionId}`);
263 | }
264 |
265 | // Create user content from the message
266 | const userContent: Content = {
267 | role: "user",
268 | parts: [{ text: message }],
269 | };
270 |
271 | // Add the user message to the session history
272 | session.history.push(userContent);
273 |
274 | try {
275 | // Prepare the request configuration
276 | const requestConfig: {
277 | model: string;
278 | contents: Content[];
279 | generationConfig?: GenerationConfig;
280 | safetySettings?: SafetySetting[];
281 | tools?: Tool[];
282 | toolConfig?: ToolConfig;
283 | systemInstruction?: Content;
284 | cachedContent?: string;
285 | thinkingConfig?: ThinkingConfig;
286 | } = {
287 | model: session.model,
288 | contents: session.history,
289 | };
290 |
291 | // Add configuration from the original session configuration
292 | if (session.config.systemInstruction) {
293 | requestConfig.systemInstruction = session.config.systemInstruction;
294 | }
295 |
296 | // Override with any per-message configuration options
297 | if (generationConfig) {
298 | requestConfig.generationConfig = generationConfig;
299 |
300 | // Extract thinking config if it exists within generation config
301 | if (generationConfig.thinkingConfig) {
302 | requestConfig.thinkingConfig = processThinkingConfig(
303 | generationConfig.thinkingConfig
304 | );
305 | }
306 | } else if (session.config.generationConfig) {
307 | requestConfig.generationConfig = session.config.generationConfig;
308 |
309 | // Use thinking config from session if available
310 | if (session.config.thinkingConfig) {
311 | requestConfig.thinkingConfig = processThinkingConfig(
312 | session.config.thinkingConfig
313 | );
314 | }
315 | }
316 |
317 | if (safetySettings) {
318 | requestConfig.safetySettings = safetySettings;
319 | } else if (session.config.safetySettings) {
320 | requestConfig.safetySettings = session.config.safetySettings;
321 | }
322 |
323 | if (tools) {
324 | requestConfig.tools = tools;
325 | } else if (session.config.tools) {
326 | requestConfig.tools = session.config.tools;
327 | }
328 |
329 | if (toolConfig) {
330 | requestConfig.toolConfig = toolConfig;
331 | }
332 |
333 | if (cachedContentName) {
334 | requestConfig.cachedContent = cachedContentName;
335 | } else if (session.config.cachedContent) {
336 | requestConfig.cachedContent = session.config.cachedContent;
337 | }
338 |
339 | logger.debug(
340 | `Sending message to session ${sessionId} using model ${session.model}`
341 | );
342 |
343 | // Call the generateContent API
344 | const response = await this.genAI.models.generateContent(requestConfig);
345 |
346 | // Process the response
347 | if (response.candidates && response.candidates.length > 0) {
348 | const assistantMessage = response.candidates[0].content;
349 | if (assistantMessage) {
350 | // Add the assistant response to the session history
351 | session.history.push(assistantMessage);
352 | }
353 | }
354 |
355 | return response;
356 | } catch (error: unknown) {
357 | logger.error(`Error sending message to session ${sessionId}:`, error);
358 | throw new GeminiApiError(
359 | `Failed to send message to session ${sessionId}: ${(error as Error).message}`,
360 | error
361 | );
362 | }
363 | }
364 |
365 | /**
366 | * Sends the result of a function call back to the chat session.
367 | *
368 | * @param params Parameters for sending a function result
369 | * @returns Promise resolving to the chat response
370 | */
371 | public async sendFunctionResultToSession(
372 | params: SendFunctionResultParams
373 | ): Promise<GenerateContentResponse> {
374 | const { sessionId, functionResponse, functionCall } = params;
375 |
376 | // Get the chat session
377 | const session = this.chatSessions.get(sessionId);
378 | if (!session) {
379 | throw new GeminiApiError(`Chat session not found: ${sessionId}`);
380 | }
381 |
382 | // Create function response message
383 | const responseContent: Content = {
384 | role: "function",
385 | parts: [
386 | {
387 | functionResponse: {
388 | name: functionCall?.name || "function",
389 | response: { content: functionResponse },
390 | },
391 | },
392 | ],
393 | };
394 |
395 | // Add the function response to the session history
396 | session.history.push(responseContent);
397 |
398 | try {
399 | // Prepare the request configuration
400 | const requestConfig: {
401 | model: string;
402 | contents: Content[];
403 | generationConfig?: GenerationConfig;
404 | safetySettings?: SafetySetting[];
405 | tools?: Tool[];
406 | toolConfig?: ToolConfig;
407 | systemInstruction?: Content;
408 | cachedContent?: string;
409 | thinkingConfig?: ThinkingConfig;
410 | } = {
411 | model: session.model,
412 | contents: session.history,
413 | };
414 |
415 | // Add configuration from the session
416 | if (session.config.systemInstruction) {
417 | requestConfig.systemInstruction = session.config.systemInstruction;
418 | }
419 |
420 | if (session.config.generationConfig) {
421 | requestConfig.generationConfig = session.config.generationConfig;
422 |
423 | // Use thinking config from session if available
424 | if (session.config.thinkingConfig) {
425 | requestConfig.thinkingConfig = processThinkingConfig(
426 | session.config.thinkingConfig
427 | );
428 | }
429 | }
430 |
431 | if (session.config.safetySettings) {
432 | requestConfig.safetySettings = session.config.safetySettings;
433 | }
434 |
435 | if (session.config.tools) {
436 | requestConfig.tools = session.config.tools;
437 | }
438 |
439 | if (session.config.cachedContent) {
440 | requestConfig.cachedContent = session.config.cachedContent;
441 | }
442 |
443 | logger.debug(
444 | `Sending function result to session ${sessionId} using model ${session.model}`
445 | );
446 |
447 | // Call the generateContent API directly
448 | const response = await this.genAI.models.generateContent(requestConfig);
449 |
450 | // Process the response
451 | if (response.candidates && response.candidates.length > 0) {
452 | const assistantMessage = response.candidates[0].content;
453 | if (assistantMessage) {
454 | // Add the assistant response to the session history
455 | session.history.push(assistantMessage);
456 | }
457 | }
458 |
459 | return response;
460 | } catch (error: unknown) {
461 | logger.error(
462 | `Error sending function result to session ${sessionId}:`,
463 | error
464 | );
465 | throw new GeminiApiError(
466 | `Failed to send function result to session ${sessionId}: ${(error as Error).message}`,
467 | error
468 | );
469 | }
470 | }
471 |
472 | /**
473 | * Routes a message to the most appropriate model based on a routing prompt.
474 | * Uses a two-step process:
475 | * 1. First asks a routing model to determine which model to use
476 | * 2. Then sends the original message to the chosen model
477 | *
478 | * @param params Parameters for routing a message
479 | * @returns Promise resolving to the chat response from the chosen model
480 | * @throws {GeminiApiError} If routing fails or all models are unavailable
481 | */
482 | public async routeMessage(
483 | params: RouteMessageParams
484 | ): Promise<{ response: GenerateContentResponse; chosenModel: string }> {
485 | let validatedParams;
486 |
487 | try {
488 | // Validate all parameters using Zod schema
489 | validatedParams = validateRouteMessageParams(params);
490 | } catch (validationError) {
491 | if (validationError instanceof ZodError) {
492 | const fieldErrors = validationError.errors
493 | .map((err) => `${err.path.join(".")}: ${err.message}`)
494 | .join(", ");
495 | throw new GeminiValidationError(
496 | `Invalid parameters for message routing: ${fieldErrors}`,
497 | validationError.errors[0]?.path.join(".")
498 | );
499 | }
500 | throw validationError;
501 | }
502 |
503 | const {
504 | message,
505 | models,
506 | routingPrompt,
507 | defaultModel,
508 | generationConfig,
509 | safetySettings,
510 | systemInstruction,
511 | } = validatedParams;
512 |
513 | try {
514 | // Use either a specific routing prompt or a default one
515 | const effectiveRoutingPrompt =
516 | routingPrompt ||
517 | `You are a sophisticated model router. Your task is to analyze the following message and determine which AI model would be best suited to handle it. Choose exactly one model from this list: ${models.join(", ")}. Respond with ONLY the name of the chosen model, nothing else.`;
518 |
519 | // Step 1: Determine the appropriate model using routing prompt
520 | // For routing decisions, we'll use a low temperature to ensure deterministic routing
521 | const routingConfig = {
522 | model: models[0], // Use the first model as the router by default
523 | contents: [
524 | {
525 | role: "user",
526 | parts: [
527 | {
528 | text: `${effectiveRoutingPrompt}\n\nUser message: "${message}"`,
529 | },
530 | ],
531 | },
532 | ],
533 | generationConfig: {
534 | temperature: 0.2,
535 | maxOutputTokens: 20, // Keep it short, we just need the model name
536 | ...generationConfig,
537 | },
538 | safetySettings: transformSafetySettings(safetySettings),
539 | };
540 |
541 | // If system instruction is provided, add it to the routing request
542 | if (systemInstruction) {
543 | if (typeof systemInstruction === "string") {
544 | routingConfig.contents.unshift({
545 | role: "system" as const,
546 | parts: [{ text: systemInstruction }],
547 | });
548 | } else {
549 | const formattedInstruction = {
550 | ...systemInstruction,
551 | role: systemInstruction.role || ("system" as const),
552 | };
553 | routingConfig.contents.unshift(
554 | formattedInstruction as { role: string; parts: { text: string }[] }
555 | );
556 | }
557 | }
558 |
559 | logger.debug(`Routing message using model ${routingConfig.model}`);
560 |
561 | // Send the routing request
562 | const routingResponse =
563 | await this.genAI.models.generateContent(routingConfig);
564 |
565 | if (!routingResponse?.text) {
566 | throw new GeminiApiError("Routing model did not return any text");
567 | }
568 |
569 | // Extract the chosen model from the routing response
570 | // Normalize text by removing whitespace and checking for exact matches
571 | const routingResponseText = routingResponse.text.trim();
572 | const chosenModel =
573 | models.find((model) => routingResponseText.includes(model)) ||
574 | defaultModel;
575 |
576 | if (!chosenModel) {
577 | throw new GeminiApiError(
578 | `Routing failed: couldn't identify a valid model from response "${routingResponseText}"`
579 | );
580 | }
581 |
582 | logger.info(
583 | `Routing complete. Selected model: ${chosenModel} for message`
584 | );
585 |
586 | // Step 2: Send the original message to the chosen model
587 | const requestConfig: {
588 | model: string;
589 | contents: Content[];
590 | generationConfig?: GenerationConfig;
591 | safetySettings?: SafetySetting[];
592 | thinkingConfig?: ThinkingConfig;
593 | } = {
594 | model: chosenModel,
595 | contents: [
596 | {
597 | role: "user",
598 | parts: [{ text: message }],
599 | },
600 | ],
601 | generationConfig: generationConfig,
602 | safetySettings: transformSafetySettings(safetySettings),
603 | };
604 |
605 | // Extract thinking config if it exists within generation config
606 | if (generationConfig?.thinkingConfig) {
607 | requestConfig.thinkingConfig = processThinkingConfig(
608 | generationConfig.thinkingConfig
609 | );
610 | }
611 |
612 | // If system instruction is provided, add it to the final request
613 | if (systemInstruction) {
614 | if (typeof systemInstruction === "string") {
615 | requestConfig.contents.unshift({
616 | role: "system" as const,
617 | parts: [{ text: systemInstruction }],
618 | });
619 | } else {
620 | const formattedInstruction = {
621 | ...systemInstruction,
622 | role: systemInstruction.role || ("system" as const),
623 | };
624 | requestConfig.contents.unshift(
625 | formattedInstruction as { role: string; parts: { text: string }[] }
626 | );
627 | }
628 | }
629 |
630 | logger.debug(`Sending routed message to model ${chosenModel}`);
631 |
632 | // Call the generateContent API with the chosen model
633 | const response = await this.genAI.models.generateContent(requestConfig);
634 |
635 | return {
636 | response,
637 | chosenModel,
638 | };
639 | } catch (error: unknown) {
640 | logger.error(`Error routing message: ${(error as Error).message}`, error);
641 | throw new GeminiApiError(
642 | `Failed to route message: ${(error as Error).message}`,
643 | error
644 | );
645 | }
646 | }
647 | }
648 |
```
--------------------------------------------------------------------------------
/src/services/gemini/GeminiGitDiffService.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { GoogleGenAI } from "@google/genai";
2 | import { logger } from "../../utils/logger.js";
3 | import {
4 | GeminiModelError,
5 | GeminiValidationError,
6 | mapGeminiError,
7 | } from "../../utils/geminiErrors.js";
8 | import {
9 | Content,
10 | GenerationConfig,
11 | SafetySetting,
12 | Tool,
13 | } from "./GeminiTypes.js";
14 | import gitdiffParser from "gitdiff-parser";
15 | import micromatch from "micromatch";
16 | import {
17 | getReviewTemplate,
18 | processTemplate,
19 | getFocusInstructions,
20 | } from "./GeminiPromptTemplates.js";
21 |
// Define interface for gitdiff-parser return type
// (mirrors the untyped objects produced by gitdiff-parser's parse()).
interface GitDiffParserFile {
  // Path before the change; "/dev/null" marks a newly added file.
  oldPath: string;
  // Path after the change; "/dev/null" marks a deleted file.
  newPath: string;
  // Blob revision hash on the old side of the diff.
  oldRevision: string;
  // Blob revision hash on the new side of the diff.
  newRevision: string;
  // Diff hunks, each with its header metadata and per-line changes.
  hunks: Array<{
    content: string;
    oldStart: number;
    newStart: number;
    oldLines: number;
    newLines: number;
    changes: Array<{
      content: string;
      type: "insert" | "delete" | "normal";
      lineNumber?: number;
      oldLineNumber?: number;
      newLineNumber?: number;
    }>;
  }>;
  // True when the file is binary (no textual hunks).
  isBinary?: boolean;
  oldEndingNewLine?: boolean;
  newEndingNewLine?: boolean;
  oldMode?: string;
  newMode?: string;
  // Similarity score emitted by git — presumably for renames/copies; confirm.
  similarity?: number;
}
49 |
// Define our interface matching the original GoogleGenAI interface
// (local structural type for the subset of model operations this file uses).
interface GenerativeModel {
  // One-shot generation; response text is exposed as a method.
  generateContent(options: { contents: Content[] }): Promise<{
    response: {
      text(): string;
    };
  }>;
  // Streaming variant of generateContent yielding text chunks.
  generateContentStream(options: { contents: Content[] }): Promise<{
    stream: AsyncGenerator<{
      text(): string;
    }>;
  }>;
  // Stateful chat: the returned object sends messages and exposes history.
  startChat(options?: {
    history?: Content[];
    generationConfig?: GenerationConfig;
    safetySettings?: SafetySetting[];
    tools?: Tool[];
    systemInstruction?: Content;
    cachedContent?: string;
  }): {
    sendMessage(text: string): Promise<{ response: { text(): string } }>;
    sendMessageStream(
      text: string
    ): Promise<{ stream: AsyncGenerator<{ text(): string }> }>;
    getHistory(): Content[];
  };
  // Image generation; may report prompt-safety blocking metadata.
  generateImages(params: {
    prompt: string;
    safetySettings?: SafetySetting[];
    [key: string]: unknown;
  }): Promise<{
    images?: Array<{ data?: string; mimeType?: string }>;
    promptSafetyMetadata?: {
      blocked?: boolean;
      safetyRatings?: Array<{ category: string; probability: string }>;
    };
  }>;
}
88 |
// Define interface for GoogleGenAI with getGenerativeModel method
// NOTE(review): getGenerativeModel is declared here, not on the base
// GoogleGenAI type — confirm the installed SDK version provides it at runtime.
interface ExtendedGoogleGenAI extends GoogleGenAI {
  // Returns a model handle pre-bound to the given model name and settings.
  getGenerativeModel(options: {
    model: string;
    generationConfig?: GenerationConfig;
    safetySettings?: SafetySetting[];
  }): GenerativeModel;
}
97 |
/**
 * Interface for parsed git diff files
 *
 * Same shape as GitDiffParserFile, plus a `type` classification derived from
 * the old/new paths in parseGitDiff.
 */
interface ParsedDiffFile {
  // Path before the change; "/dev/null" for additions.
  oldPath: string;
  // Path after the change; "/dev/null" for deletions.
  newPath: string;
  oldRevision: string;
  newRevision: string;
  hunks: Array<{
    content: string;
    oldStart: number;
    newStart: number;
    oldLines: number;
    newLines: number;
    changes: Array<{
      content: string;
      type: "insert" | "delete" | "normal";
      lineNumber?: number;
      oldLineNumber?: number;
      newLineNumber?: number;
    }>;
  }>;
  isBinary?: boolean;
  // Change classification computed during parsing from old/new paths.
  type: "add" | "delete" | "modify" | "rename";
  oldEndingNewLine?: boolean;
  newEndingNewLine?: boolean;
  oldMode?: string;
  newMode?: string;
  similarity?: number;
}
128 |
/**
 * Options for processing git diffs
 *
 * Consumed by filterAndPrioritizeDiff; field semantics below are inferred
 * from names and should be confirmed against that implementation.
 */
interface DiffProcessingOptions {
  // Cap on the number of files included in the analysis — TODO confirm.
  maxFilesToInclude?: number;
  // Patterns of files to drop — presumably micromatch globs; verify.
  excludePatterns?: string[];
  // Patterns of files to analyze first — presumably micromatch globs; verify.
  prioritizeFiles?: string[];
  // Unchanged context lines to keep around each change — TODO confirm.
  includeContextLines?: number;
  // Per-request diff size limit — TODO confirm relation to maxDiffSizeBytes.
  maxDiffSize?: number;
}
139 |
/**
 * Parameters for reviewing git diffs
 */
export interface GitDiffReviewParams {
  // Raw `git diff` output to analyze (validated against maxDiffSizeBytes).
  diffContent: string;
  // Model to use; falls back to the service default when omitted.
  modelName?: string;
  // Review emphasis — presumably selects a prompt template; verify against
  // GeminiPromptTemplates usage.
  reviewFocus?:
    | "security"
    | "performance"
    | "architecture"
    | "bugs"
    | "general";
  // Free-form repository description to ground the review.
  repositoryContext?: string;
  // Filtering/prioritization options applied to the diff before review.
  diffOptions?: DiffProcessingOptions;
  // Generation options forwarded to the model.
  generationConfig?: GenerationConfig;
  // Safety settings forwarded to the model.
  safetySettings?: SafetySetting[];
  // System prompt; string or structured Content.
  systemInstruction?: Content | string;
  // Reasoning effort level — presumably mapped to a thinking budget; verify.
  reasoningEffort?: "none" | "low" | "medium" | "high";
  // Custom prompt — presumably overrides the built-in templates; verify.
  customPrompt?: string;
}
160 |
161 | /**
162 | * Service for processing and analyzing git diffs using Gemini models
163 | */
164 | export class GeminiGitDiffService {
165 | private genAI: ExtendedGoogleGenAI;
166 | private defaultModelName?: string;
167 | private maxDiffSizeBytes: number;
168 | private defaultExcludePatterns: string[];
169 |
  /**
   * Creates a new instance of GeminiGitDiffService
   *
   * @param genAI The GoogleGenAI instance
   * @param defaultModelName Optional default model name
   * @param maxDiffSizeBytes Maximum allowed size for diff content in bytes
   * @param defaultExcludePatterns Default patterns to exclude from diff analysis
   *   (lockfiles, minified assets, build output, source maps)
   * @param defaultThinkingBudget Optional default thinking budget in tokens (0-24576);
   *   declared as a parameter property, so TypeScript assigns it to `this` automatically
   */
  constructor(
    genAI: ExtendedGoogleGenAI,
    defaultModelName?: string,
    maxDiffSizeBytes: number = 1024 * 1024, // 1MB default
    defaultExcludePatterns: string[] = [
      "package-lock.json",
      "yarn.lock",
      "*.min.js",
      "*.min.css",
      "node_modules/**",
      "dist/**",
      "build/**",
      "*.lock",
      "**/*.map",
    ],
    private defaultThinkingBudget?: number
  ) {
    this.genAI = genAI;
    this.defaultModelName = defaultModelName;
    this.maxDiffSizeBytes = maxDiffSizeBytes;
    this.defaultExcludePatterns = defaultExcludePatterns;
  }
201 |
202 | /**
203 | * Parse raw git diff content into a structured format using gitdiff-parser
204 | *
205 | * @param diffContent Raw git diff content as string
206 | * @returns Array of parsed diff files with additional type information
207 | * @throws GeminiValidationError if diff parsing fails
208 | */
209 | private async parseGitDiff(diffContent: string): Promise<ParsedDiffFile[]> {
210 | try {
211 | // Check diff size limits
212 | if (diffContent.length > this.maxDiffSizeBytes) {
213 | throw new GeminiValidationError(
214 | `Diff content exceeds maximum size (${this.maxDiffSizeBytes} bytes)`,
215 | "diffContent"
216 | );
217 | }
218 |
219 | // Parse using gitdiff-parser
220 | // The gitdiff-parser module doesn't export types properly, but we know its structure
221 | const parsedFiles = (
222 | gitdiffParser as { parse: (diffStr: string) => GitDiffParserFile[] }
223 | ).parse(diffContent);
224 |
225 | // Extend with additional type information
226 | return parsedFiles.map((file) => {
227 | // Determine file type based on paths and changes
228 | let type: "add" | "delete" | "modify" | "rename" = "modify";
229 |
230 | if (file.oldPath === "/dev/null") {
231 | type = "add";
232 | } else if (file.newPath === "/dev/null") {
233 | type = "delete";
234 | } else if (file.oldPath !== file.newPath) {
235 | type = "rename";
236 | } else {
237 | type = "modify";
238 | }
239 |
240 | return {
241 | ...file,
242 | type,
243 | };
244 | });
245 | } catch (error: unknown) {
246 | if (error instanceof GeminiValidationError) {
247 | throw error;
248 | }
249 |
250 | logger.error("Failed to parse git diff:", error);
251 | throw new GeminiValidationError(
252 | "Failed to parse git diff content. Ensure it's valid output from git diff.",
253 | "diffContent"
254 | );
255 | }
256 | }
257 |
258 | /**
259 | * Prioritize and filter diff content based on importance using micromatch
260 | *
261 | * @param parsedDiff Array of parsed diff files
262 | * @param options Options for prioritization and filtering
263 | * @returns Filtered and prioritized diff files
264 | */
265 | private filterAndPrioritizeDiff(
266 | parsedDiff: ParsedDiffFile[],
267 | options: DiffProcessingOptions = {}
268 | ): ParsedDiffFile[] {
269 | let result = [...parsedDiff];
270 |
271 | // Apply exclude patterns
272 | const excludePatterns = [...this.defaultExcludePatterns];
273 | if (options.excludePatterns && options.excludePatterns.length > 0) {
274 | excludePatterns.push(...options.excludePatterns);
275 | }
276 |
277 | if (excludePatterns.length > 0) {
278 | // Use micromatch for glob pattern matching
279 | result = result.filter((file) => {
280 | // For each file path, check if it matches any exclude pattern
281 | return !micromatch.isMatch(file.newPath, excludePatterns);
282 | });
283 | }
284 |
285 | // Apply priority patterns if specified
286 | if (options.prioritizeFiles && options.prioritizeFiles.length > 0) {
287 | // Score files based on prioritization patterns
288 | const scoredFiles = result.map((file) => {
289 | // Calculate a priority score based on matching patterns
290 | // Higher score = higher priority
291 | const priorityScore = options.prioritizeFiles!.reduce(
292 | (score, pattern) => {
293 | // If file path matches the pattern, increase its score
294 | if (micromatch.isMatch(file.newPath, pattern)) {
295 | return score + 1;
296 | }
297 | return score;
298 | },
299 | 0
300 | );
301 |
302 | return { file, priorityScore };
303 | });
304 |
305 | // Sort by priority score (descending)
306 | scoredFiles.sort((a, b) => b.priorityScore - a.priorityScore);
307 |
308 | // Extract the sorted files
309 | result = scoredFiles.map((item) => item.file);
310 | }
311 |
312 | // Filter to max files if specified
313 | if (
314 | options.maxFilesToInclude &&
315 | options.maxFilesToInclude > 0 &&
316 | result.length > options.maxFilesToInclude
317 | ) {
318 | // Take only the specified number of files (already sorted by priority if applicable)
319 | result = result.slice(0, options.maxFilesToInclude);
320 | }
321 |
322 | return result;
323 | }
324 |
325 | /**
326 | * Generate a review prompt for the Gemini model based on the processed diff
327 | *
328 | * @param parsedDiff Processed diff files
329 | * @param repositoryContext Optional context about the repository
330 | * @param reviewFocus Optional focus area for the review
331 | * @returns Formatted prompt string
332 | */
333 | private generateReviewPrompt(
334 | parsedDiff: ParsedDiffFile[],
335 | repositoryContext?: string,
336 | reviewFocus:
337 | | "security"
338 | | "performance"
339 | | "architecture"
340 | | "bugs"
341 | | "general" = "general"
342 | ): string {
343 | // Create file summary
344 | const fileSummary = parsedDiff
345 | .map((file) => {
346 | const hunksCount = file.hunks.length;
347 | const addedLines = file.hunks.reduce((count, hunk) => {
348 | return (
349 | count +
350 | hunk.changes.filter((change) => change.type === "insert").length
351 | );
352 | }, 0);
353 | const removedLines = file.hunks.reduce((count, hunk) => {
354 | return (
355 | count +
356 | hunk.changes.filter((change) => change.type === "delete").length
357 | );
358 | }, 0);
359 |
360 | return `- ${file.newPath}: ${hunksCount} chunk(s), +${addedLines} -${removedLines} lines`;
361 | })
362 | .join("\n");
363 |
364 | // Generate diff content with context
365 | let diffContent = "";
366 | for (const file of parsedDiff) {
367 | diffContent += `\n\nFile: ${file.newPath}\n`;
368 |
369 | for (const hunk of file.hunks) {
370 | diffContent += `@@ -${hunk.oldStart},${hunk.oldLines} +${hunk.newStart},${hunk.newLines} @@\n`;
371 | diffContent += hunk.changes
372 | .map((change) => {
373 | if (change.type === "insert") {
374 | return `+${change.content}`;
375 | } else if (change.type === "delete") {
376 | return `-${change.content}`;
377 | } else {
378 | return ` ${change.content}`;
379 | }
380 | })
381 | .join("\n");
382 | }
383 | }
384 |
385 | // Format repository context if provided
386 | const formattedContext = repositoryContext
387 | ? `Repository context:\n${repositoryContext}`
388 | : "";
389 |
390 | // Include file summary in repository context
391 | const fullContext = formattedContext
392 | ? `${formattedContext}\n\nSummary of changes:\n${fileSummary}`
393 | : `Summary of changes:\n${fileSummary}`;
394 |
395 | // Get the appropriate template based on review focus
396 | const template = getReviewTemplate(reviewFocus);
397 |
398 | // Process the template with the context and diff content
399 | return processTemplate(template, {
400 | repositoryContext: fullContext,
401 | diffContent,
402 | focusInstructions: getFocusInstructions(reviewFocus),
403 | });
404 | }
405 |
406 | /**
407 | * Review a git diff and generate analysis using Gemini models
408 | *
409 | * @param params Parameters for the review operation
410 | * @returns Promise resolving to review text
411 | */
412 | public async reviewDiff(params: GitDiffReviewParams): Promise<string> {
413 | try {
414 | const {
415 | diffContent,
416 | modelName,
417 | reviewFocus = "general",
418 | repositoryContext,
419 | diffOptions = {},
420 | generationConfig = {},
421 | safetySettings,
422 | systemInstruction,
423 | reasoningEffort = "medium",
424 | customPrompt,
425 | } = params;
426 |
427 | // Validate input
428 | if (!diffContent || diffContent.trim().length === 0) {
429 | throw new GeminiValidationError(
430 | "Diff content is required",
431 | "diffContent"
432 | );
433 | }
434 |
435 | // Parse the diff
436 | const parsedDiff = await this.parseGitDiff(diffContent);
437 |
438 | // Filter and prioritize diff content
439 | const processedDiff = this.filterAndPrioritizeDiff(
440 | parsedDiff,
441 | diffOptions
442 | );
443 |
444 | if (processedDiff.length === 0) {
445 | return "No files to review after applying filters.";
446 | }
447 |
448 | // Generate the review prompt
449 | let prompt: string;
450 | if (customPrompt) {
451 | // Use custom prompt if provided
452 | prompt = customPrompt;
453 | // Add the diff content to the custom prompt
454 | prompt += `\n\nAnalyze the following git diff:\n\`\`\`diff\n`;
455 |
456 | // Format diff content for the prompt
457 | for (const file of processedDiff) {
458 | prompt += `\n\nFile: ${file.newPath}\n`;
459 |
460 | for (const hunk of file.hunks) {
461 | prompt += `@@ -${hunk.oldStart},${hunk.oldLines} +${hunk.newStart},${hunk.newLines} @@\n`;
462 | prompt += hunk.changes
463 | .map((change) => {
464 | if (change.type === "insert") {
465 | return `+${change.content}`;
466 | } else if (change.type === "delete") {
467 | return `-${change.content}`;
468 | } else {
469 | return ` ${change.content}`;
470 | }
471 | })
472 | .join("\n");
473 | }
474 | }
475 |
476 | prompt += `\n\`\`\``;
477 | } else {
478 | // Use the standard prompt generator
479 | prompt = this.generateReviewPrompt(
480 | processedDiff,
481 | repositoryContext,
482 | reviewFocus
483 | );
484 | }
485 |
486 | // Select the model to use
487 | const effectiveModelName =
488 | modelName || this.defaultModelName || "gemini-flash-2.0"; // Using cheaper Gemini Flash 2.0 as default
489 |
490 | // Map reasoning effort to thinking budget
491 | let thinkingBudget: number | undefined;
492 | switch (reasoningEffort) {
493 | case "none":
494 | thinkingBudget = 0;
495 | break;
496 | case "low":
497 | thinkingBudget = 2048;
498 | break;
499 | case "medium":
500 | thinkingBudget = 4096;
501 | break;
502 | case "high":
503 | thinkingBudget = 8192;
504 | break;
505 | default:
506 | thinkingBudget = this.defaultThinkingBudget;
507 | }
508 |
509 | // Update generation config with thinking budget if specified
510 | const updatedGenerationConfig = {
511 | ...generationConfig,
512 | };
513 |
514 | if (thinkingBudget !== undefined) {
515 | updatedGenerationConfig.thinkingBudget = thinkingBudget;
516 | }
517 |
518 | // Get model instance
519 | const model = this.genAI.getGenerativeModel({
520 | model: effectiveModelName,
521 | generationConfig: updatedGenerationConfig,
522 | safetySettings,
523 | });
524 |
525 | // Create the content parts with system instructions if provided
526 | const contentParts: Content[] = [];
527 |
528 | if (systemInstruction) {
529 | if (typeof systemInstruction === "string") {
530 | contentParts.push({
531 | role: "system",
532 | parts: [{ text: systemInstruction }],
533 | });
534 | } else {
535 | contentParts.push(systemInstruction);
536 | }
537 | }
538 |
539 | contentParts.push({
540 | role: "user",
541 | parts: [{ text: prompt }],
542 | });
543 |
544 | // Generate content
545 | const result = await model.generateContent({
546 | contents: contentParts,
547 | });
548 |
549 | // Extract text from response
550 | if (!result.response.text()) {
551 | throw new GeminiModelError(
552 | "Model returned empty response",
553 | effectiveModelName
554 | );
555 | }
556 |
557 | return result.response.text();
558 | } catch (error: unknown) {
559 | logger.error("Error reviewing git diff:", error);
560 | throw mapGeminiError(error, "reviewGitDiff");
561 | }
562 | }
563 |
564 | /**
565 | * Stream review content for a git diff
566 | *
567 | * @param params Parameters for the review operation
568 | * @returns AsyncGenerator yielding review content chunks
569 | */
570 | public async *reviewDiffStream(
571 | params: GitDiffReviewParams
572 | ): AsyncGenerator<string> {
573 | try {
574 | const {
575 | diffContent,
576 | modelName,
577 | reviewFocus = "general",
578 | repositoryContext,
579 | diffOptions = {},
580 | generationConfig = {},
581 | safetySettings,
582 | systemInstruction,
583 | reasoningEffort = "medium",
584 | customPrompt,
585 | } = params;
586 |
587 | // Validate input
588 | if (!diffContent || diffContent.trim().length === 0) {
589 | throw new GeminiValidationError(
590 | "Diff content is required",
591 | "diffContent"
592 | );
593 | }
594 |
595 | // Parse the diff
596 | const parsedDiff = await this.parseGitDiff(diffContent);
597 |
598 | // Filter and prioritize diff content
599 | const processedDiff = this.filterAndPrioritizeDiff(
600 | parsedDiff,
601 | diffOptions
602 | );
603 |
604 | if (processedDiff.length === 0) {
605 | yield "No files to review after applying filters.";
606 | return;
607 | }
608 |
609 | // Generate the review prompt
610 | let prompt: string;
611 | if (customPrompt) {
612 | // Use custom prompt if provided
613 | prompt = customPrompt;
614 | // Add the diff content to the custom prompt
615 | prompt += `\n\nAnalyze the following git diff:\n\`\`\`diff\n`;
616 |
617 | // Format diff content for the prompt
618 | for (const file of processedDiff) {
619 | prompt += `\n\nFile: ${file.newPath}\n`;
620 |
621 | for (const hunk of file.hunks) {
622 | prompt += `@@ -${hunk.oldStart},${hunk.oldLines} +${hunk.newStart},${hunk.newLines} @@\n`;
623 | prompt += hunk.changes
624 | .map((change) => {
625 | if (change.type === "insert") {
626 | return `+${change.content}`;
627 | } else if (change.type === "delete") {
628 | return `-${change.content}`;
629 | } else {
630 | return ` ${change.content}`;
631 | }
632 | })
633 | .join("\n");
634 | }
635 | }
636 |
637 | prompt += `\n\`\`\``;
638 | } else {
639 | // Use the standard prompt generator
640 | prompt = this.generateReviewPrompt(
641 | processedDiff,
642 | repositoryContext,
643 | reviewFocus
644 | );
645 | }
646 |
647 | // Select the model to use
648 | const effectiveModelName =
649 | modelName || this.defaultModelName || "gemini-flash-2.0"; // Using cheaper Gemini Flash 2.0 as default
650 |
651 | // Map reasoning effort to thinking budget
652 | let thinkingBudget: number | undefined;
653 | switch (reasoningEffort) {
654 | case "none":
655 | thinkingBudget = 0;
656 | break;
657 | case "low":
658 | thinkingBudget = 2048;
659 | break;
660 | case "medium":
661 | thinkingBudget = 4096;
662 | break;
663 | case "high":
664 | thinkingBudget = 8192;
665 | break;
666 | default:
667 | thinkingBudget = this.defaultThinkingBudget;
668 | }
669 |
670 | // Update generation config with thinking budget if specified
671 | const updatedGenerationConfig = {
672 | ...generationConfig,
673 | };
674 |
675 | if (thinkingBudget !== undefined) {
676 | updatedGenerationConfig.thinkingBudget = thinkingBudget;
677 | }
678 |
679 | // Get model instance
680 | const model = this.genAI.getGenerativeModel({
681 | model: effectiveModelName,
682 | generationConfig: updatedGenerationConfig,
683 | safetySettings,
684 | });
685 |
686 | // Create the content parts with system instructions if provided
687 | const contentParts: Content[] = [];
688 |
689 | if (systemInstruction) {
690 | if (typeof systemInstruction === "string") {
691 | contentParts.push({
692 | role: "system",
693 | parts: [{ text: systemInstruction }],
694 | });
695 | } else {
696 | contentParts.push(systemInstruction);
697 | }
698 | }
699 |
700 | contentParts.push({
701 | role: "user",
702 | parts: [{ text: prompt }],
703 | });
704 |
705 | // Generate content with streaming
706 | const result = await model.generateContentStream({
707 | contents: contentParts,
708 | });
709 |
710 | // Stream chunks
711 | for await (const chunk of result.stream) {
712 | const chunkText = chunk.text();
713 | if (chunkText) {
714 | yield chunkText;
715 | }
716 | }
717 | } catch (error: unknown) {
718 | logger.error("Error streaming git diff review:", error);
719 | throw mapGeminiError(error, "reviewGitDiffStream");
720 | }
721 | }
722 | }
723 |
```
--------------------------------------------------------------------------------
/src/services/gemini/GitHubApiService.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { Octokit } from "@octokit/rest";
2 | import { graphql } from "@octokit/graphql";
3 | import { RequestError } from "@octokit/request-error";
4 | import { logger } from "../../utils/logger.js";
5 | import { ConfigurationManager } from "../../config/ConfigurationManager.js";
6 | import { GeminiValidationError } from "../../utils/geminiErrors.js";
7 | import { GitHubUrlParser } from "./GitHubUrlParser.js";
8 | import KeyV from "keyv";
9 |
/**
 * Interface for repository content
 */
export interface RepoContent {
  /** File or directory name (last path segment). */
  name: string;
  /** Path relative to the repository root. */
  path: string;
  /** File content; empty string in directory listings (see listDirectory). */
  content: string;
  /** Entry kind as reported by the GitHub contents API. */
  type: "file" | "dir" | "symlink";
  /** Size in bytes. */
  size: number;
  /** Git object SHA. */
  sha: string;
  /** REST API URL for this entry. */
  url: string;
  /** Web URL; empty string when the API returns null (see listDirectory). */
  html_url: string;
}
23 |
/**
 * Interface for a pull request
 *
 * Mirrors a subset of the GitHub REST API pull-request response shape.
 */
export interface PullRequest {
  /** Pull request number within the repository. */
  number: number;
  title: string;
  body: string;
  /** GitHub-reported state string (e.g. "open", "closed"). */
  state: string;
  /** Source branch of the PR. */
  head: {
    ref: string;
    sha: string;
    repo: {
      full_name: string;
    };
  };
  /** Target branch of the PR. */
  base: {
    ref: string;
    sha: string;
    repo: {
      full_name: string;
    };
  };
  /** PR author. */
  user: {
    login: string;
  };
  html_url: string;
  created_at: string;
  updated_at: string;
  /** Merge timestamp, or null when not merged. */
  merged_at: string | null;
  /** Mergeability; per GitHub API docs, null while still being computed. */
  mergeable: boolean | null;
  mergeable_state: string;
  changed_files: number;
  additions: number;
  deletions: number;
}
59 |
/**
 * Interface for a PR file
 */
export interface PrFile {
  /** Path of the changed file. */
  filename: string;
  /** Change status string (e.g. "added", "modified", "removed"). */
  status: string;
  /** Lines added. */
  additions: number;
  /** Lines removed. */
  deletions: number;
  /** Total changed lines as reported by GitHub. */
  changes: number;
  /** Unified diff patch; may be absent (e.g. binary files) per GitHub API. */
  patch?: string;
  /** API URL for the file's contents. */
  contents_url: string;
}
72 |
/**
 * Interface for cache configuration
 */
interface CacheConfig {
  /** Whether caching is active; when false every call fetches fresh data. */
  enabled: boolean;
  ttl: number; // Time-to-live in seconds
}
80 |
/**
 * Service for interacting with the GitHub API
 * Provides methods for fetching repository content, PR information, and diffs
 */
export class GitHubApiService {
  // REST client (authenticated when a token is available).
  private octokit: Octokit;
  // GraphQL client configured with the auth header in the constructor.
  private graphqlWithAuth: typeof graphql;
  // Keyv-backed response cache, namespaced "github-api-cache".
  private cache: KeyV;
  // Cache enablement and TTL settings.
  private cacheConfig: CacheConfig;
  private rateLimitRemaining: number = 5000; // Default for authenticated users
  // When the current rate-limit window resets (updated by checkRateLimit).
  private rateLimitResetTime: Date = new Date();
  // Requests issued so far; used to refresh the limit every 20th call.
  private requestCount: number = 0;
94 | /**
95 | * Creates a new instance of GitHubApiService
96 | * @param apiToken Optional GitHub API token, will use token from ConfigurationManager if not provided
97 | * @param cacheEnabled Whether to enable caching (default: true)
98 | * @param cacheTtl Time-to-live for cache entries in seconds (default: 3600 = 1 hour)
99 | */
100 | constructor(
101 | apiToken?: string,
102 | cacheEnabled: boolean = true,
103 | cacheTtl: number = 3600
104 | ) {
105 | // Get token from ConfigurationManager if not provided
106 | if (!apiToken) {
107 | const configManager = ConfigurationManager.getInstance();
108 | apiToken = configManager.getGitHubApiToken();
109 |
110 | if (!apiToken) {
111 | logger.warn(
112 | "GitHub API token not provided. Some operations may be rate-limited or fail for private repositories."
113 | );
114 | }
115 | }
116 |
117 | // Initialize Octokit
118 | this.octokit = new Octokit({
119 | auth: apiToken,
120 | });
121 |
122 | // Initialize GraphQL with auth
123 | this.graphqlWithAuth = graphql.defaults({
124 | headers: {
125 | authorization: `token ${apiToken}`,
126 | },
127 | });
128 |
129 | // Configure caching
130 | this.cacheConfig = {
131 | enabled: cacheEnabled,
132 | ttl: cacheTtl,
133 | };
134 |
135 | // Initialize cache
136 | this.cache = new KeyV({
137 | namespace: "github-api-cache",
138 | ttl: cacheTtl * 1000, // Convert to milliseconds
139 | });
140 |
141 | // Check the rate limit initially
142 | this.checkRateLimit().catch((error) => {
143 | logger.warn("Failed to check initial rate limit", { error });
144 | });
145 | }
146 |
147 | /**
148 | * Check the current rate limit status
149 | * @returns Promise resolving to the rate limit info
150 | */
151 | public async checkRateLimit(): Promise<{
152 | limit: number;
153 | remaining: number;
154 | resetDate: Date;
155 | }> {
156 | try {
157 | const response = await this.octokit.rateLimit.get();
158 | const { limit, remaining, reset } = response.data.resources.core;
159 |
160 | this.rateLimitRemaining = remaining;
161 | this.rateLimitResetTime = new Date(reset * 1000);
162 |
163 | // Log warning if rate limit is getting low
164 | if (remaining < limit * 0.2) {
165 | logger.warn(
166 | `GitHub API rate limit is getting low: ${remaining}/${limit} remaining, resets at ${this.rateLimitResetTime.toISOString()}`
167 | );
168 | }
169 |
170 | return {
171 | limit,
172 | remaining,
173 | resetDate: this.rateLimitResetTime,
174 | };
175 | } catch (error: unknown) {
176 | logger.error("Failed to check rate limit", { error });
177 | throw new Error("Failed to check GitHub API rate limit");
178 | }
179 | }
180 |
181 | /**
182 | * Check if we can make a request, considering rate limits
183 | * @throws Error if rate limit is exceeded
184 | */
185 | private async checkBeforeRequest(): Promise<void> {
186 | this.requestCount++;
187 |
188 | // Periodically check the rate limit (every 20 requests)
189 | if (this.requestCount % 20 === 0) {
190 | await this.checkRateLimit();
191 | }
192 |
193 | // Check if we're close to the rate limit
194 | if (this.rateLimitRemaining < 10) {
195 | const now = new Date();
196 | const minutesUntilReset = Math.ceil(
197 | (this.rateLimitResetTime.getTime() - now.getTime()) / (60 * 1000)
198 | );
199 |
200 | throw new Error(
201 | `GitHub API rate limit nearly exceeded. ${this.rateLimitRemaining} requests remaining. Resets in ${minutesUntilReset} minutes.`
202 | );
203 | }
204 | }
205 |
206 | /**
207 | * Get the cached value or fetch it if not in cache
208 | * @param cacheKey The cache key
209 | * @param fetchFn Function to fetch the value if not in cache
210 | * @returns The cached or freshly fetched value
211 | */
212 | private async getCachedOrFetch<T>(
213 | cacheKey: string,
214 | fetchFn: () => Promise<T>
215 | ): Promise<T> {
216 | if (this.cacheConfig.enabled) {
217 | // Try to get from cache
218 | const cachedValue = await this.cache.get(cacheKey);
219 | if (cachedValue !== undefined) {
220 | logger.debug(`Cache hit for ${cacheKey}`);
221 | return cachedValue as T;
222 | }
223 | }
224 |
225 | // Not in cache or caching disabled, fetch fresh data
226 | logger.debug(`Cache miss for ${cacheKey}, fetching fresh data`);
227 | const freshValue = await fetchFn();
228 |
229 | // Store in cache if enabled
230 | if (this.cacheConfig.enabled) {
231 | await this.cache.set(cacheKey, freshValue);
232 | }
233 |
234 | return freshValue;
235 | }
236 |
237 | /**
238 | * Get the contents of a file in a repository
239 | * @param owner Repository owner
240 | * @param repo Repository name
241 | * @param path Path to the file
242 | * @param ref Optional reference (branch, tag, or commit SHA)
243 | * @returns Promise resolving to the file content
244 | */
245 | public async getFileContent(
246 | owner: string,
247 | repo: string,
248 | path: string,
249 | ref?: string
250 | ): Promise<string> {
251 | const cacheKey = `file:${owner}/${repo}/${path}${ref ? `@${ref}` : ""}`;
252 |
253 | return this.getCachedOrFetch(cacheKey, async () => {
254 | await this.checkBeforeRequest();
255 |
256 | try {
257 | const response = await this.octokit.repos.getContent({
258 | owner,
259 | repo,
260 | path,
261 | ref,
262 | });
263 |
264 | // Handle directory case
265 | if (Array.isArray(response.data)) {
266 | throw new Error(`Path ${path} is a directory, not a file`);
267 | }
268 |
269 | // Handle file case
270 | const fileData = response.data as {
271 | type: string;
272 | content?: string;
273 | encoding?: string;
274 | };
275 |
276 | if (fileData.type !== "file" || !fileData.content) {
277 | throw new Error(`Path ${path} is not a file or has no content`);
278 | }
279 |
280 | // Decode content (usually base64)
281 | if (fileData.encoding === "base64") {
282 | return Buffer.from(fileData.content, "base64").toString("utf-8");
283 | }
284 |
285 | return fileData.content;
286 | } catch (error: unknown) {
287 | if (error instanceof RequestError && error.status === 404) {
288 | throw new GeminiValidationError(
289 | `File not found: ${path} in ${owner}/${repo}`,
290 | "path"
291 | );
292 | }
293 | logger.error("Error fetching file content", { error });
294 | throw new Error(
295 | `Failed to fetch file content for ${path} in ${owner}/${repo}`
296 | );
297 | }
298 | });
299 | }
300 |
301 | /**
302 | * List files in a repository directory
303 | * @param owner Repository owner
304 | * @param repo Repository name
305 | * @param path Path to the directory
306 | * @param ref Optional reference (branch, tag, or commit SHA)
307 | * @returns Promise resolving to an array of repository content items
308 | */
309 | public async listDirectory(
310 | owner: string,
311 | repo: string,
312 | path: string = "",
313 | ref?: string
314 | ): Promise<RepoContent[]> {
315 | const cacheKey = `dir:${owner}/${repo}/${path}${ref ? `@${ref}` : ""}`;
316 |
317 | return this.getCachedOrFetch(cacheKey, async () => {
318 | await this.checkBeforeRequest();
319 |
320 | try {
321 | const response = await this.octokit.repos.getContent({
322 | owner,
323 | repo,
324 | path,
325 | ref,
326 | });
327 |
328 | // Handle file case (should be a directory)
329 | if (!Array.isArray(response.data)) {
330 | throw new Error(`Path ${path} is a file, not a directory`);
331 | }
332 |
333 | // Map to standardized structure and ensure html_url is never null
334 | return response.data.map((item) => ({
335 | name: item.name,
336 | path: item.path,
337 | content: "",
338 | type: item.type as "file" | "dir" | "symlink",
339 | size: item.size,
340 | sha: item.sha,
341 | url: item.url,
342 | html_url: item.html_url || "", // Convert null to empty string
343 | }));
344 | } catch (error: unknown) {
345 | if (error instanceof RequestError && error.status === 404) {
346 | throw new GeminiValidationError(
347 | `Directory not found: ${path} in ${owner}/${repo}`,
348 | "path"
349 | );
350 | }
351 | logger.error("Error listing directory", { error });
352 | throw new Error(
353 | `Failed to list directory for ${path} in ${owner}/${repo}`
354 | );
355 | }
356 | });
357 | }
358 |
359 | /**
360 | * Get Pull Request details
361 | * @param owner Repository owner
362 | * @param repo Repository name
363 | * @param prNumber Pull request number
364 | * @returns Promise resolving to pull request details
365 | */
366 | public async getPullRequest(
367 | owner: string,
368 | repo: string,
369 | prNumber: number
370 | ): Promise<PullRequest> {
371 | const cacheKey = `pr:${owner}/${repo}/${prNumber}`;
372 |
373 | return this.getCachedOrFetch(cacheKey, async () => {
374 | await this.checkBeforeRequest();
375 |
376 | try {
377 | const response = await this.octokit.pulls.get({
378 | owner,
379 | repo,
380 | pull_number: prNumber,
381 | });
382 |
383 | return response.data as PullRequest;
384 | } catch (error: unknown) {
385 | if (error instanceof RequestError && error.status === 404) {
386 | throw new GeminiValidationError(
387 | `Pull request not found: #${prNumber} in ${owner}/${repo}`,
388 | "prNumber"
389 | );
390 | }
391 | logger.error("Error fetching pull request", { error });
392 | throw new Error(
393 | `Failed to fetch pull request #${prNumber} from ${owner}/${repo}`
394 | );
395 | }
396 | });
397 | }
398 |
399 | /**
400 | * Get files changed in a Pull Request
401 | * @param owner Repository owner
402 | * @param repo Repository name
403 | * @param prNumber Pull request number
404 | * @returns Promise resolving to an array of changed files
405 | */
406 | public async getPullRequestFiles(
407 | owner: string,
408 | repo: string,
409 | prNumber: number
410 | ): Promise<PrFile[]> {
411 | const cacheKey = `pr-files:${owner}/${repo}/${prNumber}`;
412 |
413 | return this.getCachedOrFetch(cacheKey, async () => {
414 | await this.checkBeforeRequest();
415 |
416 | try {
417 | const response = await this.octokit.pulls.listFiles({
418 | owner,
419 | repo,
420 | pull_number: prNumber,
421 | per_page: 100, // Get up to 100 files per page
422 | });
423 |
424 | return response.data as PrFile[];
425 | } catch (error: unknown) {
426 | if (error instanceof RequestError && error.status === 404) {
427 | throw new GeminiValidationError(
428 | `Pull request not found: #${prNumber} in ${owner}/${repo}`,
429 | "prNumber"
430 | );
431 | }
432 | logger.error("Error fetching pull request files", { error });
433 | throw new Error(
434 | `Failed to fetch files for PR #${prNumber} from ${owner}/${repo}`
435 | );
436 | }
437 | });
438 | }
439 |
440 | /**
441 | * Get the git diff for a Pull Request
442 | * @param owner Repository owner
443 | * @param repo Repository name
444 | * @param prNumber Pull request number
445 | * @returns Promise resolving to the PR diff as a string
446 | */
447 | public async getPullRequestDiff(
448 | owner: string,
449 | repo: string,
450 | prNumber: number
451 | ): Promise<string> {
452 | const cacheKey = `pr-diff:${owner}/${repo}/${prNumber}`;
453 |
454 | return this.getCachedOrFetch(cacheKey, async () => {
455 | await this.checkBeforeRequest();
456 |
457 | try {
458 | // Get the diff directly using the GitHub API's raw format
459 | const response = await this.octokit.request(
460 | `GET /repos/{owner}/{repo}/pulls/{pull_number}`,
461 | {
462 | owner,
463 | repo,
464 | pull_number: prNumber,
465 | headers: {
466 | accept: "application/vnd.github.v3.diff",
467 | },
468 | }
469 | );
470 |
471 | // The API returns a diff as text when using the diff content type
472 | return String(response.data);
473 | } catch (error: unknown) {
474 | if (error instanceof RequestError && error.status === 404) {
475 | throw new GeminiValidationError(
476 | `Pull request not found: #${prNumber} in ${owner}/${repo}`,
477 | "prNumber"
478 | );
479 | }
480 | logger.error("Error fetching pull request diff", { error });
481 | throw new Error(
482 | `Failed to fetch diff for PR #${prNumber} from ${owner}/${repo}`
483 | );
484 | }
485 | });
486 | }
487 |
488 | /**
489 | * Get information about the default branch
490 | * @param owner Repository owner
491 | * @param repo Repository name
492 | * @returns Promise resolving to the default branch name
493 | */
494 | public async getDefaultBranch(owner: string, repo: string): Promise<string> {
495 | const cacheKey = `default-branch:${owner}/${repo}`;
496 |
497 | return this.getCachedOrFetch(cacheKey, async () => {
498 | await this.checkBeforeRequest();
499 |
500 | try {
501 | const response = await this.octokit.repos.get({
502 | owner,
503 | repo,
504 | });
505 |
506 | return response.data.default_branch;
507 | } catch (error: unknown) {
508 | if (error instanceof RequestError && error.status === 404) {
509 | throw new GeminiValidationError(
510 | `Repository not found: ${owner}/${repo}`,
511 | "repo"
512 | );
513 | }
514 | logger.error("Error fetching repository info", { error });
515 | throw new Error(
516 | `Failed to fetch repository information for ${owner}/${repo}`
517 | );
518 | }
519 | });
520 | }
521 |
522 | /**
523 | * Get repository contents using a GitHub URL
524 | * @param githubUrl GitHub URL (repo, branch, PR, etc.)
525 | * @returns Promise resolving to repository information and contents
526 | */
527 | public async getRepositoryInfoFromUrl(githubUrl: string): Promise<{
528 | owner: string;
529 | repo: string;
530 | type: string;
531 | branch?: string;
532 | prNumber?: number;
533 | issueNumber?: number;
534 | }> {
535 | // Parse the GitHub URL
536 | const parsedUrl = GitHubUrlParser.parse(githubUrl);
537 | if (!parsedUrl) {
538 | throw new GeminiValidationError(
539 | `Invalid GitHub URL: ${githubUrl}`,
540 | "githubUrl"
541 | );
542 | }
543 |
544 | const { owner, repo, type } = parsedUrl;
545 | const result: {
546 | owner: string;
547 | repo: string;
548 | type: string;
549 | branch?: string;
550 | prNumber?: number;
551 | issueNumber?: number;
552 | } = { owner, repo, type };
553 |
554 | // Add type-specific information
555 | if (parsedUrl.branch) {
556 | result.branch = parsedUrl.branch;
557 | } else if (parsedUrl.prNumber) {
558 | result.prNumber = parseInt(parsedUrl.prNumber, 10);
559 | } else if (parsedUrl.issueNumber) {
560 | result.issueNumber = parseInt(parsedUrl.issueNumber, 10);
561 | }
562 |
563 | return result;
564 | }
565 |
566 | /**
567 | * Processing repository data using GraphQL for more efficient querying
568 | * @param owner Repository owner
569 | * @param repo Repository name
570 | * @returns Promise resolving to repository information
571 | */
572 | public async getRepositoryOverview(
573 | owner: string,
574 | repo: string
575 | ): Promise<{
576 | name: string;
577 | description: string;
578 | defaultBranch: string;
579 | language: string;
580 | languages: Array<{ name: string; percentage: number }>;
581 | stars: number;
582 | forks: number;
583 | openIssues: number;
584 | openPRs: number;
585 | lastUpdated: string;
586 | }> {
587 | const cacheKey = `repo-overview:${owner}/${repo}`;
588 |
589 | return this.getCachedOrFetch(cacheKey, async () => {
590 | await this.checkBeforeRequest();
591 |
592 | try {
593 | // Define the expected type of the GraphQL result
594 | interface GraphQLRepoResult {
595 | repository: {
596 | name: string;
597 | description: string | null;
598 | defaultBranchRef: {
599 | name: string;
600 | };
601 | primaryLanguage: {
602 | name: string;
603 | } | null;
604 | languages: {
605 | edges: Array<{
606 | node: {
607 | name: string;
608 | };
609 | size: number;
610 | }>;
611 | totalSize: number;
612 | };
613 | stargazerCount: number;
614 | forkCount: number;
615 | issues: {
616 | totalCount: number;
617 | };
618 | pullRequests: {
619 | totalCount: number;
620 | };
621 | updatedAt: string;
622 | };
623 | }
624 |
625 | const result = await this.graphqlWithAuth<GraphQLRepoResult>(
626 | `
627 | query getRepoOverview($owner: String!, $repo: String!) {
628 | repository(owner: $owner, name: $repo) {
629 | name
630 | description
631 | defaultBranchRef {
632 | name
633 | }
634 | primaryLanguage {
635 | name
636 | }
637 | languages(first: 10, orderBy: {field: SIZE, direction: DESC}) {
638 | edges {
639 | node {
640 | name
641 | }
642 | size
643 | }
644 | totalSize
645 | }
646 | stargazerCount
647 | forkCount
648 | issues(states: OPEN) {
649 | totalCount
650 | }
651 | pullRequests(states: OPEN) {
652 | totalCount
653 | }
654 | updatedAt
655 | }
656 | }
657 | `,
658 | {
659 | owner,
660 | repo,
661 | }
662 | );
663 |
664 | // Process languages data
665 | const totalSize = result.repository.languages.totalSize;
666 | const languages = result.repository.languages.edges.map((edge) => ({
667 | name: edge.node.name,
668 | percentage: Math.round((edge.size / totalSize) * 100),
669 | }));
670 |
671 | return {
672 | name: result.repository.name,
673 | description: result.repository.description || "",
674 | defaultBranch: result.repository.defaultBranchRef.name,
675 | language: result.repository.primaryLanguage?.name || "Unknown",
676 | languages,
677 | stars: result.repository.stargazerCount,
678 | forks: result.repository.forkCount,
679 | openIssues: result.repository.issues.totalCount,
680 | openPRs: result.repository.pullRequests.totalCount,
681 | lastUpdated: result.repository.updatedAt,
682 | };
683 | } catch (error: unknown) {
684 | if (error instanceof RequestError && error.status === 404) {
685 | throw new GeminiValidationError(
686 | `Repository not found: ${owner}/${repo}`,
687 | "repo"
688 | );
689 | }
690 | logger.error("Error fetching repository overview", { error });
691 | throw new Error(
692 | `Failed to fetch repository overview for ${owner}/${repo}`
693 | );
694 | }
695 | });
696 | }
697 |
698 | /**
699 | * Get a combined diff from comparing two branches
700 | * @param owner Repository owner
701 | * @param repo Repository name
702 | * @param baseBranch Base branch name
703 | * @param headBranch Head branch name
704 | * @returns Promise resolving to the diff as a string
705 | */
706 | public async getComparisonDiff(
707 | owner: string,
708 | repo: string,
709 | baseBranch: string,
710 | headBranch: string
711 | ): Promise<string> {
712 | const cacheKey = `comparison-diff:${owner}/${repo}/${baseBranch}...${headBranch}`;
713 |
714 | return this.getCachedOrFetch(cacheKey, async () => {
715 | await this.checkBeforeRequest();
716 |
717 | try {
718 | // Get the diff using the comparison API with diff format
719 | const response = await this.octokit.request(
720 | `GET /repos/{owner}/{repo}/compare/{basehead}`,
721 | {
722 | owner,
723 | repo,
724 | basehead: `${baseBranch}...${headBranch}`,
725 | headers: {
726 | accept: "application/vnd.github.v3.diff",
727 | },
728 | }
729 | );
730 |
731 | // The API returns a diff as text when using the diff content type
732 | return String(response.data);
733 | } catch (error: unknown) {
734 | if (error instanceof RequestError) {
735 | if (error.status === 404) {
736 | throw new GeminiValidationError(
737 | `Repository or branches not found: ${owner}/${repo} ${baseBranch}...${headBranch}`,
738 | "branches"
739 | );
740 | }
741 | // Handle 422 error for when the branches don't have common history
742 | if (error.status === 422) {
743 | throw new GeminiValidationError(
744 | `Cannot compare branches: ${baseBranch} and ${headBranch} don't have common history`,
745 | "branches"
746 | );
747 | }
748 | }
749 | logger.error("Error fetching comparison diff", { error });
750 | throw new Error(
751 | `Failed to fetch comparison diff for ${baseBranch}...${headBranch} in ${owner}/${repo}`
752 | );
753 | }
754 | });
755 | }
756 |
757 | /**
758 | * Invalidate a cache entry manually
759 | * @param cacheKey The key to invalidate
760 | */
761 | public async invalidateCache(cacheKey: string): Promise<void> {
762 | if (this.cacheConfig.enabled) {
763 | await this.cache.delete(cacheKey);
764 | logger.debug(`Invalidated cache for ${cacheKey}`);
765 | }
766 | }
767 |
768 | /**
769 | * Clear the entire cache
770 | */
771 | public async clearCache(): Promise<void> {
772 | if (this.cacheConfig.enabled) {
773 | await this.cache.clear();
774 | logger.info("Cleared GitHub API cache");
775 | }
776 | }
777 | }
778 |
```
--------------------------------------------------------------------------------
/tests/integration/mcpClientIntegration.test.vitest.ts:
--------------------------------------------------------------------------------
```typescript
1 | // Using vitest globals - see vitest.config.ts globals: true
2 |
3 | // Skip these flaky integration tests for now
4 | const itSkipIntegration = it.skip;
5 | import { spawn, ChildProcess } from "child_process";
6 | import path from "path";
7 | import fs from "fs/promises";
8 | import os from "os";
9 |
10 | // Import MCP client service
11 | import { McpClientService } from "../../src/services/mcp/McpClientService.js";
12 |
13 | // Import tool processors for direct invocation
14 | import { mcpClientTool } from "../../src/tools/mcpClientTool.js";
15 | import { writeToFile } from "../../src/tools/writeToFileTool.js";
16 |
17 | // Import Configuration manager
18 | import { ConfigurationManager } from "../../src/config/ConfigurationManager.js";
19 |
20 | // Import integration test types
21 | import { ToolProcessor, ToolProcessors } from "../utils/integration-types.js";
22 |
23 | // Response types are defined inline where needed to avoid unused variable warnings
24 |
25 | // Helper functions to set up integration environment
26 | function createTempOutputDir(): Promise<string> {
27 | // Create a temporary directory for test file outputs
28 | const tempDir = path.join(os.tmpdir(), `mcp-client-test-${Date.now()}`);
29 | return fs.mkdir(tempDir, { recursive: true }).then(() => tempDir);
30 | }
31 |
32 | async function cleanupTempDir(tempDir: string): Promise<void> {
33 | try {
34 | // Recursively delete the temporary directory
35 | await fs.rm(tempDir, { recursive: true, force: true });
36 | } catch (error) {
37 | console.error(`Error cleaning up temp directory: ${error}`);
38 | }
39 | }
40 |
41 | // Helper to mock the ConfigurationManager
42 | function mockConfigurationManager(tempDir: string): void {
43 | // Backup the original getInstance
44 | const originalGetInstance = ConfigurationManager.getInstance;
45 |
46 | // Mock getInstance
47 | ConfigurationManager.getInstance = function getInstance() {
48 | const instance = originalGetInstance.call(ConfigurationManager);
49 |
50 | // Mock the getAllowedOutputPaths method
51 | instance.getAllowedOutputPaths = () => [tempDir];
52 |
53 | // The getAllowedOutputPaths method is already mocked above
54 |
55 | // Mock the getMcpConfig method
56 | instance.getMcpConfig = () => ({
57 | host: "localhost",
58 | port: 3456,
59 | connectionToken: "test-token",
60 | clientId: "test-client",
61 | logLevel: "info",
62 | transport: "stdio",
63 | enableStreaming: false,
64 | sessionTimeoutSeconds: 60,
65 | });
66 |
67 | return instance;
68 | };
69 | }
70 |
71 | // Helper to restore the original ConfigurationManager
72 | function restoreConfigurationManager(): void {
73 | // Restore the original getInstance method
74 | delete (
75 | ConfigurationManager as unknown as {
76 | getInstance?: () => ConfigurationManager;
77 | }
78 | ).getInstance;
79 | }
80 |
81 | // Generic function to create a tool processor from current tool objects
82 | function createToolProcessor(
83 | tool: {
84 | execute: (
85 | args: any,
86 | service: McpClientService
87 | ) => Promise<{ content: { type: string; text: string }[] }>;
88 | },
89 | mcpClientService: McpClientService
90 | ): ToolProcessor;
91 | function createToolProcessor(
92 | tool: {
93 | execute: (
94 | args: any
95 | ) => Promise<{ content: { type: string; text: string }[] }>;
96 | },
97 | mcpClientService: McpClientService
98 | ): ToolProcessor;
99 | function createToolProcessor(
100 | tool: any,
101 | mcpClientService: McpClientService
102 | ): ToolProcessor {
103 | return async (args: any) => {
104 | if (tool.execute.length === 1) {
105 | // Tool doesn't need service parameter (like writeToFile)
106 | return await tool.execute(args);
107 | } else {
108 | // Tool needs service parameter (like mcpClientTool)
109 | return await tool.execute(args, mcpClientService);
110 | }
111 | };
112 | }
113 |
114 | // Start dummy MCP server (stdio)
115 | async function startDummyMcpServerStdio(): Promise<ChildProcess> {
116 | const currentDir = path.dirname(new URL(import.meta.url).pathname);
117 | const serverPath = path.resolve(currentDir, "./dummyMcpServerStdio.ts");
118 | console.debug(`Starting STDIO server at path: ${serverPath}`);
119 |
120 | // Verify the file exists
121 | try {
122 | await fs.access(serverPath);
123 | } catch (error) {
124 | throw new Error(`Dummy server file not found at: ${serverPath}`);
125 | }
126 |
127 | // Start the child process with ts-node for TypeScript execution
128 | const nodeProcess = spawn("node", ["--loader", "ts-node/esm", serverPath], {
129 | stdio: ["pipe", "pipe", "pipe"],
130 | env: {
131 | ...process.env,
132 | NODE_OPTIONS: "--no-warnings --experimental-specifier-resolution=node",
133 | },
134 | });
135 |
136 | // Create a Promise that resolves when the server is ready
137 | return new Promise((resolve, reject) => {
138 | let errorOutput = "";
139 |
140 | // Listen for data on stderr to detect when server is ready
141 | nodeProcess.stderr.on("data", (data) => {
142 | const message = data.toString();
143 | errorOutput += message;
144 | console.debug(`[STDIO Server stderr]: ${message}`);
145 |
146 | // When we see the server ready message, resolve
147 | if (message.includes("Dummy MCP Server (stdio) started")) {
148 | resolve(nodeProcess);
149 | }
150 | });
151 |
152 | // Also listen on stdout for any output
153 | nodeProcess.stdout.on("data", (data) => {
154 | console.debug(`[STDIO Server stdout]: ${data.toString()}`);
155 | });
156 |
157 | // Handle startup failure
158 | nodeProcess.on("error", (err) => {
159 | reject(new Error(`Failed to start dummy server: ${err.message}`));
160 | });
161 |
162 | // Set a timeout in case the server doesn't start
163 | const timeout = setTimeout(() => {
164 | nodeProcess.kill();
165 | reject(
166 | new Error(
167 | `Timeout waiting for dummy server to start. Last output: ${errorOutput}`
168 | )
169 | );
170 | }, 15000); // Increased timeout to 15 seconds
171 |
172 | // Clear the timeout if we resolve or reject
173 | nodeProcess.on("exit", () => {
174 | clearTimeout(timeout);
175 | });
176 | });
177 | }
178 |
179 | // Start dummy MCP server (SSE)
180 | async function startDummyMcpServerSse(port = 3456): Promise<ChildProcess> {
181 | const currentDir = path.dirname(new URL(import.meta.url).pathname);
182 | const serverPath = path.resolve(currentDir, "./dummyMcpServerSse.ts");
183 | console.debug(`Starting SSE server at path: ${serverPath}`);
184 |
185 | // Start the child process with ts-node for TypeScript execution
186 | const nodeProcess = spawn(
187 | "node",
188 | ["--loader", "ts-node/esm", serverPath, port.toString()],
189 | {
190 | stdio: ["pipe", "pipe", "pipe"],
191 | env: {
192 | ...process.env,
193 | NODE_OPTIONS: "--no-warnings --experimental-specifier-resolution=node",
194 | },
195 | }
196 | );
197 |
198 | // Create a Promise that resolves when the server is ready
199 | return new Promise((resolve, reject) => {
200 | let errorOutput = "";
201 |
202 | // Listen for data on stderr to detect when server is ready
203 | nodeProcess.stderr.on("data", (data) => {
204 | const message = data.toString();
205 | errorOutput += message;
206 | console.debug(`[SSE Server stderr]: ${message}`);
207 |
208 | // When we see the server ready message, resolve
209 | if (message.includes(`Dummy MCP Server (SSE) started on port ${port}`)) {
210 | resolve(nodeProcess);
211 | }
212 | });
213 |
214 | // Also listen on stdout for any output
215 | nodeProcess.stdout.on("data", (data) => {
216 | console.debug(`[SSE Server stdout]: ${data.toString()}`);
217 | });
218 |
219 | // Handle startup failure
220 | nodeProcess.on("error", (err) => {
221 | reject(new Error(`Failed to start dummy server: ${err.message}`));
222 | });
223 |
224 | // Set a timeout in case the server doesn't start
225 | const timeout = setTimeout(() => {
226 | nodeProcess.kill();
227 | reject(
228 | new Error(
229 | `Timeout waiting for dummy server to start. Last output: ${errorOutput}`
230 | )
231 | );
232 | }, 15000); // Increased timeout to 15 seconds
233 |
234 | // Clear the timeout if we resolve or reject
235 | nodeProcess.on("exit", () => {
236 | clearTimeout(timeout);
237 | });
238 | });
239 | }
240 |
// Top-level integration suite.
// NOTE(review): mcpClientService, processors, tempDir, stdioServer and
// sseServer are mutable state shared across the nested describes; each
// group's afterEach must kill its server and close connections, or later
// groups see stale state. All tests are currently skipped (itSkipIntegration).
describe("MCP Client Integration Tests", () => {
  let mcpClientService: McpClientService;
  let processors: ToolProcessors;
  let tempDir: string;
  let stdioServer: ChildProcess | null = null;
  let sseServer: ChildProcess | null = null;

  // Set up test environment before all tests
  beforeAll(async function () {
    // Create a temporary directory for test outputs
    tempDir = await createTempOutputDir();

    // Set the environment variable for file security
    process.env.GEMINI_SAFE_FILE_BASE_DIR = tempDir;

    // Mock ConfigurationManager to use our test settings
    mockConfigurationManager(tempDir);

    // Initialize the MCP client service
    mcpClientService = new McpClientService();

    // Create tool processors for testing.
    // NOTE(review): connect/listTools/callServerTool/disconnect all wrap the
    // same mcpClientTool — presumably it dispatches on its argument shape;
    // confirm against mcpClientTool's implementation.
    processors = {
      connect: createToolProcessor(mcpClientTool, mcpClientService),
      listTools: createToolProcessor(mcpClientTool, mcpClientService),
      callServerTool: createToolProcessor(mcpClientTool, mcpClientService),
      disconnect: createToolProcessor(mcpClientTool, mcpClientService),
      writeToFile: createToolProcessor(writeToFile, mcpClientService),
    };
  });

  // Clean up after all tests
  afterAll(async function () {
    // Close any open MCP connections
    mcpClientService.closeAllConnections();

    // Kill the server processes if they're still running
    if (stdioServer) {
      stdioServer.kill();
    }

    if (sseServer) {
      sseServer.kill();
    }

    // Restore the original ConfigurationManager
    restoreConfigurationManager();

    // Clean up environment variable
    delete process.env.GEMINI_SAFE_FILE_BASE_DIR;

    // Clean up temporary directory
    await cleanupTempDir(tempDir);
  });

  describe("STDIO Transport Tests", () => {
    // Set up stdio server before each test in this group
    beforeEach(async function () {
      // Start the dummy stdio server
      stdioServer = await startDummyMcpServerStdio();
    }, 20000); // Increase timeout to 20 seconds

    // Clean up stdio server after each test
    afterEach(function () {
      // Kill the stdio server
      if (stdioServer) {
        stdioServer.kill();
        stdioServer = null;
      }

      // Close any connections
      mcpClientService.closeAllConnections();
    });

    itSkipIntegration(
      "should connect to a stdio server, list tools, call a tool, and disconnect",
      async () => {
        // Step 1: Call the connect processor to connect to the dummy stdio server.
        // NOTE(review): this spawns its own server process via the connection
        // details below, separate from the one started in beforeEach.
        const connectArgs = {
          transport: "stdio",
          connectionDetails: {
            transport: "stdio",
            command: "node",
            args: [
              "--loader",
              "ts-node/esm",
              "./tests/integration/dummyMcpServerStdio.ts",
            ],
          },
        };

        // Connect to the server
        const connectResult = await processors.connect(connectArgs);

        // Extract the connection ID from the result
        const resultJson = JSON.parse(connectResult.content[0].text);
        const connectionId = resultJson.connectionId;

        // Verify connection ID was returned and is a string
        expect(connectionId).toBeTruthy();
        expect(typeof connectionId).toBe("string");

        // Step 2: List tools on the connected server
        const listToolsArgs = {
          connectionId,
        };

        const listToolsResult = await processors.listTools(listToolsArgs);

        // Parse the tools list
        const toolsList = JSON.parse(listToolsResult.content[0].text);

        // Verify tools list
        expect(Array.isArray(toolsList)).toBeTruthy();
        expect(toolsList.length).toBeGreaterThanOrEqual(3);

        // Verify expected tools are in the list
        const toolNames = toolsList.map((tool: { name: string }) => tool.name);
        expect(toolNames.includes("echoTool")).toBeTruthy();
        expect(toolNames.includes("addTool")).toBeTruthy();
        expect(toolNames.includes("complexDataTool")).toBeTruthy();

        // Step 3: Call the echo tool
        const echoMessage = "Hello from integration test";
        const callToolArgs = {
          connectionId,
          toolName: "echoTool",
          toolParameters: {
            message: echoMessage,
          },
        };

        const callToolResult = await processors.callServerTool(callToolArgs);

        // Parse the result
        const echoResult = JSON.parse(callToolResult.content[0].text);

        // Verify echo result
        expect(echoResult.message).toBe(echoMessage);
        expect(echoResult.timestamp).toBeTruthy();

        // Step 4: Call the add tool
        const addArgs = {
          connectionId,
          toolName: "addTool",
          toolParameters: {
            a: 5,
            b: 7,
          },
        };

        const addResult = await processors.callServerTool(addArgs);

        // Parse the result
        const addOutput = JSON.parse(addResult.content[0].text);

        // Verify add result
        expect(addOutput.sum).toBe(12);
        expect(addOutput.inputs).toEqual({ a: 5, b: 7 });

        // Step 5: Disconnect from the server
        const disconnectArgs = {
          connectionId,
        };

        const disconnectResult = await processors.disconnect(disconnectArgs);

        // Parse the disconnect result
        const disconnectOutput = JSON.parse(disconnectResult.content[0].text);

        // Verify disconnect result
        expect(disconnectOutput.connectionId).toBe(connectionId);
        expect(
          disconnectOutput.message.includes("Connection closed")
        ).toBeTruthy();

        // Verify the connection is no longer in the active connections list
        expect(mcpClientService.getActiveStdioConnectionIds().length).toBe(0);
      }
    );

    itSkipIntegration(
      "should call a tool and write output to a file",
      async () => {
        // Step 1: Connect to the dummy stdio server
        const connectArgs = {
          transport: "stdio",
          connectionDetails: {
            transport: "stdio",
            command: "node",
            args: [
              "--loader",
              "ts-node/esm",
              "./tests/integration/dummyMcpServerStdio.ts",
            ],
          },
        };

        const connectResult = await processors.connect(connectArgs);
        const resultJson = JSON.parse(connectResult.content[0].text);
        const connectionId = resultJson.connectionId;

        // Step 2: Call the complexDataTool and write output to a file.
        // outputPath is inside tempDir, which mockConfigurationManager
        // registered as the only allowed output path.
        const outputPath = path.join(tempDir, "complex-data-output.json");
        const callToolArgs = {
          connectionId,
          toolName: "complexDataTool",
          toolParameters: {
            depth: 2,
            itemCount: 3,
          },
          outputFilePath: outputPath,
        };

        const callToolResult = await processors.callServerTool(callToolArgs);

        // Parse the result
        const callToolOutput = JSON.parse(callToolResult.content[0].text);

        // Verify the result contains the expected information
        expect(callToolOutput.message).toBe("Output written to file");
        expect(callToolOutput.filePath).toBe(outputPath);

        // Verify the file exists and contains the expected data
        const fileExists = await fs
          .access(outputPath)
          .then(() => true)
          .catch(() => false);
        expect(fileExists).toBeTruthy();

        // Read the file contents
        const fileContent = await fs.readFile(outputPath, "utf8");
        const fileData = JSON.parse(fileContent);

        // Verify file content structure
        expect(fileData.level).toBe(1);
        expect(fileData.items.length).toBe(3);
        expect(fileData.items[0].level).toBe(2);

        // Clean up - disconnect from the server
        await processors.disconnect({ connectionId });
      }
    );
  });

  describe("SSE Transport Tests", () => {
    // Set up SSE server before each test in this group
    beforeEach(async function () {
      // Start the dummy SSE server
      sseServer = await startDummyMcpServerSse();
    }, 20000); // Increase timeout to 20 seconds

    // Clean up SSE server after each test
    afterEach(function () {
      // Kill the SSE server
      if (sseServer) {
        sseServer.kill();
        sseServer = null;
      }

      // Close any connections
      mcpClientService.closeAllConnections();
    });

    itSkipIntegration(
      "should connect to an SSE server, list tools, call a tool, and disconnect",
      async () => {
        // Step 1: Call the connect processor to connect to the dummy SSE server.
        // ssePort must match the default port used by startDummyMcpServerSse.
        const ssePort = 3456;
        const connectArgs = {
          transport: "sse",
          connectionDetails: {
            transport: "sse",
            url: `http://localhost:${ssePort}/mcp`,
          },
        };

        // Connect to the server
        const connectResult = await processors.connect(connectArgs);

        // Extract the connection ID from the result
        const resultJson = JSON.parse(connectResult.content[0].text);
        const connectionId = resultJson.connectionId;

        // Verify connection ID was returned and is a string
        expect(connectionId).toBeTruthy();
        expect(typeof connectionId).toBe("string");

        // Step 2: List tools on the connected server
        const listToolsArgs = {
          connectionId,
        };

        const listToolsResult = await processors.listTools(listToolsArgs);

        // Parse the tools list
        const toolsList = JSON.parse(listToolsResult.content[0].text);

        // Verify tools list
        expect(Array.isArray(toolsList)).toBeTruthy();
        expect(toolsList.length).toBeGreaterThanOrEqual(3);

        // Verify expected tools are in the list
        const toolNames = toolsList.map((tool: { name: string }) => tool.name);
        expect(toolNames.includes("echoTool")).toBeTruthy();
        expect(toolNames.includes("addTool")).toBeTruthy();
        expect(toolNames.includes("complexDataTool")).toBeTruthy();

        // Step 3: Call the echo tool
        const echoMessage = "Hello from SSE integration test";
        const callToolArgs = {
          connectionId,
          toolName: "echoTool",
          toolParameters: {
            message: echoMessage,
          },
        };

        const callToolResult = await processors.callServerTool(callToolArgs);

        // Parse the result
        const echoResult = JSON.parse(callToolResult.content[0].text);

        // Verify echo result
        expect(echoResult.message).toBe(echoMessage);
        expect(echoResult.timestamp).toBeTruthy();

        // Step 4: Disconnect from the server
        const disconnectArgs = {
          connectionId,
        };

        const disconnectResult = await processors.disconnect(disconnectArgs);

        // Parse the disconnect result
        const disconnectOutput = JSON.parse(disconnectResult.content[0].text);

        // Verify disconnect result
        expect(disconnectOutput.connectionId).toBe(connectionId);
        expect(
          disconnectOutput.message.includes("Connection closed")
        ).toBeTruthy();

        // Verify the connection is no longer in the active connections list
        expect(mcpClientService.getActiveSseConnectionIds().length).toBe(0);
      }
    );
  });

  describe("Write to File Tool Tests", () => {
    let writeToFileProcessor: ToolProcessor;

    beforeEach(function () {
      // Create the writeToFile processor
      writeToFileProcessor = createToolProcessor(writeToFile, mcpClientService);
    });

    itSkipIntegration("should write a string to a file", async () => {
      // Create the file path for the test
      const testFilePath = path.join(tempDir, "test-utf8-output.txt");
      const testContent =
        "This is a test string to write to a file\nWith multiple lines\nAnd special chars: €£¥©®™";

      // Call the writeToFile processor
      const args = {
        filePath: testFilePath,
        content: testContent,
        encoding: "utf8",
      };

      const result = await writeToFileProcessor(args);

      // Parse the result
      const resultJson = JSON.parse(result.content[0].text);

      // Verify the result contains the expected information
      expect(resultJson.message).toBe("Content written to file successfully.");
      expect(resultJson.filePath).toBe(testFilePath);

      // Verify the file exists and contains the correct data
      const fileExists = await fs
        .access(testFilePath)
        .then(() => true)
        .catch(() => false);
      expect(fileExists).toBeTruthy();

      // Read the file and compare the content
      const fileContent = await fs.readFile(testFilePath, "utf8");
      expect(fileContent).toBe(testContent);
    });

    itSkipIntegration(
      "should write a base64 encoded string to a file",
      async () => {
        // Create the file path for the test
        const testFilePath = path.join(tempDir, "test-base64-output.txt");

        // Create a test string and encode it to base64
        const originalString =
          "This is a test string that will be base64 encoded\nWith multiple lines\nAnd special chars: €£¥©®™";
        const base64Content = Buffer.from(originalString).toString("base64");

        // Call the writeToFile processor
        const args = {
          filePath: testFilePath,
          content: base64Content,
          encoding: "base64",
        };

        const result = await writeToFileProcessor(args);

        // Parse the result
        const resultJson = JSON.parse(result.content[0].text);

        // Verify the result contains the expected information
        expect(resultJson.message).toBe(
          "Content written to file successfully."
        );
        expect(resultJson.filePath).toBe(testFilePath);

        // Verify the file exists and contains the correct data
        const fileExists = await fs
          .access(testFilePath)
          .then(() => true)
          .catch(() => false);
        expect(fileExists).toBeTruthy();

        // Read the file and compare the content: the tool is expected to have
        // decoded the base64 payload back to the original text on disk.
        const fileContent = await fs.readFile(testFilePath, "utf8");
        expect(fileContent).toBe(originalString);
      }
    );

    itSkipIntegration(
      "should fail when writing to a path outside allowed directories",
      async () => {
        // Try to write to an absolute path outside the allowed directory
        const nonAllowedPath = path.join(
          os.tmpdir(),
          "..",
          "non-allowed-dir",
          "test.txt"
        );

        const args = {
          filePath: nonAllowedPath,
          content: "This should not be written",
          encoding: "utf8",
        };

        // The call should reject because the path is not allowed
        await expect(writeToFileProcessor(args)).rejects.toThrow(
          /Security error|not within the allowed output|InvalidParams/
        );

        // Verify the file does not exist
        const fileExists = await fs
          .access(nonAllowedPath)
          .then(() => true)
          .catch(() => false);
        expect(fileExists).toBe(false);
      }
    );
  });
});
706 |
```
--------------------------------------------------------------------------------
/src/services/GeminiService.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { GoogleGenAI, GenerateContentResponse } from "@google/genai";
2 | import { ConfigurationManager } from "../config/ConfigurationManager.js";
3 | import { ModelSelectionService } from "./ModelSelectionService.js";
4 | import { logger } from "../utils/logger.js";
5 | import {
6 | CachedContentMetadata,
7 | ModelSelectionCriteria,
8 | ImageGenerationResult,
9 | } from "../types/index.js";
10 | import {
11 | GeminiGitDiffService,
12 | GitDiffReviewParams,
13 | } from "./gemini/GeminiGitDiffService.js";
14 | import { GitHubApiService } from "./gemini/GitHubApiService.js";
15 |
16 | // Import specialized services
17 | import { GeminiChatService } from "./gemini/GeminiChatService.js";
18 | import { GeminiContentService } from "./gemini/GeminiContentService.js";
19 | import { GeminiCacheService } from "./gemini/GeminiCacheService.js";
20 | import {
21 | Content,
22 | Tool,
23 | ToolConfig,
24 | GenerationConfig,
25 | SafetySetting,
26 | CacheId,
27 | FunctionCall,
28 | } from "./gemini/GeminiTypes.js";
29 |
/**
 * Service for interacting with the Google Gemini API.
 * This is a facade that delegates to specialized services for different
 * functionality: content generation, stateful chat sessions, content caching,
 * git-diff review, and GitHub repository/PR review.
 */
export class GeminiService {
  // Single SDK client shared by all specialized services.
  private genAI: GoogleGenAI;
  // Model used when a caller does not specify one; may be undefined, in which
  // case model selection falls back to the configured default.
  private defaultModelName?: string;
  private modelSelector: ModelSelectionService;
  private configManager: ConfigurationManager;

  // Specialized services this facade delegates to.
  private chatService: GeminiChatService;
  private contentService: GeminiContentService;
  private cacheService: GeminiCacheService;
  private gitDiffService: GeminiGitDiffService;
  private gitHubApiService: GitHubApiService;

  constructor() {
    this.configManager = ConfigurationManager.getInstance();
    const config = this.configManager.getGeminiServiceConfig();

    this.modelSelector = new ModelSelectionService(
      this.configManager.getModelConfiguration()
    );

    // Fail fast: nothing below works without an API key.
    if (!config.apiKey) {
      throw new Error("Gemini API key is required");
    }

    // Initialize with the apiKey property in an object as required in v0.10.0
    this.genAI = new GoogleGenAI({ apiKey: config.apiKey });
    this.defaultModelName = config.defaultModel;

    // File security service is no longer needed since file operations were removed

    // Initialize specialized services
    this.contentService = new GeminiContentService(
      this.genAI,
      this.defaultModelName,
      config.defaultThinkingBudget
    );
    this.chatService = new GeminiChatService(this.genAI, this.defaultModelName);
    this.cacheService = new GeminiCacheService(this.genAI);
    this.gitDiffService = new GeminiGitDiffService(
      this.genAI,
      this.defaultModelName,
      1024 * 1024, // 1MB default
      // Default exclusion globs: generated/vendored files that add noise to diffs.
      [
        "package-lock.json",
        "yarn.lock",
        "*.min.js",
        "*.min.css",
        "node_modules/**",
        "dist/**",
        "build/**",
        "*.lock",
        "**/*.map",
      ],
      config.defaultThinkingBudget
    );

    // Token may be undefined; GitHubApiService presumably falls back to
    // unauthenticated requests in that case — confirm in GitHubApiService.
    const githubApiToken = this.configManager.getGitHubApiToken();
    this.gitHubApiService = new GitHubApiService(githubApiToken);
  }

  /**
   * Streams generated content for a prompt, selecting an appropriate model
   * first (explicit modelName wins; otherwise selection hints are used).
   *
   * @param params Generation parameters plus optional model-selection hints
   * @returns AsyncGenerator yielding content chunks as they arrive
   */
  public async *generateContentStream(
    params: GenerateContentParams & {
      preferQuality?: boolean;
      preferSpeed?: boolean;
      preferCost?: boolean;
      complexityHint?: "simple" | "medium" | "complex";
      taskType?: ModelSelectionCriteria["taskType"];
    }
  ): AsyncGenerator<string> {
    const selectedModel = await this.selectModelForGeneration(params);
    yield* this.contentService.generateContentStream({
      ...params,
      modelName: selectedModel,
    });
  }

  /**
   * Generates content for a prompt, selecting an appropriate model first
   * (explicit modelName wins; otherwise selection hints are used).
   *
   * @param params Generation parameters plus optional model-selection hints
   * @returns Promise resolving to the full generated text
   */
  public async generateContent(
    params: GenerateContentParams & {
      preferQuality?: boolean;
      preferSpeed?: boolean;
      preferCost?: boolean;
      complexityHint?: "simple" | "medium" | "complex";
      taskType?: ModelSelectionCriteria["taskType"];
    }
  ): Promise<string> {
    const selectedModel = await this.selectModelForGeneration(params);
    const result = await this.contentService.generateContent({
      ...params,
      modelName: selectedModel,
    });

    return result;
  }

  /**
   * Starts a new stateful chat session with the Gemini model.
   *
   * @param params Parameters for starting a chat session
   * @returns A unique session ID to identify this chat session
   */
  public startChatSession(params: StartChatParams = {}): string {
    return this.chatService.startChatSession(params);
  }

  /**
   * Sends a message to an existing chat session.
   * Uses the generated content API directly since we're managing chat state ourselves.
   *
   * @param params Parameters for sending a message
   * @returns Promise resolving to the chat response
   */
  public async sendMessageToSession(
    params: SendMessageParams
  ): Promise<GenerateContentResponse> {
    return this.chatService.sendMessageToSession(params);
  }

  /**
   * Sends the result of a function call back to the chat session.
   *
   * @param params Parameters for sending a function result
   * @returns Promise resolving to the chat response
   */
  public async sendFunctionResultToSession(
    params: SendFunctionResultParams
  ): Promise<GenerateContentResponse> {
    return this.chatService.sendFunctionResultToSession(params);
  }

  /**
   * Creates a cached content entry in the Gemini API.
   *
   * @param modelName The model to use for this cached content
   * @param contents The conversation contents to cache
   * @param options Additional options for the cache (displayName, systemInstruction, ttl, tools, toolConfig)
   * @returns Promise resolving to the cached content metadata
   */
  public async createCache(
    modelName: string,
    contents: Content[],
    options?: {
      displayName?: string;
      systemInstruction?: Content | string;
      ttl?: string;
      tools?: Tool[];
      toolConfig?: ToolConfig;
    }
  ): Promise<CachedContentMetadata> {
    return this.cacheService.createCache(modelName, contents, options);
  }

  /**
   * Lists cached content entries in the Gemini API.
   *
   * @param pageSize Optional maximum number of entries to return
   * @param pageToken Optional token for pagination
   * @returns Promise resolving to an object with caches array and optional nextPageToken
   */
  public async listCaches(
    pageSize?: number,
    pageToken?: string
  ): Promise<{ caches: CachedContentMetadata[]; nextPageToken?: string }> {
    return this.cacheService.listCaches(pageSize, pageToken);
  }

  /**
   * Gets a specific cached content entry's metadata from the Gemini API.
   *
   * @param cacheId The ID of the cached content to retrieve (format: "cachedContents/{id}")
   * @returns Promise resolving to the cached content metadata
   */
  public async getCache(cacheId: CacheId): Promise<CachedContentMetadata> {
    return this.cacheService.getCache(cacheId);
  }

  /**
   * Updates a cached content entry in the Gemini API.
   *
   * @param cacheId The ID of the cached content to update (format: "cachedContents/{id}")
   * @param updates The updates to apply to the cached content (ttl, displayName)
   * @returns Promise resolving to the updated cached content metadata
   */
  public async updateCache(
    cacheId: CacheId,
    updates: { ttl?: string; displayName?: string }
  ): Promise<CachedContentMetadata> {
    return this.cacheService.updateCache(cacheId, updates);
  }

  /**
   * Deletes a cached content entry from the Gemini API.
   *
   * @param cacheId The ID of the cached content to delete (format: "cachedContents/{id}")
   * @returns Promise resolving to an object with success flag
   */
  public async deleteCache(cacheId: CacheId): Promise<{ success: boolean }> {
    return this.cacheService.deleteCache(cacheId);
  }

  /**
   * Routes a message to the most appropriate model based on a routing prompt.
   * This is useful when you have multiple specialized models and want to automatically
   * select the best one for the specific query type.
   *
   * @param params Parameters for routing a message across models
   * @returns Promise resolving to an object with the chat response and the chosen model
   * @throws {GeminiApiError} If routing fails or all models are unavailable
   */
  public async routeMessage(
    params: RouteMessageParams
  ): Promise<{ response: GenerateContentResponse; chosenModel: string }> {
    return this.chatService.routeMessage(params);
  }

  /**
   * Reviews a git diff and generates analysis using Gemini models
   *
   * @param params Parameters for the git diff review
   * @returns Promise resolving to the review text
   */
  public async reviewGitDiff(params: GitDiffReviewParams): Promise<string> {
    return this.gitDiffService.reviewDiff(params);
  }

  /**
   * Streams a git diff review content using Gemini models
   *
   * @param params Parameters for the git diff review
   * @returns AsyncGenerator yielding review content chunks as they become available
   */
  public async *reviewGitDiffStream(
    params: GitDiffReviewParams
  ): AsyncGenerator<string> {
    yield* this.gitDiffService.reviewDiffStream(params);
  }

  /**
   * Reviews a GitHub repository and generates analysis using Gemini models.
   *
   * IMPORTANT: This method uses a special approach to analyze repository contents by
   * creating a diff against an empty tree. While effective for getting an overview of
   * the repository, be aware of these limitations:
   *
   * 1. Token Usage: This approach consumes a significant number of tokens, especially
   *    for large repositories, as it treats the entire repository as one large diff.
   *
   * 2. Performance Impact: For very large repositories, this may result in slow
   *    response times and potential timeout errors.
   *
   * 3. Cost Considerations: The token consumption directly impacts API costs.
   *    Consider using the maxFilesToInclude and excludePatterns options to limit scope.
   *
   * 4. Scale Issues: Repositories with many files or large files may exceed context
   *    limits of the model, resulting in incomplete analysis.
   *
   * For large repositories, consider reviewing specific directories or files instead,
   * or focusing on a particular branch or PR.
   *
   * @param params Parameters for the GitHub repository review
   * @returns Promise resolving to the review text
   */
  public async reviewGitHubRepository(params: {
    owner: string;
    repo: string;
    branch?: string;
    modelName?: string;
    reasoningEffort?: "none" | "low" | "medium" | "high";
    reviewFocus?:
      | "security"
      | "performance"
      | "architecture"
      | "bugs"
      | "general";
    maxFilesToInclude?: number;
    excludePatterns?: string[];
    prioritizeFiles?: string[];
    customPrompt?: string;
  }): Promise<string> {
    try {
      const {
        owner,
        repo,
        branch,
        modelName,
        reasoningEffort = "medium",
        reviewFocus = "general",
        maxFilesToInclude = 50,
        excludePatterns = [],
        prioritizeFiles,
        customPrompt,
      } = params;

      // Get repository overview using GitHub API
      const repoOverview = await this.gitHubApiService.getRepositoryOverview(
        owner,
        repo
      );

      // Get default branch if not specified
      const targetBranch = branch || repoOverview.defaultBranch;

      // Create repository context for Gemini prompt
      const repositoryContext = `Repository: ${owner}/${repo}
Primary Language: ${repoOverview.language}
Languages: ${repoOverview.languages.map((l) => `${l.name} (${l.percentage}%)`).join(", ")}
Description: ${repoOverview.description || "No description"}
Default Branch: ${repoOverview.defaultBranch}
Target Branch: ${targetBranch}
Stars: ${repoOverview.stars}
Forks: ${repoOverview.forks}`;

      // Get content from repository files that match our criteria
      // For now, we'll use a git diff approach by getting a comparison diff
      // between the empty state and the target branch
      const diff = await this.gitHubApiService.getComparisonDiff(
        owner,
        repo,
        // Use a known empty reference as the base — this is git's canonical
        // empty-tree object hash, so the "diff" contains the whole branch.
        "4b825dc642cb6eb9a060e54bf8d69288fbee4904",
        targetBranch
      );

      // Use the git diff service to analyze the repository content
      return this.gitDiffService.reviewDiff({
        diffContent: diff,
        modelName,
        reviewFocus,
        repositoryContext,
        diffOptions: {
          maxFilesToInclude,
          excludePatterns,
          prioritizeFiles,
        },
        reasoningEffort,
        customPrompt,
      });
    } catch (error: unknown) {
      // Log with context, then rethrow so callers can map/handle the error.
      logger.error("Error reviewing GitHub repository:", error);
      throw error;
    }
  }

  /**
   * Reviews a GitHub Pull Request and generates analysis using Gemini models
   *
   * @param params Parameters for the GitHub PR review
   * @returns Promise resolving to the review text
   */
  public async reviewGitHubPullRequest(params: {
    owner: string;
    repo: string;
    prNumber: number;
    modelName?: string;
    reasoningEffort?: "none" | "low" | "medium" | "high";
    reviewFocus?:
      | "security"
      | "performance"
      | "architecture"
      | "bugs"
      | "general";
    excludePatterns?: string[];
    customPrompt?: string;
  }): Promise<string> {
    try {
      const {
        owner,
        repo,
        prNumber,
        modelName,
        reasoningEffort = "medium",
        reviewFocus = "general",
        excludePatterns = [],
        customPrompt,
      } = params;

      // Get PR details using GitHub API
      const pullRequest = await this.gitHubApiService.getPullRequest(
        owner,
        repo,
        prNumber
      );

      // Create repository context for Gemini prompt
      const repositoryContext = `Repository: ${owner}/${repo}
Pull Request: #${prNumber} - ${pullRequest.title}
Author: ${pullRequest.user.login}
Base Branch: ${pullRequest.base.ref}
Head Branch: ${pullRequest.head.ref}
Files Changed: ${pullRequest.changed_files}
Additions: ${pullRequest.additions}
Deletions: ${pullRequest.deletions}
Description: ${pullRequest.body || "No description"}`;

      // Get PR diff using GitHub API
      const diff = await this.gitHubApiService.getPullRequestDiff(
        owner,
        repo,
        prNumber
      );

      // Use the git diff service to analyze the PR
      return this.gitDiffService.reviewDiff({
        diffContent: diff,
        modelName,
        reviewFocus,
        repositoryContext,
        diffOptions: {
          excludePatterns,
        },
        reasoningEffort,
        customPrompt,
      });
    } catch (error: unknown) {
      // Log with context, then rethrow so callers can map/handle the error.
      logger.error("Error reviewing GitHub Pull Request:", error);
      throw error;
    }
  }

  /**
   * Generates images from text prompts using Google's image generation models
   * Supports both Gemini and Imagen models for image generation
   *
   * @param prompt - The text prompt describing the desired image
   * @param modelName - Optional model name (defaults to optimal model selection)
   * @param resolution - Optional image resolution (512x512, 1024x1024, 1536x1536)
   * @param numberOfImages - Optional number of images to generate (1-8)
   * @param safetySettings - Optional safety settings for content filtering
   * @param negativePrompt - Optional text describing what to avoid in the image
   * @param stylePreset - Optional visual style to apply
   * @param seed - Optional seed for reproducible generation
   * @param styleStrength - Optional strength of style preset (0.0-1.0)
   * @param preferQuality - Optional preference for quality over speed
   * @param preferSpeed - Optional preference for speed over quality
   * @returns Promise resolving to image generation result with base64 data
   * @throws {GeminiValidationError} If parameters are invalid
   * @throws {GeminiContentFilterError} If content is blocked by safety filters
   * @throws {GeminiQuotaError} If API quota is exceeded
   * @throws {GeminiModelError} If the model encounters issues generating the image
   * @throws {GeminiNetworkError} If a network error occurs
   * @throws {GeminiApiError} For any other errors
   */
  public async generateImage(
    prompt: string,
    modelName?: string,
    resolution?: "512x512" | "1024x1024" | "1536x1536",
    numberOfImages?: number,
    safetySettings?: SafetySetting[],
    negativePrompt?: string,
    stylePreset?: string,
    seed?: number,
    styleStrength?: number,
    preferQuality?: boolean,
    preferSpeed?: boolean
  ): Promise<ImageGenerationResult> {
    // Log with truncated prompt for privacy/security
    logger.debug(`Generating image with prompt: ${prompt.substring(0, 30)}...`);

    try {
      // Import validation schemas and error handling lazily to keep startup fast.
      const { validateImageGenerationParams, DEFAULT_SAFETY_SETTINGS } =
        await import("./gemini/GeminiValidationSchemas.js");
      const {
        GeminiContentFilterError,
        GeminiModelError,
        GeminiErrorMessages,
      } = await import("../utils/geminiErrors.js");

      // Validate parameters using Zod schemas
      const validatedParams = validateImageGenerationParams(
        prompt,
        modelName,
        resolution,
        numberOfImages,
        safetySettings,
        negativePrompt,
        stylePreset,
        seed,
        styleStrength
      );

      // Explicit modelName wins; otherwise pick an image-generation model.
      const effectiveModel =
        validatedParams.modelName ||
        (await this.modelSelector.selectOptimalModel({
          taskType: "image-generation",
          preferQuality,
          preferSpeed,
          fallbackModel: "imagen-3.0-generate-002",
        }));

      // Get the model from the SDK
      // NOTE(review): getGenerativeModel()/model.generateImages() matches the
      // older @google/generative-ai surface; @google/genai v0.10 (used above
      // via `new GoogleGenAI({ apiKey })`) exposes image generation through
      // `genAI.models.generateImages(...)` — confirm this compiles against the
      // pinned SDK version.
      const model = this.genAI.getGenerativeModel({
        model: effectiveModel,
      });

      // Build generation config based on validated parameters
      const generationConfig: {
        numberOfImages: number;
        width?: number;
        height?: number;
        negativePrompt?: string;
        stylePreset?: string;
        seed?: number;
        styleStrength?: number;
      } = {
        numberOfImages: validatedParams.numberOfImages,
      };

      // Resolution strings are validated upstream as "WxH", so parseInt is safe here.
      if (validatedParams.resolution) {
        const [width, height] = validatedParams.resolution
          .split("x")
          .map((dim) => parseInt(dim, 10));
        generationConfig.width = width;
        generationConfig.height = height;
      }

      if (validatedParams.negativePrompt) {
        generationConfig.negativePrompt = validatedParams.negativePrompt;
      }

      if (validatedParams.stylePreset) {
        generationConfig.stylePreset = validatedParams.stylePreset;
      }

      // seed/styleStrength may legitimately be 0, so check for undefined, not truthiness.
      if (validatedParams.seed !== undefined) {
        generationConfig.seed = validatedParams.seed;
      }

      if (validatedParams.styleStrength !== undefined) {
        generationConfig.styleStrength = validatedParams.styleStrength;
      }

      // Apply default safety settings if none provided
      const effectiveSafetySettings =
        validatedParams.safetySettings ||
        (DEFAULT_SAFETY_SETTINGS as SafetySetting[]);

      // Generate the images using the correct generateImages API
      const result = await model.generateImages({
        prompt: validatedParams.prompt,
        safetySettings: effectiveSafetySettings as SafetySetting[],
        ...generationConfig,
      });

      // Check for safety blocks first (higher priority than empty results)
      if (result.promptSafetyMetadata?.blocked) {
        const safetyRatings = result.promptSafetyMetadata.safetyRatings || [];
        throw new GeminiContentFilterError(
          GeminiErrorMessages.CONTENT_FILTERED,
          safetyRatings.map((rating) => rating.category)
        );
      }

      // Check if images were generated successfully
      if (!result.images || result.images.length === 0) {
        throw new GeminiModelError(
          "No images were generated by the model",
          "image_generation"
        );
      }

      // Parse resolution for width/height; default matches the documented default resolution.
      const [width, height] = (validatedParams.resolution || "1024x1024")
        .split("x")
        .map((dim) => parseInt(dim, 10));

      // Format the images according to our expected structure
      const formattedImages = result.images.map((image) => ({
        base64Data: image.data || "",
        mimeType: image.mimeType || "image/png",
        width,
        height,
      }));

      const formattedResult: ImageGenerationResult = {
        images: formattedImages,
      };

      // Validate the generated images
      await this.validateGeneratedImages(formattedResult);

      return formattedResult;
    } catch (error: unknown) {
      const { mapGeminiError } = await import("../utils/geminiErrors.js");
      // Map to appropriate error type
      throw mapGeminiError(error, "generateImage");
    }
  }

  /**
   * Validates generated images to ensure they meet quality and safety standards
   * @param result - The image generation result to validate
   * @throws {GeminiValidationError} If validation fails
   */
  private async validateGeneratedImages(
    result: ImageGenerationResult
  ): Promise<void> {
    // Import validation error from utils
    const { GeminiValidationError } = await import("../utils/geminiErrors.js");

    // Check that each image has proper data
    for (const [index, image] of result.images.entries()) {
      // Verify base64 data is present and plausibly non-trivial
      // (100 chars is a heuristic lower bound for a real image payload).
      if (!image.base64Data || image.base64Data.length < 100) {
        throw new GeminiValidationError(
          `Image ${index} has invalid or missing data`,
          "base64Data"
        );
      }

      // Verify MIME type is supported
      const supportedMimeTypes = ["image/png", "image/jpeg", "image/webp"];
      if (!supportedMimeTypes.includes(image.mimeType)) {
        throw new GeminiValidationError(
          `Image ${index} has unsupported MIME type: ${image.mimeType}`,
          "mimeType"
        );
      }

      // Verify dimensions are positive numbers
      if (image.width <= 0 || image.height <= 0) {
        throw new GeminiValidationError(
          `Image ${index} has invalid dimensions: ${image.width}x${image.height}`,
          "dimensions"
        );
      }
    }
  }

  /**
   * Picks the model for a generation request.
   * An explicit modelName always wins; otherwise a model is chosen from the
   * caller's hints plus a complexity estimate of the prompt, falling back to
   * the configured default model.
   *
   * @param params Model-selection hints and optional prompt text
   * @returns Promise resolving to the chosen model name
   */
  private async selectModelForGeneration(params: {
    modelName?: string;
    preferQuality?: boolean;
    preferSpeed?: boolean;
    preferCost?: boolean;
    complexityHint?: "simple" | "medium" | "complex";
    taskType?: ModelSelectionCriteria["taskType"];
    prompt?: string;
  }): Promise<string> {
    if (params.modelName) {
      return params.modelName;
    }

    const complexity =
      params.complexityHint ||
      this.analyzePromptComplexity(params.prompt || "");

    return this.modelSelector.selectOptimalModel({
      taskType: params.taskType || "text-generation",
      complexityLevel: complexity,
      preferQuality: params.preferQuality,
      preferSpeed: params.preferSpeed,
      preferCost: params.preferCost,
      fallbackModel:
        this.defaultModelName ||
        this.configManager.getModelConfiguration().default,
    });
  }

  /**
   * Heuristic prompt-complexity classifier used when the caller gives no
   * complexityHint: keyword matches (analysis/code vocabulary) or a long
   * prompt (>100 words) => "complex"; >20 words => "medium"; else "simple".
   *
   * @param prompt The prompt text to classify
   * @returns One of "simple" | "medium" | "complex"
   */
  private analyzePromptComplexity(
    prompt: string
  ): "simple" | "medium" | "complex" {
    const complexKeywords = [
      "analyze",
      "compare",
      "evaluate",
      "synthesize",
      "reasoning",
      "complex",
      "detailed analysis",
      "comprehensive",
      "explain why",
      "what are the implications",
      "trade-offs",
      "pros and cons",
      "algorithm",
      "architecture",
      "design pattern",
    ];

    const codeKeywords = [
      "function",
      "class",
      "import",
      "export",
      "const",
      "let",
      "var",
      "if",
      "else",
      "for",
      "while",
      "return",
      "async",
      "await",
    ];

    const wordCount = prompt.split(/\s+/).length;
    const hasComplexKeywords = complexKeywords.some((keyword) =>
      prompt.toLowerCase().includes(keyword.toLowerCase())
    );
    const hasCodeKeywords = codeKeywords.some((keyword) =>
      prompt.toLowerCase().includes(keyword.toLowerCase())
    );

    if (hasComplexKeywords || hasCodeKeywords || wordCount > 100) {
      return "complex";
    } else if (wordCount > 20) {
      return "medium";
    } else {
      return "simple";
    }
  }

  /** Exposes the underlying model-selection service. */
  public getModelSelector(): ModelSelectionService {
    return this.modelSelector;
  }

  /** Selects the optimal model for the given criteria (delegates to the selector). */
  public async getOptimalModelForTask(
    criteria: ModelSelectionCriteria
  ): Promise<string> {
    return this.modelSelector.selectOptimalModel(criteria);
  }

  /** Returns true if the named model is available (delegates to the selector). */
  public isModelAvailable(modelName: string): boolean {
    return this.modelSelector.isModelAvailable(modelName);
  }

  /** Lists all available model names (delegates to the selector). */
  public getAvailableModels(): string[] {
    return this.modelSelector.getAvailableModels();
  }

  /** Checks whether a model is valid for a task type (delegates to the selector). */
  public validateModelForTask(
    modelName: string,
    taskType: ModelSelectionCriteria["taskType"]
  ): boolean {
    return this.modelSelector.validateModelForTask(modelName, taskType);
  }

  // Model selection history and performance metrics methods removed
  // These were not implemented in ModelSelectionService
}
774 |
// Define interfaces directly to avoid circular dependencies

/**
 * Parameters accepted by GeminiService.generateContent and
 * GeminiService.generateContentStream.
 */
export interface GenerateContentParams {
  /** User prompt to send to the model. */
  prompt: string;
  /** Explicit model override; when omitted a model is selected automatically. */
  modelName?: string;
  generationConfig?: GenerationConfig;
  safetySettings?: SafetySetting[];
  systemInstruction?: Content | string;
  /** Name of an existing cached content entry to reuse ("cachedContents/{id}"). */
  cachedContentName?: string;
  /** Optional URLs whose fetched content is supplied as additional context. */
  urlContext?: {
    urls: string[];
    fetchOptions?: {
      maxContentKb?: number;
      timeoutMs?: number;
      includeMetadata?: boolean;
      convertToMarkdown?: boolean;
      allowedDomains?: string[];
      userAgent?: string;
    };
  };
  // Model-selection hints consumed by GeminiService.selectModelForGeneration.
  preferQuality?: boolean;
  preferSpeed?: boolean;
  preferCost?: boolean;
  complexityHint?: "simple" | "medium" | "complex";
  taskType?: ModelSelectionCriteria["taskType"];
  urlCount?: number;
  estimatedUrlContentSize?: number;
}
802 |
/** Parameters for GeminiService.startChatSession. All fields are optional. */
export interface StartChatParams {
  /** Model to back the session; defaults to the service's default model. */
  modelName?: string;
  /** Prior conversation turns to seed the session with. */
  history?: Content[];
  generationConfig?: GenerationConfig;
  safetySettings?: SafetySetting[];
  tools?: Tool[];
  systemInstruction?: Content | string;
  /** Name of an existing cached content entry to reuse ("cachedContents/{id}"). */
  cachedContentName?: string;
}
812 |
/** Parameters for GeminiService.sendMessageToSession. */
export interface SendMessageParams {
  /** ID returned by startChatSession identifying the target session. */
  sessionId: string;
  /** Message text to send. */
  message: string;
  generationConfig?: GenerationConfig;
  safetySettings?: SafetySetting[];
  tools?: Tool[];
  toolConfig?: ToolConfig;
  /** Name of an existing cached content entry to reuse ("cachedContents/{id}"). */
  cachedContentName?: string;
}
822 |
/** Parameters for GeminiService.sendFunctionResultToSession. */
export interface SendFunctionResultParams {
  /** ID returned by startChatSession identifying the target session. */
  sessionId: string;
  /** Serialized function result to feed back to the model. */
  functionResponse: string;
  /** The original function call this result answers, if available. */
  functionCall?: FunctionCall;
}
828 |
/**
 * Interface for the routing parameters when sending messages to multiple models
 */
export interface RouteMessageParams {
  /** Message to route. */
  message: string;
  /** Candidate model names to route between. */
  models: string[];
  /** Optional prompt used to decide which model should answer. */
  routingPrompt?: string;
  /** Model to use when routing cannot pick a candidate. */
  defaultModel?: string;
  generationConfig?: GenerationConfig;
  safetySettings?: SafetySetting[];
  systemInstruction?: Content | string;
}
841 |
842 | // Re-export other types for backwards compatibility
843 | export type {
844 | Content,
845 | Tool,
846 | ToolConfig,
847 | GenerationConfig,
848 | SafetySetting,
849 | Part,
850 | FunctionCall,
851 | CacheId,
852 | } from "./gemini/GeminiTypes.js";
853 |
```
--------------------------------------------------------------------------------
/src/config/ConfigurationManager.ts:
--------------------------------------------------------------------------------
```typescript
1 | import * as path from "path";
2 | import {
3 | ExampleServiceConfig,
4 | GeminiServiceConfig,
5 | ModelConfiguration,
6 | ModelCapabilitiesMap,
7 | } from "../types/index.js";
8 | import { FileSecurityService } from "../utils/FileSecurityService.js";
9 | import { ModelMigrationService } from "../services/gemini/ModelMigrationService.js";
10 | import { logger } from "../utils/logger.js";
11 |
// Define the structure for all configurations managed
interface ManagedConfigs {
  exampleService: Required<ExampleServiceConfig>;
  geminiService: GeminiServiceConfig;
  // GitHub API access (token loaded from environment).
  github: {
    apiToken: string;
  };
  // Directories the server may write output to.
  allowedOutputPaths: string[];
  // MCP server/transport settings.
  mcpConfig: {
    host: string;
    port: number;
    connectionToken: string;
    clientId: string;
    logLevel?: "debug" | "info" | "warn" | "error";
    transport?: "stdio" | "sse";
    enableStreaming?: boolean;
    sessionTimeoutSeconds?: number;
  };
  // URL-context fetching settings (feature is off by default; see constructor).
  urlContext: {
    enabled: boolean;
    maxUrlsPerRequest: number;
    defaultMaxContentKb: number;
    defaultTimeoutMs: number;
    allowedDomains: string[];
    blocklistedDomains: string[];
    convertToMarkdown: boolean;
    includeMetadata: boolean;
    enableCaching: boolean;
    cacheExpiryMinutes: number;
    maxCacheSize: number;
    rateLimitPerDomainPerMinute: number;
    userAgent: string;
  };
  modelConfiguration: ModelConfiguration;
}
47 |
48 | /**
49 | * Centralized configuration management for all services.
50 | * Implements singleton pattern to ensure consistent configuration.
51 | */
52 | export class ConfigurationManager {
53 | private static instance: ConfigurationManager | null = null;
54 | private static instanceLock = false;
55 |
56 | private config: ManagedConfigs;
57 |
  /**
   * Builds the configuration in a fixed order: hard-coded defaults, then
   * environment-variable migration/validation, then environment overrides,
   * then model-configuration parsing, and finally file-security setup.
   * Private: instances are obtained via getInstance().
   */
  private constructor() {
    // Initialize with default configurations
    this.config = {
      exampleService: {
        // Define defaults for ExampleService
        greeting: "Hello",
        enableDetailedLogs: false,
      },
      geminiService: {
        apiKey: "",
        defaultModel: undefined,
        defaultImageResolution: "1024x1024",
        maxImageSizeMB: 10,
        supportedImageFormats: ["image/jpeg", "image/png", "image/webp"],
        defaultThinkingBudget: undefined,
      },
      modelConfiguration: this.buildDefaultModelConfiguration(),
      github: {
        // Default GitHub API token is empty; will be loaded from environment variable
        apiToken: "",
      },
      allowedOutputPaths: [],
      mcpConfig: {
        // Initialize MCP config
        host: "localhost",
        port: 8080,
        connectionToken: "", // Must be set via env
        clientId: "gemini-sdk-client",
        logLevel: "info",
        transport: "stdio",
      },
      urlContext: {
        // Initialize URL context config with secure defaults
        enabled: false, // Disabled by default for security
        maxUrlsPerRequest: 20,
        defaultMaxContentKb: 100,
        defaultTimeoutMs: 10000,
        allowedDomains: ["*"], // Allow all by default (can be restricted)
        blocklistedDomains: [], // Empty by default
        convertToMarkdown: true,
        includeMetadata: true,
        enableCaching: true,
        cacheExpiryMinutes: 15,
        maxCacheSize: 1000,
        rateLimitPerDomainPerMinute: 10,
        userAgent:
          "MCP-Gemini-Server/1.0 (+https://github.com/bsmi021/mcp-gemini-server)",
      },

      // Initialize other service configs with defaults:
      // yourService: {
      //   someSetting: 'default value',
      //   retryCount: 3,
      // },
    };

    // Run env-var migration before validation so renamed/legacy variables are
    // normalized first.
    const migrationService = ModelMigrationService.getInstance();
    migrationService.migrateEnvironmentVariables();

    // Validation failures are logged but do not throw here; hard requirements
    // are enforced separately by validateRequiredEnvVars().
    const validation = migrationService.validateConfiguration();
    if (!validation.isValid) {
      logger.error("[ConfigurationManager] Configuration validation failed", {
        errors: validation.errors,
      });
    }

    const deprecated = migrationService.getDeprecatedFeatures();
    if (deprecated.length > 0) {
      logger.warn("[ConfigurationManager] Deprecated features detected", {
        deprecated,
      });
    }

    this.validateRequiredEnvVars();
    this.loadEnvironmentOverrides();
    this.config.modelConfiguration = this.parseModelConfiguration();

    FileSecurityService.configureFromEnvironment();
  }
137 |
138 | private validateRequiredEnvVars(): void {
139 | // Skip validation in test environment
140 | if (process.env.NODE_ENV === "test") {
141 | logger.info(
142 | "Skipping environment variable validation in test environment"
143 | );
144 | return;
145 | }
146 |
147 | // Always require Gemini API key
148 | const requiredVars = ["GOOGLE_GEMINI_API_KEY"];
149 |
150 | // Check transport type to determine if MCP server variables are required
151 | const transportType =
152 | process.env.MCP_TRANSPORT || process.env.MCP_TRANSPORT_TYPE || "stdio";
153 |
154 | // Only require MCP server variables for HTTP/SSE transport modes
155 | // Note: MCP_CLIENT_ID is not required as it's optional with a default value
156 | if (
157 | transportType === "http" ||
158 | transportType === "sse" ||
159 | transportType === "streamable"
160 | ) {
161 | requiredVars.push(
162 | "MCP_SERVER_HOST",
163 | "MCP_SERVER_PORT",
164 | "MCP_CONNECTION_TOKEN"
165 | );
166 | }
167 |
168 | const missingVars = requiredVars.filter((varName) => !process.env[varName]);
169 |
170 | if (missingVars.length > 0) {
171 | throw new Error(
172 | `Missing required environment variables: ${missingVars.join(", ")}`
173 | );
174 | }
175 | }
176 |
177 | /**
178 | * Get the singleton instance of ConfigurationManager.
179 | * Basic lock to prevent race conditions during initial creation.
180 | */
181 | public static getInstance(): ConfigurationManager {
182 | if (!ConfigurationManager.instance) {
183 | if (!ConfigurationManager.instanceLock) {
184 | ConfigurationManager.instanceLock = true; // Lock
185 | try {
186 | ConfigurationManager.instance = new ConfigurationManager();
187 | } finally {
188 | ConfigurationManager.instanceLock = false; // Unlock
189 | }
190 | } else {
191 | // Basic busy wait if locked (consider a more robust async lock if high contention is expected)
192 | while (ConfigurationManager.instanceLock) {
193 | // Small delay to prevent tight loop
194 | const now = Date.now();
195 | while (Date.now() - now < 10) {
196 | // Intentional minimal delay
197 | }
198 | }
199 | // Re-check instance after wait
200 | if (!ConfigurationManager.instance) {
201 | // This path is less likely but handles edge cases if lock logic needs refinement
202 | return ConfigurationManager.getInstance();
203 | }
204 | }
205 | }
206 | return ConfigurationManager.instance;
207 | }
208 |
209 | // --- Getters for specific configurations ---
210 |
211 | public getExampleServiceConfig(): Required<ExampleServiceConfig> {
212 | // Return a copy to prevent accidental modification of the internal state
213 | return { ...this.config.exampleService };
214 | }
215 |
216 | public getGeminiServiceConfig(): GeminiServiceConfig {
217 | // Return a copy to prevent accidental modification
218 | return { ...this.config.geminiService };
219 | }
220 |
221 | // Getter for MCP Configuration
222 | public getMcpConfig(): Required<ManagedConfigs["mcpConfig"]> {
223 | // Return a copy to ensure type safety and prevent modification
224 | // Cast to Required because we validate essential fields are set from env vars.
225 | // Optional fields will have their defaults.
226 | return { ...this.config.mcpConfig } as Required<
227 | ManagedConfigs["mcpConfig"]
228 | >;
229 | }
230 |
231 | // Getter specifically for the default model name
232 | public getDefaultModelName(): string | undefined {
233 | return this.config.geminiService.defaultModel;
234 | }
235 |
236 | public getModelConfiguration(): ModelConfiguration {
237 | return { ...this.config.modelConfiguration };
238 | }
239 |
240 | /**
241 | * Returns the GitHub API token for GitHub API requests
242 | * @returns The configured GitHub API token or undefined if not set
243 | */
244 | public getGitHubApiToken(): string | undefined {
245 | return this.config.github.apiToken || undefined;
246 | }
247 |
248 | /**
249 | * Returns the list of allowed output paths for file writing
250 | * @returns A copy of the configured allowed output paths array
251 | */
252 | public getAllowedOutputPaths(): string[] {
253 | // Return a copy to prevent accidental modification
254 | return [...this.config.allowedOutputPaths];
255 | }
256 |
257 | /**
258 | * Returns the URL context configuration
259 | * @returns A copy of the URL context configuration
260 | */
261 | public getUrlContextConfig(): Required<ManagedConfigs["urlContext"]> {
262 | return { ...this.config.urlContext };
263 | }
264 |
265 | // Add getters for other service configs:
266 | // public getYourServiceConfig(): Required<YourServiceConfig> {
267 | // return { ...this.config.yourService };
268 | // }
269 |
270 | // --- Updaters for specific configurations (if runtime updates are needed) ---
271 |
272 | public updateExampleServiceConfig(
273 | update: Partial<ExampleServiceConfig>
274 | ): void {
275 | this.config.exampleService = {
276 | ...this.config.exampleService,
277 | ...update,
278 | };
279 | // Optional: Notify relevant services about the config change
280 | }
281 |
282 | // Add updaters for other service configs:
283 | // public updateYourServiceConfig(update: Partial<YourServiceConfig>): void {
284 | // this.config.yourService = {
285 | // ...this.config.yourService,
286 | // ...update,
287 | // };
288 | // }
289 |
  /**
   * Loads configuration overrides from environment variables.
   *
   * Called once from the constructor. Each recognized variable is parsed
   * and validated; invalid values are logged with a warning and ignored so
   * the built-in default remains in effect. Nothing is required here —
   * hard requirements are enforced separately by validateRequiredEnvVars().
   */
  private loadEnvironmentOverrides(): void {
    // --- ExampleService overrides ---
    // Example for ExampleService
    if (process.env.EXAMPLE_GREETING) {
      this.config.exampleService.greeting = process.env.EXAMPLE_GREETING;
    }
    if (process.env.EXAMPLE_ENABLE_LOGS) {
      // Any value other than "true" (case-insensitive) disables detailed logs
      this.config.exampleService.enableDetailedLogs =
        process.env.EXAMPLE_ENABLE_LOGS.toLowerCase() === "true";
    }

    // --- GitHub integration ---
    // Load GitHub API token if provided
    if (process.env.GITHUB_API_TOKEN) {
      this.config.github.apiToken = process.env.GITHUB_API_TOKEN;
      logger.info("[ConfigurationManager] GitHub API token configured");
    } else {
      logger.warn(
        "[ConfigurationManager] GITHUB_API_TOKEN environment variable not set. GitHub code review features may not work properly."
      );
    }

    // Add logic for other services based on their environment variables
    // if (process.env.YOUR_SERVICE_RETRY_COUNT) {
    //   const retryCount = parseInt(process.env.YOUR_SERVICE_RETRY_COUNT, 10);
    //   if (!isNaN(retryCount)) {
    //     this.config.yourService.retryCount = retryCount;
    //   }
    // }

    // --- Gemini service overrides ---
    // Load Gemini API Key (using the name from .env)
    if (process.env.GOOGLE_GEMINI_API_KEY) {
      this.config.geminiService.apiKey = process.env.GOOGLE_GEMINI_API_KEY;
    } else {
      // Log a warning if the key is missing, the service constructor will throw
      logger.warn(
        "[ConfigurationManager] WARNING: GOOGLE_GEMINI_API_KEY environment variable not set."
      );
    }

    // Load Default Gemini Model Name
    if (process.env.GOOGLE_GEMINI_MODEL) {
      this.config.geminiService.defaultModel = process.env.GOOGLE_GEMINI_MODEL;
      logger.info(
        `[ConfigurationManager] Default Gemini model set to: ${this.config.geminiService.defaultModel}`
      );
    } else {
      logger.info(
        "[ConfigurationManager] GOOGLE_GEMINI_MODEL environment variable not set. No default model configured."
      );
    }

    // Load image-specific settings if provided
    if (process.env.GOOGLE_GEMINI_IMAGE_RESOLUTION) {
      const resolution = process.env.GOOGLE_GEMINI_IMAGE_RESOLUTION;
      // Only the three square resolutions below are accepted
      if (["512x512", "1024x1024", "1536x1536"].includes(resolution)) {
        this.config.geminiService.defaultImageResolution = resolution as
          | "512x512"
          | "1024x1024"
          | "1536x1536";
        logger.info(
          `[ConfigurationManager] Default image resolution set to: ${resolution}`
        );
      } else {
        logger.warn(
          `[ConfigurationManager] Invalid image resolution '${resolution}' specified in GOOGLE_GEMINI_IMAGE_RESOLUTION. Using default.`
        );
      }
    }

    if (process.env.GOOGLE_GEMINI_MAX_IMAGE_SIZE_MB) {
      // Must be a positive integer (megabytes)
      const sizeMB = parseInt(process.env.GOOGLE_GEMINI_MAX_IMAGE_SIZE_MB, 10);
      if (!isNaN(sizeMB) && sizeMB > 0) {
        this.config.geminiService.maxImageSizeMB = sizeMB;
        logger.info(
          `[ConfigurationManager] Maximum image size set to: ${sizeMB}MB`
        );
      } else {
        logger.warn(
          `[ConfigurationManager] Invalid max image size '${process.env.GOOGLE_GEMINI_MAX_IMAGE_SIZE_MB}' specified. Using default.`
        );
      }
    }

    if (process.env.GOOGLE_GEMINI_SUPPORTED_IMAGE_FORMATS) {
      // Expected format: a JSON array of strings, e.g. '["image/png","image/jpeg"]'
      try {
        const formats = JSON.parse(
          process.env.GOOGLE_GEMINI_SUPPORTED_IMAGE_FORMATS
        );
        if (
          Array.isArray(formats) &&
          formats.every((f) => typeof f === "string")
        ) {
          this.config.geminiService.supportedImageFormats = formats;
          logger.info(
            `[ConfigurationManager] Supported image formats set to: ${formats.join(", ")}`
          );
        } else {
          // Funnels the non-array case into the same warning as a parse failure
          throw new Error("Invalid format array");
        }
      } catch (error) {
        logger.warn(
          `[ConfigurationManager] Invalid image formats specified in GOOGLE_GEMINI_SUPPORTED_IMAGE_FORMATS: '${process.env.GOOGLE_GEMINI_SUPPORTED_IMAGE_FORMATS}'. Using default.`
        );
      }
    }

    // Load default thinking budget if provided
    if (process.env.GOOGLE_GEMINI_DEFAULT_THINKING_BUDGET) {
      const budget = parseInt(
        process.env.GOOGLE_GEMINI_DEFAULT_THINKING_BUDGET,
        10
      );
      // Accepted range: 0-24576 tokens (matches the warning text below)
      if (!isNaN(budget) && budget >= 0 && budget <= 24576) {
        this.config.geminiService.defaultThinkingBudget = budget;
        logger.info(
          `[ConfigurationManager] Default thinking budget set to: ${budget} tokens`
        );
      } else {
        logger.warn(
          `[ConfigurationManager] Invalid thinking budget '${process.env.GOOGLE_GEMINI_DEFAULT_THINKING_BUDGET}' specified. Must be between 0 and 24576. Not using default thinking budget.`
        );
      }
    }

    // --- MCP server configuration ---
    // Load MCP Configuration
    if (process.env.MCP_SERVER_HOST) {
      this.config.mcpConfig.host = process.env.MCP_SERVER_HOST;
    }
    if (process.env.MCP_SERVER_PORT) {
      // Must be a valid TCP port (1-65535)
      const port = parseInt(process.env.MCP_SERVER_PORT, 10);
      if (!isNaN(port) && port > 0 && port < 65536) {
        this.config.mcpConfig.port = port;
      } else {
        logger.warn(
          `[ConfigurationManager] Invalid MCP_SERVER_PORT: '${process.env.MCP_SERVER_PORT}'. Using default ${this.config.mcpConfig.port}.`
        );
      }
    }
    if (process.env.MCP_CONNECTION_TOKEN) {
      this.config.mcpConfig.connectionToken = process.env.MCP_CONNECTION_TOKEN;
    }
    if (process.env.MCP_CLIENT_ID) {
      this.config.mcpConfig.clientId = process.env.MCP_CLIENT_ID;
    }
    if (process.env.MCP_LOG_LEVEL) {
      const logLevel = process.env.MCP_LOG_LEVEL.toLowerCase();
      if (["debug", "info", "warn", "error"].includes(logLevel)) {
        this.config.mcpConfig.logLevel = logLevel as
          | "debug"
          | "info"
          | "warn"
          | "error";
      } else {
        logger.warn(
          `[ConfigurationManager] Invalid MCP_LOG_LEVEL: '${process.env.MCP_LOG_LEVEL}'. Using default '${this.config.mcpConfig.logLevel}'.`
        );
      }
    }
    if (process.env.MCP_TRANSPORT) {
      const transport = process.env.MCP_TRANSPORT.toLowerCase();
      // NOTE(review): only "stdio" and "sse" are accepted here, while
      // validateRequiredEnvVars() also recognizes "http" and "streamable" —
      // confirm whether those transports should be accepted here as well.
      if (["stdio", "sse"].includes(transport)) {
        this.config.mcpConfig.transport = transport as "stdio" | "sse";
      } else {
        logger.warn(
          `[ConfigurationManager] Invalid MCP_TRANSPORT: '${process.env.MCP_TRANSPORT}'. Using default '${this.config.mcpConfig.transport}'.`
        );
      }
    }

    if (process.env.MCP_ENABLE_STREAMING) {
      this.config.mcpConfig.enableStreaming =
        process.env.MCP_ENABLE_STREAMING.toLowerCase() === "true";
      logger.info(
        `[ConfigurationManager] MCP streaming enabled: ${this.config.mcpConfig.enableStreaming}`
      );
    }

    if (process.env.MCP_SESSION_TIMEOUT) {
      // Positive integer, in seconds
      const timeout = parseInt(process.env.MCP_SESSION_TIMEOUT, 10);
      if (!isNaN(timeout) && timeout > 0) {
        this.config.mcpConfig.sessionTimeoutSeconds = timeout;
        logger.info(
          `[ConfigurationManager] MCP session timeout set to: ${timeout} seconds`
        );
      } else {
        logger.warn(
          `[ConfigurationManager] Invalid MCP_SESSION_TIMEOUT: '${process.env.MCP_SESSION_TIMEOUT}'. Using default.`
        );
      }
    }

    logger.info("[ConfigurationManager] MCP configuration loaded.");

    // --- URL context feature configuration ---
    // Load URL Context Configuration
    if (process.env.GOOGLE_GEMINI_ENABLE_URL_CONTEXT) {
      this.config.urlContext.enabled =
        process.env.GOOGLE_GEMINI_ENABLE_URL_CONTEXT.toLowerCase() === "true";
      logger.info(
        `[ConfigurationManager] URL context feature enabled: ${this.config.urlContext.enabled}`
      );
    }

    if (process.env.GOOGLE_GEMINI_URL_MAX_COUNT) {
      // Accepted range: 1-20 URLs per request
      const maxCount = parseInt(process.env.GOOGLE_GEMINI_URL_MAX_COUNT, 10);
      if (!isNaN(maxCount) && maxCount > 0 && maxCount <= 20) {
        this.config.urlContext.maxUrlsPerRequest = maxCount;
        logger.info(`[ConfigurationManager] URL max count set to: ${maxCount}`);
      } else {
        logger.warn(
          `[ConfigurationManager] Invalid URL max count '${process.env.GOOGLE_GEMINI_URL_MAX_COUNT}'. Must be between 1 and 20.`
        );
      }
    }

    if (process.env.GOOGLE_GEMINI_URL_MAX_CONTENT_KB) {
      // Accepted range: 1-1000 KB
      const maxKb = parseInt(process.env.GOOGLE_GEMINI_URL_MAX_CONTENT_KB, 10);
      if (!isNaN(maxKb) && maxKb > 0 && maxKb <= 1000) {
        this.config.urlContext.defaultMaxContentKb = maxKb;
        logger.info(
          `[ConfigurationManager] URL max content size set to: ${maxKb}KB`
        );
      } else {
        logger.warn(
          `[ConfigurationManager] Invalid URL max content size '${process.env.GOOGLE_GEMINI_URL_MAX_CONTENT_KB}'. Must be between 1 and 1000 KB.`
        );
      }
    }

    if (process.env.GOOGLE_GEMINI_URL_FETCH_TIMEOUT_MS) {
      // Accepted range: 1000-30000 ms
      const timeout = parseInt(
        process.env.GOOGLE_GEMINI_URL_FETCH_TIMEOUT_MS,
        10
      );
      if (!isNaN(timeout) && timeout >= 1000 && timeout <= 30000) {
        this.config.urlContext.defaultTimeoutMs = timeout;
        logger.info(
          `[ConfigurationManager] URL fetch timeout set to: ${timeout}ms`
        );
      } else {
        logger.warn(
          `[ConfigurationManager] Invalid URL fetch timeout '${process.env.GOOGLE_GEMINI_URL_FETCH_TIMEOUT_MS}'. Must be between 1000 and 30000 ms.`
        );
      }
    }

    if (process.env.GOOGLE_GEMINI_URL_ALLOWED_DOMAINS) {
      // Accepts either a JSON array or a comma-separated list (see parseStringArray)
      try {
        const domains = this.parseStringArray(
          process.env.GOOGLE_GEMINI_URL_ALLOWED_DOMAINS
        );
        this.config.urlContext.allowedDomains = domains;
        logger.info(
          `[ConfigurationManager] URL allowed domains set to: ${domains.join(", ")}`
        );
      } catch (error) {
        logger.warn(
          `[ConfigurationManager] Invalid URL allowed domains format: ${error}`
        );
      }
    }

    if (process.env.GOOGLE_GEMINI_URL_BLOCKLIST) {
      // Accepts either a JSON array or a comma-separated list (see parseStringArray)
      try {
        const domains = this.parseStringArray(
          process.env.GOOGLE_GEMINI_URL_BLOCKLIST
        );
        this.config.urlContext.blocklistedDomains = domains;
        logger.info(
          `[ConfigurationManager] URL blocklisted domains set to: ${domains.join(", ")}`
        );
      } catch (error) {
        logger.warn(
          `[ConfigurationManager] Invalid URL blocklist format: ${error}`
        );
      }
    }

    if (process.env.GOOGLE_GEMINI_URL_CONVERT_TO_MARKDOWN) {
      this.config.urlContext.convertToMarkdown =
        process.env.GOOGLE_GEMINI_URL_CONVERT_TO_MARKDOWN.toLowerCase() ===
        "true";
      logger.info(
        `[ConfigurationManager] URL markdown conversion enabled: ${this.config.urlContext.convertToMarkdown}`
      );
    }

    if (process.env.GOOGLE_GEMINI_URL_INCLUDE_METADATA) {
      this.config.urlContext.includeMetadata =
        process.env.GOOGLE_GEMINI_URL_INCLUDE_METADATA.toLowerCase() === "true";
      logger.info(
        `[ConfigurationManager] URL metadata inclusion enabled: ${this.config.urlContext.includeMetadata}`
      );
    }

    if (process.env.GOOGLE_GEMINI_URL_ENABLE_CACHING) {
      this.config.urlContext.enableCaching =
        process.env.GOOGLE_GEMINI_URL_ENABLE_CACHING.toLowerCase() === "true";
      logger.info(
        `[ConfigurationManager] URL caching enabled: ${this.config.urlContext.enableCaching}`
      );
    }

    if (process.env.GOOGLE_GEMINI_URL_USER_AGENT) {
      this.config.urlContext.userAgent =
        process.env.GOOGLE_GEMINI_URL_USER_AGENT;

      logger.info(
        `[ConfigurationManager] URL user agent set to: ${this.config.urlContext.userAgent}`
      );
    }

    logger.info("[ConfigurationManager] URL context configuration loaded.");

    // --- Allowed output paths for file writing ---
    // Always reset before reading the env var so no stale values survive
    this.config.allowedOutputPaths = [];
    const allowedOutputPathsEnv = process.env.ALLOWED_OUTPUT_PATHS;

    if (allowedOutputPathsEnv && allowedOutputPathsEnv.trim().length > 0) {
      const pathsArray = allowedOutputPathsEnv
        .split(",")
        .map((p) => p.trim()) // Trim whitespace from each path
        .filter((p) => p.length > 0); // Filter out any empty strings resulting from split

      if (pathsArray.length > 0) {
        this.config.allowedOutputPaths = pathsArray.map((p) => path.resolve(p)); // Resolve to absolute paths
        logger.info(
          `[ConfigurationManager] Allowed output paths configured: ${this.config.allowedOutputPaths.join(
            ", "
          )}`
        );
      } else {
        // This case handles if ALLOWED_OUTPUT_PATHS was something like ",," or " , "
        logger.warn(
          "[ConfigurationManager] ALLOWED_OUTPUT_PATHS environment variable was provided but contained no valid paths after trimming. File writing might be restricted."
        );
      }
    } else {
      logger.warn(
        "[ConfigurationManager] ALLOWED_OUTPUT_PATHS environment variable not set or is empty. File writing might be restricted or disabled."
      );
    }
  }
634 |
635 | private buildDefaultModelConfiguration(): ModelConfiguration {
636 | return {
637 | default: "gemini-2.5-flash-preview-05-20",
638 | textGeneration: [
639 | "gemini-2.5-pro-preview-05-06",
640 | "gemini-2.5-flash-preview-05-20",
641 | "gemini-2.0-flash",
642 | "gemini-1.5-pro",
643 | "gemini-1.5-flash",
644 | ],
645 | imageGeneration: [
646 | "imagen-3.0-generate-002",
647 | "gemini-2.0-flash-preview-image-generation",
648 | ],
649 | videoGeneration: ["veo-2.0-generate-001"],
650 | codeReview: [
651 | "gemini-2.5-pro-preview-05-06",
652 | "gemini-2.5-flash-preview-05-20",
653 | "gemini-2.0-flash",
654 | ],
655 | complexReasoning: [
656 | "gemini-2.5-pro-preview-05-06",
657 | "gemini-2.5-flash-preview-05-20",
658 | ],
659 | capabilities: this.buildCapabilitiesMap(),
660 | routing: {
661 | preferCostEffective: false,
662 | preferSpeed: false,
663 | preferQuality: true,
664 | },
665 | };
666 | }
667 |
  /**
   * Static capability table for every model this server knows about.
   *
   * These are hard-coded tier/limit metadata used for routing decisions —
   * they are not fetched from the API at runtime.
   * NOTE(review): token limits and context windows presumably mirror the
   * published model limits at the time of writing — confirm against current
   * Gemini documentation when adding or upgrading models.
   */
  private buildCapabilitiesMap(): ModelCapabilitiesMap {
    return {
      // Gemini 2.5 Pro preview: strongest reasoning, highest cost tier
      "gemini-2.5-pro-preview-05-06": {
        textGeneration: true,
        imageInput: true,
        videoInput: true,
        audioInput: true,
        imageGeneration: false,
        videoGeneration: false,
        codeExecution: "excellent",
        complexReasoning: "excellent",
        costTier: "high",
        speedTier: "medium",
        maxTokens: 65536,
        contextWindow: 1048576,
        supportsFunctionCalling: true,
        supportsSystemInstructions: true,
        supportsCaching: true,
      },
      // Gemini 2.5 Flash preview: same quality ratings at lower cost, faster
      "gemini-2.5-flash-preview-05-20": {
        textGeneration: true,
        imageInput: true,
        videoInput: true,
        audioInput: true,
        imageGeneration: false,
        videoGeneration: false,
        codeExecution: "excellent",
        complexReasoning: "excellent",
        costTier: "medium",
        speedTier: "fast",
        maxTokens: 65536,
        contextWindow: 1048576,
        supportsFunctionCalling: true,
        supportsSystemInstructions: true,
        supportsCaching: true,
      },
      // Gemini 2.0 Flash: general-purpose, smaller output limit (8192)
      "gemini-2.0-flash": {
        textGeneration: true,
        imageInput: true,
        videoInput: true,
        audioInput: true,
        imageGeneration: false,
        videoGeneration: false,
        codeExecution: "good",
        complexReasoning: "good",
        costTier: "medium",
        speedTier: "fast",
        maxTokens: 8192,
        contextWindow: 1048576,
        supportsFunctionCalling: true,
        supportsSystemInstructions: true,
        supportsCaching: true,
      },
      // Image-generation-capable Flash variant: no function calling/caching
      "gemini-2.0-flash-preview-image-generation": {
        textGeneration: true,
        imageInput: true,
        videoInput: false,
        audioInput: false,
        imageGeneration: true,
        videoGeneration: false,
        codeExecution: "basic",
        complexReasoning: "basic",
        costTier: "medium",
        speedTier: "medium",
        maxTokens: 8192,
        contextWindow: 32000,
        supportsFunctionCalling: false,
        supportsSystemInstructions: true,
        supportsCaching: false,
      },
      // Gemini 1.5 Pro: legacy tier with the largest context window here (2M)
      "gemini-1.5-pro": {
        textGeneration: true,
        imageInput: true,
        videoInput: true,
        audioInput: true,
        imageGeneration: false,
        videoGeneration: false,
        codeExecution: "good",
        complexReasoning: "good",
        costTier: "high",
        speedTier: "medium",
        maxTokens: 8192,
        contextWindow: 2000000,
        supportsFunctionCalling: true,
        supportsSystemInstructions: true,
        supportsCaching: true,
      },
      // Gemini 1.5 Flash: cheapest text model in the table
      "gemini-1.5-flash": {
        textGeneration: true,
        imageInput: true,
        videoInput: true,
        audioInput: true,
        imageGeneration: false,
        videoGeneration: false,
        codeExecution: "basic",
        complexReasoning: "basic",
        costTier: "low",
        speedTier: "fast",
        maxTokens: 8192,
        contextWindow: 1000000,
        supportsFunctionCalling: true,
        supportsSystemInstructions: true,
        supportsCaching: true,
      },
      // Imagen 3: image generation only — token fields are 0 (not applicable)
      "imagen-3.0-generate-002": {
        textGeneration: false,
        imageInput: false,
        videoInput: false,
        audioInput: false,
        imageGeneration: true,
        videoGeneration: false,
        codeExecution: "none",
        complexReasoning: "none",
        costTier: "medium",
        speedTier: "medium",
        maxTokens: 0,
        contextWindow: 0,
        supportsFunctionCalling: false,
        supportsSystemInstructions: false,
        supportsCaching: false,
      },
      // Veo 2: video generation from image input — token fields are 0 (not applicable)
      "veo-2.0-generate-001": {
        textGeneration: false,
        imageInput: true,
        videoInput: false,
        audioInput: false,
        imageGeneration: false,
        videoGeneration: true,
        codeExecution: "none",
        complexReasoning: "none",
        costTier: "high",
        speedTier: "slow",
        maxTokens: 0,
        contextWindow: 0,
        supportsFunctionCalling: false,
        supportsSystemInstructions: true,
        supportsCaching: false,
      },
    };
  }
808 |
809 | private parseModelConfiguration(): ModelConfiguration {
810 | const textModels = this.parseModelArray("GOOGLE_GEMINI_MODELS") ||
811 | this.parseModelArray("GOOGLE_GEMINI_TEXT_MODELS") || [
812 | process.env.GOOGLE_GEMINI_MODEL || "gemini-2.5-flash-preview-05-20",
813 | ];
814 |
815 | const imageModels = this.parseModelArray("GOOGLE_GEMINI_IMAGE_MODELS") || [
816 | "imagen-3.0-generate-002",
817 | "gemini-2.0-flash-preview-image-generation",
818 | ];
819 |
820 | const videoModels = this.parseModelArray("GOOGLE_GEMINI_VIDEO_MODELS") || [
821 | "veo-2.0-generate-001",
822 | ];
823 |
824 | const codeModels = this.parseModelArray("GOOGLE_GEMINI_CODE_MODELS") || [
825 | "gemini-2.5-pro-preview-05-06",
826 | "gemini-2.5-flash-preview-05-20",
827 | "gemini-2.0-flash",
828 | ];
829 |
830 | return {
831 | default: process.env.GOOGLE_GEMINI_DEFAULT_MODEL || textModels[0],
832 | textGeneration: textModels,
833 | imageGeneration: imageModels,
834 | videoGeneration: videoModels,
835 | codeReview: codeModels,
836 | complexReasoning: textModels.filter((m) => this.isHighReasoningModel(m)),
837 | capabilities: this.buildCapabilitiesMap(),
838 | routing: this.parseRoutingPreferences(),
839 | };
840 | }
841 |
842 | private parseModelArray(envVarName: string): string[] | null {
843 | const envValue = process.env[envVarName];
844 | if (!envValue) return null;
845 |
846 | try {
847 | const parsed = JSON.parse(envValue);
848 | if (
849 | Array.isArray(parsed) &&
850 | parsed.every((item) => typeof item === "string")
851 | ) {
852 | return parsed;
853 | }
854 | logger.warn(
855 | `[ConfigurationManager] Invalid ${envVarName} format: expected JSON array of strings`
856 | );
857 | return null;
858 | } catch (error) {
859 | logger.warn(
860 | `[ConfigurationManager] Failed to parse ${envVarName}: ${error}`
861 | );
862 | return null;
863 | }
864 | }
865 |
866 | private isHighReasoningModel(modelName: string): boolean {
867 | const highReasoningModels = [
868 | "gemini-2.5-pro-preview-05-06",
869 | "gemini-2.5-flash-preview-05-20",
870 | "gemini-1.5-pro",
871 | ];
872 | return highReasoningModels.includes(modelName);
873 | }
874 |
875 | private parseRoutingPreferences(): ModelConfiguration["routing"] {
876 | return {
877 | preferCostEffective:
878 | process.env.GOOGLE_GEMINI_ROUTING_PREFER_COST?.toLowerCase() === "true",
879 | preferSpeed:
880 | process.env.GOOGLE_GEMINI_ROUTING_PREFER_SPEED?.toLowerCase() ===
881 | "true",
882 | preferQuality:
883 | process.env.GOOGLE_GEMINI_ROUTING_PREFER_QUALITY?.toLowerCase() ===
884 | "true" ||
885 | (!process.env.GOOGLE_GEMINI_ROUTING_PREFER_COST &&
886 | !process.env.GOOGLE_GEMINI_ROUTING_PREFER_SPEED),
887 | };
888 | }
889 |
890 | /**
891 | * Parse a comma-separated string or JSON array into a string array
892 | */
893 | private parseStringArray(value: string): string[] {
894 | if (!value || value.trim() === "") {
895 | return [];
896 | }
897 |
898 | // Try to parse as JSON first
899 | if (value.trim().startsWith("[")) {
900 | try {
901 | const parsed = JSON.parse(value);
902 | if (
903 | Array.isArray(parsed) &&
904 | parsed.every((item) => typeof item === "string")
905 | ) {
906 | return parsed;
907 | }
908 | throw new Error("Not a string array");
909 | } catch (error) {
910 | throw new Error(`Invalid JSON array format: ${error}`);
911 | }
912 | }
913 |
914 | // Parse as comma-separated string
915 | return value
916 | .split(",")
917 | .map((item) => item.trim())
918 | .filter((item) => item.length > 0);
919 | }
920 | }
921 |
```