# vertex-ai-mcp-server
This is page 2 of 2. Use http://codebase.md/shariqriazz/vertex-ai-mcp-server?page={x} to view the full context.

# Directory Structure

```
├── .env.example
├── .gitignore
├── bun.lock
├── Dockerfile
├── LICENSE
├── package.json
├── README.md
├── smithery.yaml
├── src
│   ├── config.ts
│   ├── index.ts
│   ├── tools
│   │   ├── answer_query_direct.ts
│   │   ├── answer_query_websearch.ts
│   │   ├── architecture_pattern_recommendation.ts
│   │   ├── code_analysis_with_docs.ts
│   │   ├── database_schema_analyzer.ts
│   │   ├── dependency_vulnerability_scan.ts
│   │   ├── directory_tree.ts
│   │   ├── documentation_generator.ts
│   │   ├── edit_file.ts
│   │   ├── execute_terminal_command.ts
│   │   ├── explain_topic_with_docs.ts
│   │   ├── generate_project_guidelines.ts
│   │   ├── get_doc_snippets.ts
│   │   ├── get_file_info.ts
│   │   ├── index.ts
│   │   ├── list_directory.ts
│   │   ├── microservice_design_assistant.ts
│   │   ├── move_file.ts
│   │   ├── read_file.ts
│   │   ├── regulatory_compliance_advisor.ts
│   │   ├── save_answer_query_direct.ts
│   │   ├── save_answer_query_websearch.ts
│   │   ├── save_doc_snippet.ts
│   │   ├── save_generate_project_guidelines.ts
│   │   ├── save_topic_explanation.ts
│   │   ├── search_files.ts
│   │   ├── security_best_practices_advisor.ts
│   │   ├── technical_comparison.ts
│   │   ├── testing_strategy_generator.ts
│   │   ├── tool_definition.ts
│   │   └── write_file.ts
│   ├── utils.ts
│   └── vertex_ai_client.ts
└── tsconfig.json
```

# Files

--------------------------------------------------------------------------------
/src/tools/save_generate_project_guidelines.ts:
--------------------------------------------------------------------------------

```typescript
import { McpError, ErrorCode } from "@modelcontextprotocol/sdk/types.js";
import { ToolDefinition, modelIdPlaceholder } from "./tool_definition.js";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";

// Schema for combined arguments
export const SaveGenerateProjectGuidelinesArgsSchema = z.object({
    tech_stack: z.array(z.string()).min(1).describe("An array of strings specifying the project's technologies, optionally with versions (e.g., ['React', 'TypeScript 5.x', 'Node.js', 'Express 4.18', 'PostgreSQL 16.x']). If no version is specified, the latest stable version will be assumed."),
    output_path: z.string().describe("The relative path where the generated guidelines Markdown file should be saved (e.g., 'docs/PROJECT_GUIDELINES.md').")
});
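// Example (hypothetical) arguments that satisfy this schema:
//   { tech_stack: ["React", "TypeScript 5.x", "PostgreSQL 16.x"],
//     output_path: "docs/PROJECT_GUIDELINES.md" }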

// Convert Zod schema to JSON schema
const SaveGenerateProjectGuidelinesJsonSchema = zodToJsonSchema(SaveGenerateProjectGuidelinesArgsSchema);

export const saveGenerateProjectGuidelinesTool: ToolDefinition = {
    name: "save_generate_project_guidelines",
    description: `Generates comprehensive project guidelines based on a tech stack using web search and saves the result to a specified file path. Uses the configured Vertex AI model (${modelIdPlaceholder}). Requires 'tech_stack' and 'output_path'.`,
    inputSchema: SaveGenerateProjectGuidelinesJsonSchema as any,

    // This buildPrompt function contains the core logic for generating the AI prompt.
    // The main handler in index.ts will call this *part* of the logic.
    buildPrompt: (args: any, modelId: string) => {
        // Validate args using the combined schema
        const parsed = SaveGenerateProjectGuidelinesArgsSchema.safeParse(args);
        if (!parsed.success) {
             throw new McpError(ErrorCode.InvalidParams, `Invalid arguments for save_generate_project_guidelines: ${parsed.error.errors.map(e => `${e.path.join('.')}: ${e.message}`).join(', ')}`);
        }
        const { tech_stack } = parsed.data; // output_path is used in the handler, not the prompt

        const techStackString = tech_stack.join(', ');

        // --- Use the Updated Prompt Logic Provided by User ---
        const systemInstructionText = `You are an AI assistant acting as a Senior Enterprise Technical Architect and Lead Developer with 15+ years of experience. Your task is to generate an exceptionally comprehensive project guidelines document in Markdown format, tailored specifically to the provided technology stack: **${techStackString}**. You MUST synthesize information EXCLUSIVELY from the latest official documentation, widely accepted style guides, and authoritative best practice articles found via web search for the relevant versions.

CRITICAL RESEARCH METHODOLOGY REQUIREMENTS:
1. **VERSION HANDLING:** For each technology listed in the stack (${techStackString}):
   a. **If a specific version is provided** (e.g., "TypeScript x.x", "Express x.xx"): Base guidelines ONLY on information found via web search for that EXACT specified version.
   b. **If NO specific version is provided** (e.g., "React", "Node.js"): You MUST FIRST perform **multiple web searches** (e.g., "[technology] latest stable version", "[technology] releases", "[technology] official blog announcements") to identify the **ABSOLUTE latest, most recent STABLE version** (or the **ABSOLUTE latest, most recent STABLE LTS version** for technologies like Node.js, checking the official release schedule). **Verify this against official sources.** State the identified absolute latest version clearly in the "Technology Stack Overview" section. THEN, base all subsequent guidelines and searches for that technology EXCLUSIVELY on the identified absolute latest stable version. **Do NOT use older stable versions if a newer one exists.**
2. TREAT ALL PRE-EXISTING KNOWLEDGE AS POTENTIALLY OUTDATED. Base guidelines ONLY on information found via web search for the relevant versions (either specified or the absolute latest stable identified).
3. For EACH technology (using the relevant version):
   a. First search for "[technology] [version] official documentation" (e.g., "React xx.x official documentation", "Latest Node.js LTS official documentation")
   b. Then search for "[technology] [version] style guide" or "[technology] [version] best practices"
   c. Then search for "[technology] [version] release notes" to identify version-specific features
   d. Finally search for "[technology] [version] security advisories" and "[technology] [version] performance optimization"
4. For EACH PAIR of technologies in the stack (using relevant versions), search for specific integration guidelines (e.g., "Latest TypeScript with Latest React best practices")
5. Prioritize sources in this order:
   a. Official documentation (e.g., reactjs.org, nodejs.org)
   b. Official GitHub repositories and their wikis/READMEs
   c. Widely-adopted style guides (e.g., Airbnb JavaScript Style Guide, Google's Java Style Guide)
   d. Technical blogs from the technology creators or major contributors
   e. Well-established tech companies' engineering blogs (e.g., Meta Engineering, Netflix Tech Blog)
   f. Reputable developer platforms (StackOverflow only for verified/high-voted answers)
6. Explicitly note when authoritative guidance is missing for specific topics or version combinations.

COMPREHENSIVE DOCUMENT STRUCTURE REQUIREMENTS:
The document MUST include ALL of the following major sections with appropriate subsections:

1. **Executive Summary**
   * One-paragraph high-level overview of the technology stack
   * Bullet points highlighting 3-5 most critical guidelines that span the entire stack

2. **Technology Stack Overview**
   * **Identified Versions:** Clearly list each technology and the specific version used for these guidelines (either provided or identified as latest stable/LTS).
   * Version-specific capabilities and limitations for each component based on the identified version.
   * Expected technology lifecycle considerations (upcoming EOL dates, migration paths) for the identified versions.
   * Compatibility matrix showing tested/verified combinations for the identified versions.
   * Diagram recommendations for visualizing the stack architecture.

3. **Development Environment Setup**
   * Required development tools and versions (IDEs, CLIs, extensions)
   * Recommended local environment configurations with exact version numbers
   * Docker/containerization standards if applicable
   * Local development workflow recommendations

4. **Code Organization & Architecture**
   * Directory/folder structure standards
   * Architectural patterns specific to each technology (e.g., hooks patterns for React)
   * Module organization principles
   * State management approach
   * API design principles specific to the technology versions
   * Database schema design principles (if applicable)

5. **Coding Standards** (language/framework-specific with explicit examples)
   * Naming conventions with clear examples showing right/wrong approaches
   * Formatting and linting configurations with tool-specific recommendations
   * Type definitions and type safety guidelines
   * Comments and documentation requirements with examples
   * File size/complexity limits with quantitative metrics

6. **Version-Specific Implementations**
   * Feature usage guidance specifically for the stated versions
   * Deprecated features to avoid in these versions
   * Migration strategies from previous versions if applicable
   * Version-specific optimizations
   * Innovative patterns enabled by latest versions

7. **Component Interaction Guidelines**
   * How each technology should integrate with others in the stack
   * Data transformation standards between layers
   * Communication protocols and patterns
   * Error handling and propagation between components

8. **Security Best Practices**
   * Authentication and authorization patterns
   * Input validation and sanitization
   * OWASP security considerations specific to each technology
   * Dependency management and vulnerability scanning
   * Secrets management
   * Version-specific security concerns

9. **Performance Optimization**
   * Stack-specific performance metrics and benchmarks
   * Version-specific performance features and optimizations
   * Resource management (memory, connections, threads)
   * Caching strategies tailored to the stack
   * Load testing recommendations

10. **Testing Strategy**
    * Test pyramid implementation for this specific stack
    * Recommended testing frameworks and tools with exact versions
    * Unit testing standards with coverage expectations (specific percentages)
    * Integration testing approach
    * End-to-end testing methodology
    * Performance testing guidelines
    * Mock/stub implementation guidelines

11. **Error Handling & Logging**
    * Error categorization framework
    * Logging standards and levels
    * Monitoring integration recommendations
    * Debugging best practices
    * Observability considerations

12. **Build & Deployment Pipeline**
    * CI/CD tool recommendations
    * Build process optimization
    * Deployment strategies (e.g., blue-green, canary)
    * Environment-specific configurations
    * Release management process

13. **Documentation Requirements**
    * API documentation standards
    * Technical documentation templates
    * User documentation guidelines
    * Knowledge transfer protocols

14. **Common Pitfalls & Anti-patterns**
    * Technology-specific anti-patterns with explicit examples
    * Known bugs or issues in specified versions
    * Legacy patterns to avoid
    * Performance traps specific to this stack

15. **Collaboration Workflows**
    * Code review checklist tailored to the stack
    * Pull request/merge request standards
    * Branching strategy
    * Communication protocols for technical discussions

16. **Governance & Compliance**
    * Code ownership model
    * Technical debt management approach
    * Accessibility compliance considerations
    * Regulatory requirements affecting implementation (if applicable)

CRITICAL FORMATTING & CONTENT REQUIREMENTS:

1. CODE EXAMPLES - For EVERY major guideline (not just a select few):
   * Provide BOTH correct AND incorrect implementations side-by-side
   * Include comments explaining WHY the guidance matters
   * Ensure examples are complete enough to demonstrate the principle
   * Use syntax highlighting appropriate to the language
   * For complex patterns, show progressive implementation steps

2. VISUAL ELEMENTS:
   * Recommend specific diagrams that should be created (architecture diagrams, data flow diagrams)
   * Use Markdown tables for compatibility matrices and feature comparisons
   * Use clear section dividers for readability

3. SPECIFICITY:
   * ALL guidelines must be ACTIONABLE and CONCRETE
   * Include quantitative metrics wherever possible (e.g., "Functions should not exceed 30 lines" instead of "Keep functions short")
   * Specify exact tool versions and configuration options
   * Avoid generic advice that applies to any technology stack

4. CITATIONS:
   * Include inline citations for EVERY significant guideline using format: [Source: URL]
   * For critical security or architectural recommendations, cite multiple sources if available
   * When citing version-specific features, link directly to release notes or version documentation
   * If guidance conflicts between sources, note the conflict and explain your recommendation

5. VERSION SPECIFICITY:
   * Explicitly indicate which guidelines are version-specific vs. universal
   * Note when a practice is specific to the combination of technologies in this stack
   * Identify features that might change in upcoming version releases
   * Include recommended update paths when applicable

OUTPUT FORMAT:
- Start with a title: "# Comprehensive Project Guidelines for ${techStackString}"
- Use Markdown headers (##, ###, ####) to structure sections and subsections logically
- Use bulleted lists for individual guidelines
- Use numbered lists for sequential procedures
- Use code blocks with language specification for all code examples
- Use tables for comparative information
- Include a comprehensive table of contents
- Use blockquotes to highlight critical warnings or notes
- End with an "Appendix" section containing links to all cited resources
- The entire output must be a single, coherent Markdown document that feels like it was crafted by an expert technical architect`;

        const userQueryText = `Generate an exceptionally detailed and comprehensive project guidelines document in Markdown format for a project using the following technology stack: **${techStackString}**.

**Important:** For any technology listed without a specific version, first identify the latest stable version (or latest stable LTS for Node.js) via web search, state it clearly in the overview, and base the guidelines on that version. For technologies with specified versions, use only those versions.

Search for and synthesize information from the latest authoritative sources for the relevant versions of each technology:
1. Official documentation for each relevant version (specified or latest stable).
2. Established style guides and best practices from technology creators for those versions.
3. Security advisories and performance optimization guidance for those versions.
4. Integration patterns between the specific technologies in this stack (using relevant versions).

Your document must comprehensively cover:
- Development environment setup with exact tool versions
- Code organization and architectural patterns specific to these versions
- Detailed coding standards with clear examples of both correct and incorrect approaches
- Version-specific implementation details highlighting new features and deprecations
- Component interaction guidelines showing how these technologies should work together
- Comprehensive security best practices addressing OWASP concerns
- Performance optimization techniques validated for these specific versions
- Testing strategy with specific framework recommendations and coverage expectations
- Error handling patterns and logging standards
- Build and deployment pipeline recommendations
- Documentation requirements and standards
- Common pitfalls and anti-patterns with explicit examples
- Team collaboration workflows tailored to this technology stack
- Governance and compliance considerations

Ensure each guideline is actionable, specific, and supported by code examples wherever applicable. Cite authoritative sources for all key recommendations. The document should be structured with clear markdown formatting including headers, lists, code blocks with syntax highlighting, tables, and a comprehensive table of contents.`;

        // Return the prompt components needed by the handler
        return {
            systemInstructionText: systemInstructionText,
            userQueryText: userQueryText,
            useWebSearch: true, // Always use web search for guidelines
            enableFunctionCalling: false // No function calling needed for generation
        };
    }
};
```
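
For context, a minimal sketch of exercising this tool's `buildPrompt` in isolation (the import path, arguments, and model ID below are hypothetical; in the server, `src/index.ts` performs this call and then writes the generated Markdown to `output_path`):

```typescript
import { saveGenerateProjectGuidelinesTool } from "./build/tools/save_generate_project_guidelines.js";

// Hypothetical arguments; the real handler passes the configured Vertex AI model ID.
const prompt = saveGenerateProjectGuidelinesTool.buildPrompt(
    { tech_stack: ["React", "TypeScript 5.x"], output_path: "docs/PROJECT_GUIDELINES.md" },
    "example-model-id"
);

console.log(prompt.useWebSearch);          // true: guidelines generation always uses web search
console.log(prompt.enableFunctionCalling); // false: plain generation, no function calling
```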

--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env node

import dotenv from 'dotenv';
import path from 'path';

// Load .env file from the current working directory (where npx/node is run)
// This ensures it works correctly when run via npx outside the project dir
dotenv.config({ path: path.resolve(process.cwd(), '.env') });

import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
  CallToolRequestSchema,
  ListToolsRequestSchema,
  McpError,
  ErrorCode,
} from "@modelcontextprotocol/sdk/types.js";
// Removed vertexai Content import as CombinedContent covers it
import fs from "fs/promises";
import { z } from "zod"; // Needed for schema parsing within handler
import { diffLines, createTwoFilesPatch } from 'diff';
import { minimatch } from 'minimatch';
import { exec } from 'child_process'; // Added for command execution
import util from 'util'; // Added for promisify

import { getAIConfig } from './config.js';
// Import CombinedContent along with callGenerativeAI
import { callGenerativeAI, CombinedContent } from './vertex_ai_client.js';
import { allTools, toolMap } from './tools/index.js';
import { buildInitialContent, getToolsForApi } from './tools/tool_definition.js';

// Import Zod schemas from tool files for validation within the handler
import { ReadFileArgsSchema } from './tools/read_file.js';
// import { ReadMultipleFilesArgsSchema } from './tools/read_multiple_files.js'; // Removed
import { WriteFileArgsSchema } from './tools/write_file.js';
import { EditFileArgsSchema, EditOperationSchema } from './tools/edit_file.js'; // Import EditOperationSchema too
// import { CreateDirectoryArgsSchema } from './tools/create_directory.js'; // Removed
import { ListDirectoryArgsSchema } from './tools/list_directory.js';
import { DirectoryTreeArgsSchema } from './tools/directory_tree.js';
import { MoveFileArgsSchema } from './tools/move_file.js';
import { SearchFilesArgsSchema } from './tools/search_files.js';
import { GetFileInfoArgsSchema } from './tools/get_file_info.js';
// Import schemas for the new combined tools
import { SaveGenerateProjectGuidelinesArgsSchema } from './tools/save_generate_project_guidelines.js';
import { SaveDocSnippetArgsSchema } from './tools/save_doc_snippet.js';
import { SaveTopicExplanationArgsSchema } from './tools/save_topic_explanation.js';
import { SaveAnswerQueryDirectArgsSchema } from './tools/save_answer_query_direct.js';
import { SaveAnswerQueryWebsearchArgsSchema } from './tools/save_answer_query_websearch.js';
import { ExecuteTerminalCommandArgsSchema } from './tools/execute_terminal_command.js'; // Renamed


// --- Filesystem Helper Functions (Adapted from example.ts) ---

// Basic security check - ensure the resolved path stays within the workspace.
// A bare startsWith() prefix check would wrongly allow sibling directories
// (e.g., "/workspace-other" passes a "/workspace" prefix test), so compare
// against the workspace root with a trailing path separator.
function validateWorkspacePath(requestedPath: string): string {
    const workspaceRoot = process.cwd();
    const absolutePath = path.resolve(workspaceRoot, requestedPath);
    if (absolutePath !== workspaceRoot && !absolutePath.startsWith(workspaceRoot + path.sep)) {
        throw new Error(`Path traversal attempt detected: ${requestedPath}`);
    }
    return absolutePath;
}
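
// Example (hypothetical): with a workspace root of "/workspace",
//   validateWorkspacePath("src/index.ts")   resolves inside the root (allowed)
//   validateWorkspacePath("../secrets.env") resolves outside the root and throws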

interface FileInfo {
  size: number;
  created: Date;
  modified: Date;
  accessed: Date;
  isDirectory: boolean;
  isFile: boolean;
  permissions: string;
}

async function getFileStats(filePath: string): Promise<FileInfo> {
  const stats = await fs.stat(filePath);
  return {
    size: stats.size,
    created: stats.birthtime,
    modified: stats.mtime,
    accessed: stats.atime,
    isDirectory: stats.isDirectory(),
    isFile: stats.isFile(),
    permissions: stats.mode.toString(8).slice(-3), // POSIX permissions
  };
}

async function searchFilesRecursive(
  rootPath: string,
  currentPath: string,
  pattern: string,
  excludePatterns: string[],
  results: string[]
): Promise<void> {
  const entries = await fs.readdir(currentPath, { withFileTypes: true });

  for (const entry of entries) {
    const fullPath = path.join(currentPath, entry.name);
    const relativePath = path.relative(rootPath, fullPath);

    const shouldExclude = excludePatterns.some(p => minimatch(relativePath, p, { dot: true, matchBase: true }));
    if (shouldExclude) {
      continue;
    }

    if (entry.name.toLowerCase().includes(pattern.toLowerCase())) {
      results.push(path.relative(process.cwd(), fullPath));
    }

    if (entry.isDirectory()) {
      try {
          const realPath = await fs.realpath(fullPath);
          if (realPath.startsWith(rootPath)) {
             await searchFilesRecursive(rootPath, fullPath, pattern, excludePatterns, results);
          }
      } catch (e) {
          console.error(`Skipping search in ${fullPath}: ${(e as Error).message}`);
      }
    }
  }
}
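
// Example (hypothetical) usage: collect workspace-relative paths whose names
// contain "config" (case-insensitive), skipping excluded globs:
//   const matches: string[] = [];
//   await searchFilesRecursive(root, root, "config", ["**/node_modules/**"], matches);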

function normalizeLineEndings(text: string): string {
  return text.replace(/\r\n/g, '\n');
}

function createUnifiedDiff(originalContent: string, newContent: string, filepath: string = 'file'): string {
  const normalizedOriginal = normalizeLineEndings(originalContent);
  const normalizedNew = normalizeLineEndings(newContent);
  return createTwoFilesPatch(
    filepath, filepath, normalizedOriginal, normalizedNew, 'original', 'modified'
  );
}

async function applyFileEdits(
  filePath: string,
  edits: z.infer<typeof EditOperationSchema>[],
  dryRun = false
): Promise<string> {
  const content = normalizeLineEndings(await fs.readFile(filePath, 'utf-8'));
  let modifiedContent = content;

  for (const edit of edits) {
    const normalizedOld = normalizeLineEndings(edit.oldText);
    const normalizedNew = normalizeLineEndings(edit.newText);

    if (modifiedContent.includes(normalizedOld)) {
      modifiedContent = modifiedContent.replace(normalizedOld, normalizedNew);
      continue;
    }

    const oldLines = normalizedOld.split('\n');
    const contentLines = modifiedContent.split('\n');
    let matchFound = false;

    for (let i = 0; i <= contentLines.length - oldLines.length; i++) {
      const potentialMatch = contentLines.slice(i, i + oldLines.length);
      const isMatch = oldLines.every((oldLine, j) => oldLine.trim() === potentialMatch[j].trim());

      if (isMatch) {
        const originalIndent = contentLines[i].match(/^\s*/)?.[0] || '';
        const newLines = normalizedNew.split('\n').map((line, j) => {
          if (j === 0) return originalIndent + line.trimStart();
          const oldIndent = oldLines[j]?.match(/^\s*/)?.[0] || '';
          const newIndent = line.match(/^\s*/)?.[0] || '';
          if (oldIndent && newIndent) {
            const relativeIndent = newIndent.length - oldIndent.length;
            return originalIndent + ' '.repeat(Math.max(0, relativeIndent)) + line.trimStart();
          }
          return line;
        });

        contentLines.splice(i, oldLines.length, ...newLines);
        modifiedContent = contentLines.join('\n');
        matchFound = true;
        break;
      }
    }

    if (!matchFound) {
      throw new Error(`Could not find exact or whitespace-insensitive match for edit:\n${edit.oldText}`);
    }
  }

  const diff = createUnifiedDiff(content, modifiedContent, path.relative(process.cwd(), filePath));

  if (!dryRun) {
    await fs.writeFile(filePath, modifiedContent, 'utf-8');
  }

  let numBackticks = 3;
  while (diff.includes('`'.repeat(numBackticks))) {
    numBackticks++;
  }
  return `${'`'.repeat(numBackticks)}diff\n${diff}\n${'`'.repeat(numBackticks)}`;
}
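
// Example (hypothetical): an edit whose oldText is "  const x = 1;" (two-space
// indent) still matches a file line "    const x = 1;" (four-space indent): the
// exact-substring check fails, the fallback compares trimmed lines, and the
// file's original four-space indentation is re-applied to newText. The backtick
// counting above guarantees the returned diff's code fence is longer than any
// backtick run inside the diff itself.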


interface TreeEntry {
    name: string;
    type: 'file' | 'directory';
    children?: TreeEntry[];
}

async function buildDirectoryTree(currentPath: string): Promise<TreeEntry[]> {
    const entries = await fs.readdir(currentPath, {withFileTypes: true});
    const result: TreeEntry[] = [];

    for (const entry of entries) {
        const entryData: TreeEntry = {
            name: entry.name,
            type: entry.isDirectory() ? 'directory' : 'file'
        };

        if (entry.isDirectory()) {
            const subPath = path.join(currentPath, entry.name);
            try {
                // Resolve symlinks and only recurse when the real target stays
                // near the current tree, avoiding cycles through symlinked dirs
                const realPath = await fs.realpath(subPath);
                if (realPath.startsWith(path.dirname(currentPath))) {
                    entryData.children = await buildDirectoryTree(subPath);
                } else {
                     entryData.children = [];
                }
            } catch (e) {
                 entryData.children = [];
                 console.error(`Skipping tree build in ${subPath}: ${(e as Error).message}`);
            }
        }
        result.push(entryData);
    }
    result.sort((a, b) => {
        if (a.type === 'directory' && b.type === 'file') return -1;
        if (a.type === 'file' && b.type === 'directory') return 1;
        return a.name.localeCompare(b.name);
    });
    return result;
}
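
// Example (hypothetical) output shape (directories sorted before files):
//   [
//     { "name": "tools", "type": "directory",
//       "children": [ { "name": "edit_file.ts", "type": "file" } ] },
//     { "name": "utils.ts", "type": "file" }
//   ]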


// Set of filesystem tool names for easy checking
const filesystemToolNames = new Set([
    "read_file_content", // Handles single/multiple
    // "read_multiple_files_content", // Removed
    "write_file_content", // Handles single/multiple
    "edit_file_content",
    // "create_directory", // Removed
    "list_directory_contents",
    "get_directory_tree",
    "move_file_or_directory",
    "search_filesystem",
    "get_filesystem_info",
]);


// --- MCP Server Setup ---
const server = new Server(
  { name: "vertex-ai-mcp-server", version: "0.5.0" },
  { capabilities: { tools: {} } }
);

// --- Tool Definitions Handler ---
server.setRequestHandler(ListToolsRequestSchema, async () => {
  // Use new config function
  const config = getAIConfig();
  return {
      tools: allTools.map(t => ({
          name: t.name,
          // Inject model ID dynamically from new config structure
          description: t.description.replace("${modelId}", config.modelId),
          inputSchema: t.inputSchema
      }))
  };
});

// --- Tool Call Handler ---
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const toolName = request.params.name;
  const args = request.params.arguments ?? {};

  const toolDefinition = toolMap.get(toolName);
  if (!toolDefinition) {
    throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${toolName}`);
  }

  try {
    // --- Special Handling for Combined "Generate & Save" Tools ---
    // These tools share one flow: validate args, build the AI prompt, call the
    // model, and write the generated content to the requested output path.
    const saveToolHandlers: Record<string, { schema: z.ZodTypeAny; label: string }> = {
        save_generate_project_guidelines: { schema: SaveGenerateProjectGuidelinesArgsSchema, label: "guidelines" },
        save_doc_snippet: { schema: SaveDocSnippetArgsSchema, label: "snippet" },
        save_topic_explanation: { schema: SaveTopicExplanationArgsSchema, label: "explanation" },
        save_answer_query_direct: { schema: SaveAnswerQueryDirectArgsSchema, label: "direct answer" },
        save_answer_query_websearch: { schema: SaveAnswerQueryWebsearchArgsSchema, label: "websearch answer" },
    };

    if (toolName in saveToolHandlers) {
        const { schema, label } = saveToolHandlers[toolName];
        // Every save_* schema includes a required 'output_path' string.
        const { output_path } = schema.parse(args) as { output_path: string };

        const config = getAIConfig();
        const { systemInstructionText, userQueryText, useWebSearch, enableFunctionCalling } = toolDefinition.buildPrompt(args, config.modelId);

        const initialContents = buildInitialContent(systemInstructionText, userQueryText) as CombinedContent[];
        const toolsForApi = getToolsForApi(enableFunctionCalling, useWebSearch);

        const generatedContent = await callGenerativeAI(initialContents, toolsForApi);

        const validOutputPath = validateWorkspacePath(output_path);
        await fs.mkdir(path.dirname(validOutputPath), { recursive: true });
        await fs.writeFile(validOutputPath, generatedContent, "utf-8");

        return {
            content: [{ type: "text", text: `Successfully generated ${label} and saved to ${output_path}` }],
        };

    } // --- Filesystem Tool Execution Logic ---
    else if (filesystemToolNames.has(toolName)) {
      let resultText = "";

      switch (toolName) {
        case "read_file_content": {
          const parsed = ReadFileArgsSchema.parse(args);
          if (typeof parsed.paths === 'string') {
            // Handle single file read
            const validPath = validateWorkspacePath(parsed.paths);
            const content = await fs.readFile(validPath, "utf-8");
            resultText = content;
          } else {
            // Handle multiple file read (similar to old read_multiple_files_content)
            const results = await Promise.all(
              parsed.paths.map(async (filePath: string) => {
                try {
                  const validPath = validateWorkspacePath(filePath);
                  const content = await fs.readFile(validPath, "utf-8");
                  return `${path.relative(process.cwd(), validPath)}:\n${content}\n`;
                } catch (error) {
                  const errorMessage = error instanceof Error ? error.message : String(error);
                  return `${filePath}: Error - ${errorMessage}`;
                }
              }),
            );
            resultText = results.join("\n---\n");
          }
          break;
        }
        // case "read_multiple_files_content": // Removed - logic merged into read_file_content
        case "write_file_content": {
          const parsed = WriteFileArgsSchema.parse(args);
          // Access the 'writes' property which contains either a single object or an array
          const writeOperations = Array.isArray(parsed.writes) ? parsed.writes : [parsed.writes];
          const results: string[] = [];

          for (const op of writeOperations) {
              try {
                  const validPath = validateWorkspacePath(op.path);
                  await fs.mkdir(path.dirname(validPath), { recursive: true });
                  await fs.writeFile(validPath, op.content, "utf-8");
                  results.push(`Successfully wrote to ${op.path}`);
              } catch (error) {
                  const errorMessage = error instanceof Error ? error.message : String(error);
                  results.push(`Error writing to ${op.path}: ${errorMessage}`);
              }
          }
          resultText = results.join("\n");
          break;
        }
        case "edit_file_content": {
          const parsed = EditFileArgsSchema.parse(args);
          if (parsed.edits.length === 0) {
             throw new McpError(ErrorCode.InvalidParams, `'edits' array cannot be empty for ${toolName}.`);
          }
          const validPath = validateWorkspacePath(parsed.path);
          resultText = await applyFileEdits(validPath, parsed.edits, parsed.dryRun);
          break;
        }
        // case "create_directory": // Removed
        case "list_directory_contents": {
          const parsed = ListDirectoryArgsSchema.parse(args);
          const validPath = validateWorkspacePath(parsed.path);
          const entries = await fs.readdir(validPath, { withFileTypes: true });
          resultText = entries
            .map((entry) => `${entry.isDirectory() ? "[DIR] " : "[FILE]"} ${entry.name}`)
            .sort()
            .join("\n");
           if (!resultText) resultText = "(Directory is empty)";
          break;
        }
        case "get_directory_tree": {
            const parsed = DirectoryTreeArgsSchema.parse(args);
            const validPath = validateWorkspacePath(parsed.path);
            const treeData = await buildDirectoryTree(validPath);
            resultText = JSON.stringify(treeData, null, 2);
            break;
        }
        case "move_file_or_directory": {
          const parsed = MoveFileArgsSchema.parse(args);
           if (parsed.source === parsed.destination) {
             throw new McpError(ErrorCode.InvalidParams, `Source and destination paths cannot be the same for ${toolName}.`);
           }
          const validSourcePath = validateWorkspacePath(parsed.source);
          const validDestPath = validateWorkspacePath(parsed.destination);
          await fs.mkdir(path.dirname(validDestPath), { recursive: true });
          await fs.rename(validSourcePath, validDestPath);
          resultText = `Successfully moved ${parsed.source} to ${parsed.destination}`;
          break;
        }
        case "search_filesystem": {
          const parsed = SearchFilesArgsSchema.parse(args);
          const validPath = validateWorkspacePath(parsed.path);
          const results: string[] = [];
          await searchFilesRecursive(validPath, validPath, parsed.pattern, parsed.excludePatterns, results);
          resultText = results.length > 0 ? results.join("\n") : "No matches found";
          break;
        }
        case "get_filesystem_info": {
          const parsed = GetFileInfoArgsSchema.parse(args);
          const validPath = validateWorkspacePath(parsed.path);
          const info = await getFileStats(validPath);
          resultText = `Path: ${parsed.path}\nType: ${info.isDirectory ? 'Directory' : 'File'}\nSize: ${info.size} bytes\nCreated: ${info.created.toISOString()}\nModified: ${info.modified.toISOString()}\nAccessed: ${info.accessed.toISOString()}\nPermissions: ${info.permissions}`;
          break;
        }
        default:
          throw new McpError(ErrorCode.MethodNotFound, `Filesystem tool handler not implemented: ${toolName}`);
      }

      // Return successful filesystem operation result
      return {
        content: [{ type: "text", text: resultText }],
      };
    } else if (toolName === "execute_terminal_command") {
        const parsed = ExecuteTerminalCommandArgsSchema.parse(args);
        const execPromise = util.promisify(exec);

        const options: { cwd?: string; timeout?: number } = {};
        if (parsed.cwd) {
            options.cwd = validateWorkspacePath(parsed.cwd); // Reuse workspace path validation
        } else {
            options.cwd = process.cwd(); // Default to workspace root
        }

        if (parsed.timeout) {
            // exec kills the child process with SIGTERM once the timeout
            // elapses, so no separate AbortController is required.
            options.timeout = parsed.timeout * 1000; // Convert seconds to milliseconds
        }

        try {
            // Execute the command
            const { stdout, stderr } = await execPromise(parsed.command, options);
            const output = `STDOUT:\n${stdout}\nSTDERR:\n${stderr}`;
            return {
                content: [{ type: "text", text: output.trim() || "(No output)" }],
            };
        } catch (error: any) {
            // Distinguish timeouts, non-zero exits, and unexpected failures
            let errorMessage = "Command execution failed.";
            if (parsed.timeout && (error.killed || error.signal === 'SIGTERM')) {
                errorMessage = `Command timed out after ${parsed.timeout} seconds.`;
            } else if (error.stderr || error.stdout) {
                errorMessage = `Command failed with exit code ${error.code || 'unknown'}.\nSTDOUT:\n${error.stdout}\nSTDERR:\n${error.stderr}`;
            } else if (error instanceof Error) {
                errorMessage = `Command execution error: ${error.message}`;
            }
            throw new McpError(ErrorCode.InternalError, errorMessage);
        }

    } else {
      // --- Generic AI Tool Logic (Non-filesystem, non-combined) ---
      const config = getAIConfig(); // Use renamed config function
      if (!toolDefinition.buildPrompt) {
        throw new McpError(ErrorCode.MethodNotFound, `Tool ${toolName} is missing required buildPrompt logic.`);
      }
      const { systemInstructionText, userQueryText, useWebSearch, enableFunctionCalling } = toolDefinition.buildPrompt(args, config.modelId);
      const initialContents = buildInitialContent(systemInstructionText, userQueryText) as CombinedContent[]; // Cast
      const toolsForApi = getToolsForApi(enableFunctionCalling, useWebSearch);

      // Call the unified AI function
      const responseText = await callGenerativeAI(
          initialContents,
          toolsForApi
          // Config is implicitly used by callGenerativeAI now
      );

      return {
        content: [{ type: "text", text: responseText }],
      };
    }

  } catch (error) {
     // Centralized error handling
    if (error instanceof z.ZodError) {
        throw new McpError(ErrorCode.InvalidParams, `Invalid arguments for ${toolName}: ${error.errors.map(e => `${e.path.join('.')}: ${e.message}`).join(', ')}`);
    } else if (error instanceof McpError) {
      throw error;
    } else if (error instanceof Error && error.message.includes('ENOENT')) {
         throw new McpError(ErrorCode.InvalidParams, `Path not found for tool ${toolName}: ${error.message}`);
    } else {
      console.error(`[${new Date().toISOString()}] Unexpected error in tool handler (${toolName}):`, error);
      throw new McpError(ErrorCode.InternalError, `Unexpected server error during ${toolName}: ${(error as Error).message || "Unknown"}`);
    }
  }
});

// --- Server Start ---
async function main() {
  const transport = new StdioServerTransport();
  console.error(`[${new Date().toISOString()}] vertex-ai-mcp-server connecting via stdio...`);
  await server.connect(transport);
  console.error(`[${new Date().toISOString()}] vertex-ai-mcp-server connected.`);
}

main().catch((error) => {
  console.error(`[${new Date().toISOString()}] Server failed to start:`, error);
  process.exit(1);
});

// --- Graceful Shutdown ---
const shutdown = async (signal: string) => {
    console.error(`[${new Date().toISOString()}] Received ${signal}. Shutting down server...`);
    try {
      await server.close();
      console.error(`[${new Date().toISOString()}] Server shut down gracefully.`);
      process.exit(0);
    } catch (shutdownError) {
      console.error(`[${new Date().toISOString()}] Error during server shutdown:`, shutdownError);
      process.exit(1);
    }
};
process.on('SIGINT', () => shutdown('SIGINT'));
process.on('SIGTERM', () => shutdown('SIGTERM'));

```
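For completeness, a minimal sketch of driving this server over stdio with the same `@modelcontextprotocol/sdk` package it depends on (the client-side API and the compiled entry-point path are assumptions; adjust both to your build):

```typescript
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

// Assumed compiled entry point; adjust to wherever the server is built.
const transport = new StdioClientTransport({ command: "node", args: ["build/index.js"] });
const client = new Client({ name: "example-client", version: "0.0.1" }, { capabilities: {} });
await client.connect(transport);

// Discover the advertised tools, then call one of the filesystem tools.
const { tools } = await client.listTools();
console.log(tools.map((t) => t.name));

const result = await client.callTool({
    name: "read_file_content",
    arguments: { paths: "package.json" },
});
console.log(result.content);
```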