# vertex-ai-mcp-server — codebase snapshot (page 2 of 2)
tokens: 24342/50000 6/42 files (page 2/2)
lines: on (toggle) GitHub
raw markdown copy reset
This is page 2 of 2. Use http://codebase.md/shariqriazz/vertex-ai-mcp-server?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .env.example
├── .gitignore
├── bun.lock
├── Dockerfile
├── LICENSE
├── package.json
├── README.md
├── smithery.yaml
├── src
│   ├── config.ts
│   ├── index.ts
│   ├── tools
│   │   ├── answer_query_direct.ts
│   │   ├── answer_query_websearch.ts
│   │   ├── architecture_pattern_recommendation.ts
│   │   ├── code_analysis_with_docs.ts
│   │   ├── database_schema_analyzer.ts
│   │   ├── dependency_vulnerability_scan.ts
│   │   ├── directory_tree.ts
│   │   ├── documentation_generator.ts
│   │   ├── edit_file.ts
│   │   ├── execute_terminal_command.ts
│   │   ├── explain_topic_with_docs.ts
│   │   ├── generate_project_guidelines.ts
│   │   ├── get_doc_snippets.ts
│   │   ├── get_file_info.ts
│   │   ├── index.ts
│   │   ├── list_directory.ts
│   │   ├── microservice_design_assistant.ts
│   │   ├── move_file.ts
│   │   ├── read_file.ts
│   │   ├── regulatory_compliance_advisor.ts
│   │   ├── save_answer_query_direct.ts
│   │   ├── save_answer_query_websearch.ts
│   │   ├── save_doc_snippet.ts
│   │   ├── save_generate_project_guidelines.ts
│   │   ├── save_topic_explanation.ts
│   │   ├── search_files.ts
│   │   ├── security_best_practices_advisor.ts
│   │   ├── technical_comparison.ts
│   │   ├── testing_strategy_generator.ts
│   │   ├── tool_definition.ts
│   │   └── write_file.ts
│   ├── utils.ts
│   └── vertex_ai_client.ts
└── tsconfig.json
```

# Files

--------------------------------------------------------------------------------
/src/tools/testing_strategy_generator.ts:
--------------------------------------------------------------------------------

```typescript
import { McpError, ErrorCode } from "@modelcontextprotocol/sdk/types.js";
import { ToolDefinition, modelIdPlaceholder } from "./tool_definition.js";

// MCP tool definition: generates a comprehensive, technology-specific testing
// strategy for a described project. The prompt is executed by the configured
// Vertex AI model with Google Search grounding (useWebSearch: true below).
export const testingStrategyGeneratorTool: ToolDefinition = {
    name: "testing_strategy_generator",
    description: `Creates comprehensive testing strategies for applications or features. Suggests appropriate testing types (unit, integration, e2e) with coverage goals. Provides example test cases and testing frameworks. Uses the configured Vertex AI model (${modelIdPlaceholder}) with Google Search. Requires 'project_description' and 'tech_stack'.`,
    // JSON Schema for tool arguments. 'project_description', 'tech_stack' and
    // 'project_type' are required; 'testing_priorities' and 'constraints' are
    // optional with defaults mirrored in buildPrompt's destructuring below.
    inputSchema: {
        type: "object",
        properties: {
            project_description: {
                type: "string",
                description: "Description of the project or feature to be tested."
            },
            tech_stack: {
                type: "array",
                items: { type: "string" },
                description: "Technologies used in the project (e.g., ['React', 'Node.js', 'PostgreSQL'])."
            },
            project_type: {
                type: "string",
                enum: ["web", "mobile", "desktop", "api", "library", "microservices", "data_pipeline", "other"],
                description: "Type of project being developed."
            },
            testing_priorities: {
                type: "array",
                items: {
                    type: "string",
                    enum: ["functionality", "performance", "security", "accessibility", "usability", "reliability", "compatibility", "all"]
                },
                description: "Optional. Testing priorities for the project.",
                default: ["all"]
            },
            constraints: {
                type: "object",
                properties: {
                    time: {
                        type: "string",
                        description: "Time constraints for implementing testing."
                    },
                    resources: {
                        type: "string",
                        description: "Resource constraints (team size, expertise, etc.)."
                    },
                    environment: {
                        type: "string",
                        description: "Environment constraints (CI/CD, deployment, etc.)."
                    }
                },
                description: "Optional. Constraints that might affect the testing strategy."
            }
        },
        required: ["project_description", "tech_stack", "project_type"]
    },
    // Builds the system instruction and user query for the model.
    // Note: 'modelId' is accepted per the ToolDefinition signature but is not
    // used here — the description above embeds modelIdPlaceholder instead.
    // Throws McpError(InvalidParams) when required arguments are missing/invalid.
    buildPrompt: (args: any, modelId: string) => {
        const { project_description, tech_stack, project_type, testing_priorities = ["all"], constraints = {} } = args;

        // Manual validation of the three required arguments (schema defaults
        // cover the optional ones via the destructuring above).
        if (typeof project_description !== "string" || !project_description)
            throw new McpError(ErrorCode.InvalidParams, "Missing or invalid 'project_description'.");

        if (!Array.isArray(tech_stack) || tech_stack.length === 0 || !tech_stack.every(item => typeof item === 'string' && item))
            throw new McpError(ErrorCode.InvalidParams, "Missing or invalid 'tech_stack' array.");

        if (typeof project_type !== "string" || !project_type)
            throw new McpError(ErrorCode.InvalidParams, "Missing or invalid 'project_type'.");

        // NOTE(review): 'testing_priorities' and 'constraints' are NOT
        // type-checked here — a non-array 'testing_priorities' would throw at
        // .includes() below. Presumably the MCP layer validates against
        // inputSchema first; confirm before relying on it.
        const techStackString = tech_stack.join(', ');

        // "all" expands to the full set of priority areas; otherwise the
        // caller-supplied subset is used verbatim.
        const priorities = testing_priorities.includes("all") 
            ? ["functionality", "performance", "security", "accessibility", "usability", "reliability", "compatibility"] 
            : testing_priorities;

        const prioritiesText = priorities.join(', ');

        // Render only constraint entries with truthy values, one "key: value"
        // per line; empty result suppresses the whole constraints section.
        const constraintsText = Object.entries(constraints)
            .filter(([_, value]) => value)
            .map(([key, value]) => `${key}: ${value}`)
            .join('\n');

        const constraintsSection = constraintsText ? `\n\nConstraints:\n${constraintsText}` : '';

        // Large prompt template — every character below is sent to the model,
        // so edits here directly change model behavior. Conditional segments
        // are driven by 'priorities' and 'constraints' membership.
        const systemInstructionText = `You are TestingStrategistGPT, an elite software quality assurance architect with decades of experience designing comprehensive testing strategies across multiple domains. Your task is to create a detailed, actionable testing strategy for a ${project_type} project using ${techStackString}, with focus on these testing priorities: ${prioritiesText}.${constraintsSection}

SEARCH METHODOLOGY - EXECUTE IN THIS EXACT ORDER:
1. FIRST search for: "testing best practices for ${project_type} applications"
2. THEN search for: "testing frameworks for ${techStackString}"
3. THEN search for specific testing approaches for each technology: "${tech_stack.map(t => `${t} testing best practices`).join('", "')}"
4. THEN search for testing approaches for each priority: "${priorities.map((p: string) => `${project_type} ${p} testing`).join('", "')}"
5. THEN search for: "${project_type} test automation with ${techStackString}"
6. THEN search for: "test coverage metrics for ${project_type} applications"
7. FINALLY search for: "CI/CD integration for ${techStackString} testing"

DOCUMENTATION SOURCE PRIORITIZATION (in strict order):
1. Official testing documentation for each technology in the stack
2. Industry-standard testing methodologies (e.g., ISTQB, TMap)
3. Technical blogs from testing experts and technology creators
4. Case studies of testing strategies for similar applications
5. Academic research on software testing effectiveness
6. Testing tool documentation and best practices guides
7. Industry surveys on testing practices and effectiveness

TESTING STRATEGY REQUIREMENTS:
1. COMPREHENSIVE TEST PLANNING:
   a. Define clear testing objectives aligned with project goals
   b. Establish appropriate test coverage metrics and targets
   c. Determine testing scope and boundaries
   d. Identify key risk areas requiring focused testing
   e. Create a phased testing approach with clear milestones

2. MULTI-LEVEL TESTING APPROACH:
   a. Unit Testing:
      - Framework selection with justification
      - Component isolation strategies
      - Mocking/stubbing approach
      - Coverage targets and measurement
      - Example test cases for critical components
   
   b. Integration Testing:
      - Integration points identification
      - Testing approach (top-down, bottom-up, sandwich)
      - Service/API contract testing strategy
      - Data consistency verification
      - Example integration test scenarios
   
   c. End-to-End Testing:
      - User journey identification
      - Critical path testing
      - Cross-browser/device strategy (if applicable)
      - Test data management approach
      - Example E2E test scenarios
   
   d. Specialized Testing (based on priorities):
      ${priorities.includes("performance") ? `- Performance testing approach (load, stress, endurance)
      - Performance metrics and baselines
      - Performance testing tools and configuration
      - Performance test scenarios` : ""}
      ${priorities.includes("security") ? `- Security testing methodology
      - Vulnerability assessment approach
      - Penetration testing strategy
      - Security compliance verification` : ""}
      ${priorities.includes("accessibility") ? `- Accessibility standards compliance (WCAG, etc.)
      - Accessibility testing tools and techniques
      - Manual and automated accessibility testing` : ""}
      ${priorities.includes("usability") ? `- Usability testing approach
      - User feedback collection methods
      - Usability metrics and evaluation criteria` : ""}
      ${priorities.includes("reliability") ? `- Reliability testing methods
      - Chaos engineering approach (if applicable)
      - Recovery testing strategy
      - Failover and resilience testing` : ""}
      ${priorities.includes("compatibility") ? `- Compatibility matrix definition
      - Cross-platform testing approach
      - Backward compatibility testing` : ""}

3. TEST AUTOMATION STRATEGY:
   a. Automation framework selection with justification
   b. Automation scope (what to automate vs. manual testing)
   c. Automation architecture and design patterns
   d. Test data management for automated tests
   e. Continuous integration implementation
   f. Reporting and monitoring approach

4. TESTING INFRASTRUCTURE:
   a. Environment requirements and setup
   b. Test data management strategy
   c. Configuration management approach
   d. Tool selection with specific versions
   e. Infrastructure as code approach for test environments

5. QUALITY METRICS AND REPORTING:
   a. Key quality indicators and metrics
   b. Reporting frequency and format
   c. Defect tracking and management process
   d. Quality gates and exit criteria
   e. Continuous improvement mechanisms

RESPONSE STRUCTURE:
1. Begin with an "Executive Summary" providing a high-level overview of the testing strategy
2. Include a "Testing Objectives and Scope" section defining clear goals
3. Provide a "Test Approach" section detailing the overall methodology
4. For EACH testing level (unit, integration, E2E, specialized):
   a. Detailed approach and methodology
   b. Tool and framework recommendations with versions
   c. Example test cases or scenarios
   d. Coverage targets and measurement approach
   e. Implementation guidelines
5. Include a "Test Automation Strategy" section
6. Provide a "Testing Infrastructure" section
7. Include a "Test Management and Reporting" section
8. Conclude with an "Implementation Roadmap" with phased approach

CRITICAL REQUIREMENTS:
1. NEVER recommend generic testing approaches without technology-specific details
2. ALWAYS provide specific tool and framework recommendations with versions
3. NEVER overlook critical testing areas based on the project type
4. ALWAYS include example test cases or scenarios for each testing level
5. NEVER recommend excessive testing that doesn't align with the stated constraints
6. ALWAYS prioritize testing efforts based on risk and impact
7. NEVER recommend tools or frameworks that are incompatible with the tech stack

${constraintsText ? `CONSTRAINT CONSIDERATIONS:
${Object.entries(constraints)
    .filter(([_, value]) => value)
    .map(([key, value]) => {
        if (key === 'time') return `1. Time Constraints (${value}):
   a. Prioritize testing efforts based on critical functionality
   b. Consider phased testing implementation
   c. Leverage automation for efficiency
   d. Focus on high-risk areas first`;
        if (key === 'resources') return `2. Resource Constraints (${value}):
   a. Select tools with appropriate learning curves
   b. Consider expertise requirements for recommended approaches
   c. Suggest training resources if needed
   d. Recommend approaches that maximize efficiency`;
        if (key === 'environment') return `3. Environment Constraints (${value}):
   a. Adapt recommendations to work within the specified environment
   b. Suggest alternatives if optimal approaches aren't feasible
   c. Address specific environmental limitations
   d. Provide workarounds for common constraints`;
        return '';
    })
    .filter(text => text)
    .join('\n')}` : ""}

Your testing strategy must be technically precise, evidence-based, and immediately implementable. Focus on providing actionable guidance that balances thoroughness with practical constraints.`;
        // NOTE(review): in the CONSTRAINT CONSIDERATIONS section above, the
        // item numbers "1./2./3." are fixed per key, so e.g. supplying only
        // 'environment' yields a list that starts at "3." — cosmetic only.

        // User-visible query paired with the system instruction above; echoes
        // the validated inputs and restates the required deliverables.
        const userQueryText = `Create a comprehensive testing strategy for the following ${project_type} project:

Project Description: ${project_description}
Technology Stack: ${techStackString}
Testing Priorities: ${prioritiesText}
${constraintsSection}

Search for and incorporate best practices for testing ${project_type} applications built with ${techStackString}. Your strategy should include:

1. Overall testing approach and methodology
2. Specific testing levels with detailed approaches:
   - Unit testing strategy and framework recommendations
   - Integration testing approach
   - End-to-end testing methodology
   - Specialized testing based on priorities (${prioritiesText})
3. Test automation strategy with specific tools and frameworks
4. Testing infrastructure and environment requirements
5. Quality metrics, reporting, and management approach
6. Implementation roadmap with phased approach

For each testing level, provide:
- Specific tools and frameworks with versions
- Example test cases or scenarios
- Coverage targets and measurement approach
- Implementation guidelines with code examples where appropriate

Your strategy should be specifically tailored to the technologies, project type, and constraints provided. Include practical, actionable recommendations that can be implemented immediately.`;

        // Web search grounding on; no function calling for this tool.
        return {
            systemInstructionText,
            userQueryText,
            useWebSearch: true,
            enableFunctionCalling: false
        };
    }
};
```

--------------------------------------------------------------------------------
/src/tools/regulatory_compliance_advisor.ts:
--------------------------------------------------------------------------------

```typescript
import { McpError, ErrorCode } from "@modelcontextprotocol/sdk/types.js";
import { ToolDefinition, modelIdPlaceholder } from "./tool_definition.js";

// MCP tool definition: produces regulatory-compliance guidance (GDPR, HIPAA,
// etc.) tailored to an industry/application context. Executed by the
// configured Vertex AI model with Google Search grounding (useWebSearch below).
export const regulatoryComplianceAdvisorTool: ToolDefinition = {
    name: "regulatory_compliance_advisor",
    description: `Provides guidance on regulatory requirements for specific industries (GDPR, HIPAA, etc.). Suggests implementation approaches for compliance. Includes checklists and verification strategies. Uses the configured Vertex AI model (${modelIdPlaceholder}) with Google Search. Requires 'regulations' and 'context'.`,
    // JSON Schema for tool arguments. 'regulations' and 'context' (with its
    // own required sub-fields) are mandatory; the rest default as shown and
    // are mirrored by the destructuring defaults in buildPrompt.
    inputSchema: {
        type: "object",
        properties: {
            regulations: {
                type: "array",
                items: { type: "string" },
                description: "Regulations to address (e.g., ['GDPR', 'HIPAA', 'PCI DSS', 'CCPA'])."
            },
            context: {
                type: "object",
                properties: {
                    industry: {
                        type: "string",
                        description: "Industry context (e.g., 'healthcare', 'finance', 'e-commerce')."
                    },
                    application_type: {
                        type: "string",
                        description: "Type of application (e.g., 'web app', 'mobile app', 'SaaS platform')."
                    },
                    data_types: {
                        type: "array",
                        items: { type: "string" },
                        description: "Types of data being processed (e.g., ['PII', 'PHI', 'payment data'])."
                    },
                    user_regions: {
                        type: "array",
                        items: { type: "string" },
                        description: "Regions where users are located (e.g., ['EU', 'US', 'Canada'])."
                    }
                },
                required: ["industry", "application_type", "data_types"],
                description: "Context information for compliance analysis."
            },
            tech_stack: {
                type: "array",
                items: { type: "string" },
                description: "Optional. Technologies used in the application.",
                default: []
            },
            implementation_phase: {
                type: "string",
                enum: ["planning", "development", "pre_launch", "operational", "audit"],
                description: "Optional. Current phase of implementation.",
                default: "planning"
            },
            output_format: {
                type: "string",
                enum: ["comprehensive", "checklist", "technical", "executive"],
                description: "Optional. Format of the compliance guidance.",
                default: "comprehensive"
            }
        },
        required: ["regulations", "context"]
    },
    // Builds the system instruction and user query for the model.
    // Note: 'modelId' is accepted per the ToolDefinition signature but is not
    // used here — the description above embeds modelIdPlaceholder instead.
    // Throws McpError(InvalidParams) when required arguments are missing/invalid.
    buildPrompt: (args: any, modelId: string) => {
        const { regulations, context, tech_stack = [], implementation_phase = "planning", output_format = "comprehensive" } = args;

        // Validate the two required arguments; 'context' must carry its own
        // required sub-fields and a non-empty data_types array.
        if (!Array.isArray(regulations) || regulations.length === 0 || !regulations.every(item => typeof item === 'string' && item))
            throw new McpError(ErrorCode.InvalidParams, "Missing or invalid 'regulations' array.");

        if (!context || typeof context !== 'object' || !context.industry || !context.application_type || !Array.isArray(context.data_types) || context.data_types.length === 0)
            throw new McpError(ErrorCode.InvalidParams, "Missing or invalid 'context' object.");

        // NOTE(review): 'tech_stack', 'implementation_phase', 'output_format'
        // and the elements of 'user_regions' are not validated here —
        // presumably the MCP layer enforces inputSchema (including the enums)
        // before this runs; confirm, since an unexpected 'output_format' would
        // silently blank out every format-specific prompt section below.
        const { industry, application_type, data_types, user_regions = [] } = context;

        // Pre-joined display strings for interpolation into the prompt, with
        // neutral fallbacks when the optional inputs are absent.
        const regulationsString = regulations.join(', ');
        const dataTypesString = data_types.join(', ');
        const regionsString = user_regions.length > 0 ? user_regions.join(', ') : "global";
        const techStackString = tech_stack.length > 0 ? tech_stack.join(', ') : "any technology stack";

        // Large prompt template — every character below is sent to the model,
        // so edits here directly change model behavior. The RESPONSE STRUCTURE
        // section is selected by 'output_format'; non-matching variants render
        // as empty strings.
        const systemInstructionText = `You are ComplianceAdvisorGPT, an elite regulatory compliance expert with deep expertise in global data protection and industry-specific regulations. Your task is to provide detailed, actionable compliance guidance for ${regulationsString} regulations as they apply to a ${application_type} in the ${industry} industry that processes ${dataTypesString} for users in ${regionsString}. The application uses ${techStackString} and is currently in the ${implementation_phase} phase. You must base your guidance EXCLUSIVELY on information found through web search of authoritative regulatory documentation and compliance best practices.

SEARCH METHODOLOGY - EXECUTE IN THIS EXACT ORDER:
1. FIRST search for the official text of each regulation: "${regulations.map(r => `${r} official text`).join('", "')}"
2. THEN search for industry-specific guidance: "${regulations.map(r => `${r} compliance ${industry} industry`).join('", "')}"
3. THEN search for application-specific requirements: "${regulations.map(r => `${r} requirements for ${application_type}`).join('", "')}"
4. THEN search for data-specific requirements: "${regulations.map(r => `${r} requirements for ${dataTypesString}`).join('", "')}"
5. THEN search for region-specific interpretations: "${regulations.map(r => `${r} implementation in ${regionsString}`).join('", "')}"
6. THEN search for implementation guidance: "${regulations.map(r => `${r} technical implementation guide`).join('", "')}"
7. THEN search for compliance verification: "${regulations.map(r => `${r} audit checklist`).join('", "')}"
${tech_stack.length > 0 ? `8. FINALLY search for technology-specific guidance: "${regulations.map(r => `${r} compliance with ${techStackString}`).join('", "')}"` : ""}

DOCUMENTATION SOURCE PRIORITIZATION (in strict order):
1. Official regulatory texts and guidelines from regulatory authorities
2. Guidance from national/regional data protection authorities
3. Industry-specific regulatory frameworks and standards
4. Compliance frameworks from recognized standards organizations (ISO, NIST, etc.)
5. Legal analyses from major law firms specializing in data protection
6. Compliance guidance from major cloud providers and technology vendors
7. Academic legal research on regulatory interpretation and implementation

COMPLIANCE GUIDANCE REQUIREMENTS:
1. COMPREHENSIVE REGULATORY ANALYSIS:
   a. For EACH regulation, provide:
      - Core regulatory requirements applicable to the specific context
      - Key compliance obligations and deadlines
      - Territorial scope and applicability analysis
      - Potential exemptions or special provisions
      - Enforcement mechanisms and potential penalties
   b. Identify overlaps and conflicts between multiple regulations
   c. Prioritize requirements based on risk and implementation complexity
   d. Address industry-specific interpretations and requirements

2. ACTIONABLE IMPLEMENTATION GUIDANCE:
   a. Provide specific technical and organizational measures for compliance
   b. Include data governance frameworks and policies
   c. Outline data protection by design and default approaches
   d. Detail consent management and data subject rights implementation
   e. Provide data breach notification procedures
   f. Outline documentation and record-keeping requirements
   g. Include specific implementation steps for the current phase (${implementation_phase})

3. EVIDENCE-BASED RECOMMENDATIONS:
   a. Cite specific articles, sections, or recitals from official regulatory texts
   b. Reference authoritative guidance from regulatory bodies
   c. Include case law or enforcement actions when relevant
   d. Acknowledge areas of regulatory uncertainty or evolving interpretation
   e. Distinguish between mandatory requirements and best practices

4. PRACTICAL COMPLIANCE VERIFICATION:
   a. Provide detailed compliance checklists for each regulation
   b. Include audit preparation guidance
   c. Outline documentation requirements for demonstrating compliance
   d. Suggest monitoring and ongoing compliance verification approaches
   e. Include risk assessment methodologies

RESPONSE STRUCTURE:
${output_format === 'comprehensive' ? `1. Begin with an "Executive Summary" providing a high-level compliance assessment
2. Include a "Regulatory Overview" section detailing each regulation's key requirements
3. Provide a "Compliance Gap Analysis" based on the provided context
4. For EACH major compliance area:
   a. Detailed requirements from all applicable regulations
   b. Specific implementation guidance
   c. Technical and organizational measures
   d. Documentation requirements
   e. Verification approach
5. Include a "Compliance Roadmap" with phased implementation plan
6. Provide a "Risk Assessment" section outlining key compliance risks
7. Conclude with "Ongoing Compliance" guidance for maintaining compliance` : ''}

${output_format === 'checklist' ? `1. Begin with a brief "Compliance Context" section
2. Organize requirements into clear, actionable checklist items
3. Group checklist items by regulation and compliance domain
4. For EACH checklist item:
   a. Specific requirement with regulatory reference
   b. Implementation guidance
   c. Evidence/documentation needed
   d. Verification method
5. Include priority levels for each item
6. Provide a compliance tracking template` : ''}

${output_format === 'technical' ? `1. Begin with a "Technical Compliance Requirements" overview
2. Organize by technical implementation domains
3. For EACH technical domain:
   a. Specific regulatory requirements
   b. Technical implementation specifications
   c. Security controls and standards
   d. Testing and validation approaches
   e. Code or configuration examples where applicable
4. Include data flow and processing requirements
5. Provide technical architecture recommendations
6. Include monitoring and logging requirements` : ''}

${output_format === 'executive' ? `1. Begin with a "Compliance Executive Summary"
2. Include a "Key Regulatory Obligations" section
3. Provide a "Compliance Risk Assessment" with risk ratings
4. Include a "Strategic Compliance Roadmap"
5. Outline "Resource Requirements" for compliance
6. Provide "Business Impact Analysis"
7. Conclude with "Executive Recommendations"` : ''}

CRITICAL REQUIREMENTS:
1. NEVER oversimplify complex regulatory requirements
2. ALWAYS distinguish between legal requirements and best practices
3. NEVER provide definitive legal advice without appropriate disclaimers
4. ALWAYS consider the specific context (industry, data types, regions)
5. NEVER overlook key regulatory requirements applicable to the context
6. ALWAYS provide specific, actionable guidance rather than generic statements
7. NEVER claim regulatory certainty in areas of evolving interpretation

Your guidance must be technically precise, evidence-based, and practically implementable. Focus on providing comprehensive compliance guidance that enables effective implementation and risk management while acknowledging the complexities of regulatory compliance.`;

        // User-visible query paired with the system instruction above; repeats
        // the context and the format-specific structure expectations.
        const userQueryText = `Provide ${output_format} compliance guidance for ${regulationsString} as they apply to a ${application_type} in the ${industry} industry that processes ${dataTypesString} for users in ${regionsString}. The application uses ${techStackString} and is currently in the ${implementation_phase} phase.

Search for authoritative regulatory documentation and compliance best practices from sources like:
- Official regulatory texts and guidelines
- Industry-specific regulatory frameworks
- Guidance from data protection authorities
- Recognized compliance frameworks and standards

For each applicable regulation:
1. Identify specific requirements relevant to this context
2. Provide detailed implementation guidance
3. Include technical and organizational measures
4. Outline documentation and verification approaches
5. Reference specific regulatory provisions

${output_format === 'comprehensive' ? `Structure your response with:
- Executive summary of compliance requirements
- Detailed analysis of each regulation's applicability
- Implementation guidance for each compliance domain
- Compliance verification and documentation requirements
- Phased compliance roadmap` : ''}

${output_format === 'checklist' ? `Structure your response as a detailed compliance checklist with:
- Specific requirements organized by regulation and domain
- Implementation guidance for each checklist item
- Required evidence and documentation
- Verification methods
- Priority levels` : ''}

${output_format === 'technical' ? `Structure your response with focus on technical implementation:
- Technical requirements for each compliance domain
- Specific security controls and standards
- Data handling and processing requirements
- Technical architecture recommendations
- Monitoring and validation approaches` : ''}

${output_format === 'executive' ? `Structure your response for executive stakeholders:
- Executive summary of key compliance obligations
- Strategic risk assessment and business impact
- High-level compliance roadmap
- Resource requirements and recommendations
- Key decision points` : ''}

Ensure your guidance is specific to the context provided, technically accurate, and immediately actionable.`;

        // Web search grounding on; no function calling for this tool.
        return {
            systemInstructionText,
            userQueryText,
            useWebSearch: true,
            enableFunctionCalling: false
        };
    }
};
```

--------------------------------------------------------------------------------
/src/tools/microservice_design_assistant.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { McpError, ErrorCode } from "@modelcontextprotocol/sdk/types.js";
  2 | import { ToolDefinition, modelIdPlaceholder } from "./tool_definition.js";
  3 | 
  4 | export const microserviceDesignAssistantTool: ToolDefinition = {
  5 |     name: "microservice_design_assistant",
  6 |     description: `Helps design microservice architectures for specific domains. Provides service boundary recommendations and communication patterns. Includes deployment and orchestration considerations. Uses the configured Vertex AI model (${modelIdPlaceholder}) with Google Search. Requires 'domain_description' and 'requirements'.`,
  7 |     inputSchema: {
  8 |         type: "object",
  9 |         properties: {
 10 |             domain_description: {
 11 |                 type: "string",
 12 |                 description: "Description of the business domain for the microservice architecture."
 13 |             },
 14 |             requirements: {
 15 |                 type: "object",
 16 |                 properties: {
 17 |                     functional: {
 18 |                         type: "array",
 19 |                         items: { type: "string" },
 20 |                         description: "Key functional requirements for the system."
 21 |                     },
 22 |                     non_functional: {
 23 |                         type: "array",
 24 |                         items: { type: "string" },
 25 |                         description: "Non-functional requirements (scalability, availability, etc.)."
 26 |                     },
 27 |                     constraints: {
 28 |                         type: "array",
 29 |                         items: { type: "string" },
 30 |                         description: "Technical or organizational constraints."
 31 |                     }
 32 |                 },
 33 |                 required: ["functional", "non_functional"],
 34 |                 description: "System requirements and constraints."
 35 |             },
 36 |             tech_stack: {
 37 |                 type: "object",
 38 |                 properties: {
 39 |                     preferred_languages: {
 40 |                         type: "array",
 41 |                         items: { type: "string" },
 42 |                         description: "Preferred programming languages."
 43 |                     },
 44 |                     preferred_databases: {
 45 |                         type: "array",
 46 |                         items: { type: "string" },
 47 |                         description: "Preferred database technologies."
 48 |                     },
 49 |                     deployment_platform: {
 50 |                         type: "string",
 51 |                         description: "Target deployment platform (e.g., 'Kubernetes', 'AWS', 'Azure')."
 52 |                     }
 53 |                 },
 54 |                 description: "Optional. Technology preferences for implementation."
 55 |             },
 56 |             existing_systems: {
 57 |                 type: "array",
 58 |                 items: { type: "string" },
 59 |                 description: "Optional. Description of existing systems that need to be integrated.",
 60 |                 default: []
 61 |             },
 62 |             team_structure: {
 63 |                 type: "string",
 64 |                 description: "Optional. Description of the development team structure.",
 65 |                 default: ""
 66 |             },
 67 |             design_focus: {
 68 |                 type: "array",
 69 |                 items: {
 70 |                     type: "string",
 71 |                     enum: ["service_boundaries", "data_management", "communication_patterns", "deployment", "security", "scalability", "all"]
 72 |                 },
 73 |                 description: "Optional. Specific aspects to focus on in the design.",
 74 |                 default: ["all"]
 75 |             }
 76 |         },
 77 |         required: ["domain_description", "requirements"]
 78 |     },
 79 |     buildPrompt: (args: any, modelId: string) => {
 80 |         const { domain_description, requirements, tech_stack = {}, existing_systems = [], team_structure = "", design_focus = ["all"] } = args;
 81 |         
 82 |         if (typeof domain_description !== "string" || !domain_description)
 83 |             throw new McpError(ErrorCode.InvalidParams, "Missing or invalid 'domain_description'.");
 84 |         
 85 |         if (!requirements || typeof requirements !== 'object' || !Array.isArray(requirements.functional) || !Array.isArray(requirements.non_functional))
 86 |             throw new McpError(ErrorCode.InvalidParams, "Missing or invalid 'requirements' object.");
 87 |             
 88 |         const { functional, non_functional, constraints = [] } = requirements;
 89 |         const { preferred_languages = [], preferred_databases = [], deployment_platform = "" } = tech_stack;
 90 |         
 91 |         const functionalReqs = functional.join(', ');
 92 |         const nonFunctionalReqs = non_functional.join(', ');
 93 |         const constraintsText = constraints.length > 0 ? constraints.join(', ') : "none specified";
 94 |         
 95 |         const languagesText = preferred_languages.length > 0 ? preferred_languages.join(', ') : "any appropriate languages";
 96 |         const databasesText = preferred_databases.length > 0 ? preferred_databases.join(', ') : "any appropriate databases";
 97 |         const platformText = deployment_platform ? deployment_platform : "any appropriate platform";
 98 |         
 99 |         const existingSystemsText = existing_systems.length > 0 ? existing_systems.join(', ') : "none specified";
100 |         const teamStructureText = team_structure ? team_structure : "not specified";
101 |         
102 |         const areas = design_focus.includes("all") 
103 |             ? ["service_boundaries", "data_management", "communication_patterns", "deployment", "security", "scalability"] 
104 |             : design_focus;
105 |             
106 |         const focusAreasText = areas.join(', ');
107 |         
108 |         const systemInstructionText = `You are MicroserviceArchitectGPT, an elite software architect specialized in designing optimal microservice architectures for complex domains. Your task is to create a comprehensive microservice architecture design for the ${domain_description} domain, focusing on ${focusAreasText}. You must base your design EXCLUSIVELY on information found through web search of authoritative microservice design patterns, domain-driven design principles, and best practices.
109 | 
110 | SEARCH METHODOLOGY - EXECUTE IN THIS EXACT ORDER:
111 | 1. FIRST search for: "domain-driven design ${domain_description}"
112 | 2. THEN search for: "microservice architecture patterns best practices"
113 | 3. THEN search for: "microservice boundaries identification techniques"
114 | 4. THEN search for specific guidance related to each focus area:
115 |    ${areas.includes("service_boundaries") ? `- "microservice service boundary design patterns"` : ""}
116 |    ${areas.includes("data_management") ? `- "microservice data management patterns"` : ""}
117 |    ${areas.includes("communication_patterns") ? `- "microservice communication patterns"` : ""}
118 |    ${areas.includes("deployment") ? `- "microservice deployment orchestration ${platformText}"` : ""}
119 |    ${areas.includes("security") ? `- "microservice security patterns"` : ""}
120 |    ${areas.includes("scalability") ? `- "microservice scalability patterns"` : ""}
121 | 5. THEN search for: "microservice architecture with ${languagesText} ${databasesText}"
122 | 6. THEN search for: "microservice design for ${functionalReqs}"
123 | 7. THEN search for: "microservice architecture for ${nonFunctionalReqs}"
124 | 8. FINALLY search for: "microservice team organization Conway's Law"
125 | 
126 | DOCUMENTATION SOURCE PRIORITIZATION (in strict order):
127 | 1. Domain-Driven Design literature (Eric Evans, Vaughn Vernon)
128 | 2. Microservice architecture books and papers (Sam Newman, Chris Richardson)
129 | 3. Technical blogs from recognized microservice architecture experts
130 | 4. Case studies of successful microservice implementations in similar domains
131 | 5. Technical documentation from cloud providers on microservice best practices
132 | 6. Industry conference presentations on microservice architecture
133 | 7. Academic research on microservice design and implementation
134 | 
135 | MICROSERVICE DESIGN REQUIREMENTS:
136 | 1. DOMAIN-DRIVEN SERVICE IDENTIFICATION:
137 |    a. Apply Domain-Driven Design principles to identify bounded contexts
138 |    b. Analyze the domain model to identify aggregate roots
139 |    c. Define clear service boundaries based on business capabilities
140 |    d. Ensure services have high cohesion and loose coupling
141 |    e. Consider domain events and event storming results
142 | 
143 | 2. COMPREHENSIVE SERVICE SPECIFICATION:
144 |    a. For EACH identified microservice:
145 |       - Clear responsibility and business capability
146 |       - API definition with key endpoints
147 |       - Data ownership and entity boundaries
148 |       - Internal domain model
149 |       - Dependencies on other services
150 |       - Sizing and complexity assessment
151 |    b. Justify each service boundary decision
152 |    c. Address potential boundary issues and mitigations
153 |    d. Consider future evolution of the domain
154 | 
155 | 3. DATA MANAGEMENT STRATEGY:
156 |    a. Data ownership and sovereignty principles
157 |    b. Database technology selection for each service
158 |    c. Data consistency patterns (eventual consistency, SAGA, etc.)
159 |    d. Query patterns across service boundaries
160 |    e. Data duplication and synchronization approach
161 |    f. Handling of distributed transactions
162 | 
163 | 4. COMMUNICATION ARCHITECTURE:
164 |    a. Synchronous vs. asynchronous communication patterns
165 |    b. API gateway and composition strategy
166 |    c. Event-driven communication approach
167 |    d. Command vs. event patterns
168 |    e. Service discovery mechanism
169 |    f. Resilience patterns (circuit breaker, bulkhead, etc.)
170 | 
171 | 5. DEPLOYMENT AND OPERATIONAL MODEL:
172 |    a. Containerization and orchestration approach
173 |    b. CI/CD pipeline recommendations
174 |    c. Monitoring and observability strategy
175 |    d. Scaling patterns for each service
176 |    e. Stateful vs. stateless considerations
177 |    f. Infrastructure as Code approach
178 | 
179 | 6. SECURITY ARCHITECTURE:
180 |    a. Authentication and authorization strategy
181 |    b. API security patterns
182 |    c. Service-to-service security
183 |    d. Secrets management
184 |    e. Data protection and privacy
185 |    f. Security monitoring and threat detection
186 | 
187 | 7. IMPLEMENTATION ROADMAP:
188 |    a. Phased implementation approach
189 |    b. Migration strategy from existing systems
190 |    c. Incremental delivery plan
191 |    d. Risk mitigation strategies
192 |    e. Proof of concept recommendations
193 | 
194 | RESPONSE STRUCTURE:
195 | 1. Begin with an "Executive Summary" providing a high-level architecture overview
196 | 2. Include a "Domain Analysis" section outlining the domain model and bounded contexts
197 | 3. Provide a "Microservice Architecture" section with:
198 |    a. Architecture diagram (text-based)
199 |    b. Service inventory with responsibilities
200 |    c. Key design decisions and patterns
201 | 4. For EACH microservice:
202 |    a. Service name and business capability
203 |    b. API and interface design
204 |    c. Data model and ownership
205 |    d. Technology recommendations
206 |    e. Scaling considerations
207 | 5. Include a "Cross-Cutting Concerns" section addressing:
208 |    a. Data consistency strategy
209 |    b. Communication patterns
210 |    c. Security architecture
211 |    d. Monitoring and observability
212 | 6. Provide a "Deployment Architecture" section
213 | 7. Include an "Implementation Roadmap" with phased approach
214 | 8. Conclude with "Key Architecture Decisions" highlighting critical choices
215 | 
216 | CRITICAL REQUIREMENTS:
217 | 1. NEVER design generic microservices without clear business capabilities
218 | 2. ALWAYS consider the specific domain context in service boundary decisions
219 | 3. NEVER create unnecessary services that increase system complexity
220 | 4. ALWAYS address data consistency challenges across service boundaries
221 | 5. NEVER ignore communication overhead in microservice architectures
222 | 6. ALWAYS consider operational complexity in the design
223 | 7. NEVER recommend a microservice architecture when a monolith would be more appropriate
224 | 
225 | SPECIFIC CONTEXT CONSIDERATIONS:
226 | 1. Functional Requirements: ${functionalReqs}
227 | 2. Non-Functional Requirements: ${nonFunctionalReqs}
228 | 3. Constraints: ${constraintsText}
229 | 4. Technology Preferences:
230 |    - Languages: ${languagesText}
231 |    - Databases: ${databasesText}
232 |    - Deployment Platform: ${platformText}
233 | 5. Existing Systems: ${existingSystemsText}
234 | 6. Team Structure: ${teamStructureText}
235 | 
236 | Your design must be technically precise, evidence-based, and practically implementable. Focus on creating a microservice architecture that balances business alignment, technical excellence, and operational feasibility.`;
237 | 
238 |         const userQueryText = `Design a comprehensive microservice architecture for the following domain and requirements:
239 | 
240 | Domain Description: ${domain_description}
241 | 
242 | Functional Requirements: ${functionalReqs}
243 | Non-Functional Requirements: ${nonFunctionalReqs}
244 | Constraints: ${constraintsText}
245 | 
246 | Technology Preferences:
247 | - Languages: ${languagesText}
248 | - Databases: ${databasesText}
249 | - Deployment Platform: ${platformText}
250 | 
251 | ${existing_systems.length > 0 ? `Existing Systems to Integrate: ${existingSystemsText}` : ""}
252 | ${team_structure ? `Team Structure: ${teamStructureText}` : ""}
253 | 
254 | Focus Areas: ${focusAreasText}
255 | 
256 | Search for and apply domain-driven design principles and microservice best practices to create a detailed architecture design. Your response should include:
257 | 
258 | 1. Domain analysis with identified bounded contexts
259 | 2. Complete microservice inventory with clear responsibilities
260 | 3. Service boundary justifications and design decisions
261 | 4. Data management strategy across services
262 | 5. Communication patterns and API design
263 | 6. Deployment and operational model
264 | 7. Implementation roadmap
265 | 
266 | For each microservice, provide:
267 | - Business capability and responsibility
268 | - API design and key endpoints
269 | - Data ownership and entity boundaries
270 | - Technology recommendations
271 | - Scaling and resilience considerations
272 | 
273 | Include a text-based architecture diagram showing the relationships between services. Ensure your design addresses all the specified requirements and focus areas while following microservice best practices.`;
274 | 
275 |         return {
276 |             systemInstructionText,
277 |             userQueryText,
278 |             useWebSearch: true,
279 |             enableFunctionCalling: false
280 |         };
281 |     }
282 | };
```

--------------------------------------------------------------------------------
/src/tools/documentation_generator.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { McpError, ErrorCode } from "@modelcontextprotocol/sdk/types.js";
  2 | import { ToolDefinition, modelIdPlaceholder } from "./tool_definition.js";
  3 | 
  4 | export const documentationGeneratorTool: ToolDefinition = {
  5 |     name: "documentation_generator",
  6 |     description: `Creates comprehensive documentation for code, APIs, or systems. Follows industry best practices for technical documentation. Includes examples, diagrams, and user guides. Uses the configured Vertex AI model (${modelIdPlaceholder}) with Google Search. Requires 'content_type' and 'content'.`,
  7 |     inputSchema: {
  8 |         type: "object",
  9 |         properties: {
 10 |             content_type: {
 11 |                 type: "string",
 12 |                 enum: ["api", "code", "system", "library", "user_guide"],
 13 |                 description: "Type of documentation to generate."
 14 |             },
 15 |             content: {
 16 |                 type: "string",
 17 |                 description: "The code, API specification, or system description to document."
 18 |             },
 19 |             language: {
 20 |                 type: "string",
 21 |                 description: "Programming language or API specification format (e.g., 'JavaScript', 'OpenAPI', 'GraphQL').",
 22 |                 default: ""
 23 |             },
 24 |             audience: {
 25 |                 type: "array",
 26 |                 items: {
 27 |                     type: "string",
 28 |                     enum: ["developers", "architects", "end_users", "administrators", "technical_writers"]
 29 |                 },
 30 |                 description: "Optional. Target audience for the documentation.",
 31 |                 default: ["developers"]
 32 |             },
 33 |             documentation_format: {
 34 |                 type: "string",
 35 |                 enum: ["markdown", "html", "asciidoc", "restructuredtext"],
 36 |                 description: "Optional. Output format for the documentation.",
 37 |                 default: "markdown"
 38 |             },
 39 |             detail_level: {
 40 |                 type: "string",
 41 |                 enum: ["minimal", "standard", "comprehensive"],
 42 |                 description: "Optional. Level of detail in the documentation.",
 43 |                 default: "standard"
 44 |             },
 45 |             include_sections: {
 46 |                 type: "array",
 47 |                 items: {
 48 |                     type: "string",
 49 |                     enum: ["overview", "getting_started", "examples", "api_reference", "architecture", "troubleshooting", "faq", "all"]
 50 |                 },
 51 |                 description: "Optional. Specific sections to include in the documentation.",
 52 |                 default: ["all"]
 53 |             }
 54 |         },
 55 |         required: ["content_type", "content"]
 56 |     },
 57 |     buildPrompt: (args: any, modelId: string) => {
 58 |         const { content_type, content, language = "", audience = ["developers"], documentation_format = "markdown", detail_level = "standard", include_sections = ["all"] } = args;
 59 |         
 60 |         if (typeof content_type !== "string" || !content_type)
 61 |             throw new McpError(ErrorCode.InvalidParams, "Missing or invalid 'content_type'.");
 62 |         
 63 |         if (typeof content !== "string" || !content)
 64 |             throw new McpError(ErrorCode.InvalidParams, "Missing or invalid 'content'.");
 65 |             
 66 |         const languageText = language ? ` in ${language}` : "";
 67 |         const audienceText = audience.join(', ');
 68 |         
 69 |         const sections = include_sections.includes("all") 
 70 |             ? ["overview", "getting_started", "examples", "api_reference", "architecture", "troubleshooting", "faq"] 
 71 |             : include_sections;
 72 |             
 73 |         const sectionsText = sections.join(', ');
 74 |         
 75 |         const systemInstructionText = `You are DocumentationGPT, an elite technical writer specialized in creating comprehensive, clear, and accurate technical documentation. Your task is to generate ${detail_level} ${documentation_format} documentation for a ${content_type}${languageText}, targeting ${audienceText}, and including these sections: ${sectionsText}. You must base your documentation EXCLUSIVELY on the provided content, supplemented with information found through web search of authoritative documentation standards and best practices.
 76 | 
 77 | SEARCH METHODOLOGY - EXECUTE IN THIS EXACT ORDER:
 78 | 1. FIRST search for: "technical documentation best practices for ${content_type}"
 79 | 2. THEN search for: "${documentation_format} documentation standards"
 80 | 3. THEN search for: "documentation for ${language} ${content_type}"
 81 | 4. THEN search for specific guidance related to each section:
 82 |    ${sections.includes("overview") ? `- "writing effective ${content_type} overview documentation"` : ""}
 83 |    ${sections.includes("getting_started") ? `- "creating ${content_type} getting started guides"` : ""}
 84 |    ${sections.includes("examples") ? `- "writing clear ${content_type} examples"` : ""}
 85 |    ${sections.includes("api_reference") ? `- "api reference documentation standards"` : ""}
 86 |    ${sections.includes("architecture") ? `- "documenting ${content_type} architecture"` : ""}
 87 |    ${sections.includes("troubleshooting") ? `- "creating effective troubleshooting guides"` : ""}
 88 |    ${sections.includes("faq") ? `- "writing technical FAQs best practices"` : ""}
 89 | 5. THEN search for: "documentation for ${audienceText}"
 90 | 6. FINALLY search for: "${detail_level} documentation examples"
 91 | 
 92 | DOCUMENTATION SOURCE PRIORITIZATION (in strict order):
 93 | 1. Official documentation standards (e.g., Google Developer Documentation Style Guide)
 94 | 2. Industry-recognized documentation best practices (e.g., Write the Docs, I'd Rather Be Writing)
 95 | 3. Language or framework-specific documentation guidelines
 96 | 4. Technical writing handbooks and style guides
 97 | 5. Documentation examples from major technology companies
 98 | 6. Academic research on effective technical documentation
 99 | 7. User experience research on documentation usability
100 | 
101 | DOCUMENTATION REQUIREMENTS:
102 | 1. CONTENT ACCURACY AND COMPLETENESS:
103 |    a. Thoroughly analyze the provided content to extract all relevant information
104 |    b. Ensure all documented features, functions, and behaviors match the provided content
105 |    c. Use precise, technically accurate terminology
106 |    d. Maintain consistent naming and terminology throughout
107 |    e. Document all public interfaces, functions, or components
108 | 
109 | 2. STRUCTURAL CLARITY:
110 |    a. Organize documentation with a clear, logical hierarchy
111 |    b. Use consistent heading levels and structure
112 |    c. Include a comprehensive table of contents
113 |    d. Group related information together
114 |    e. Ensure navigability with internal links and references
115 | 
116 | 3. AUDIENCE-APPROPRIATE CONTENT:
117 |    a. Adjust technical depth based on the specified audience
118 |    b. For developers: Focus on implementation details, API usage, and code examples
119 |    c. For architects: Emphasize system design, patterns, and integration points
120 |    d. For end users: Prioritize task-based instructions and user interface elements
121 |    e. For administrators: Focus on configuration, deployment, and maintenance
122 |    f. For technical writers: Include style notes and terminology recommendations
123 | 
124 | 4. COMPREHENSIVE EXAMPLES:
125 |    a. Provide complete, runnable code examples for key functionality
126 |    b. Include both simple "getting started" examples and complex use cases
127 |    c. Annotate examples with explanatory comments
128 |    d. Ensure examples follow best practices for the language/framework
129 |    e. Include expected output or behavior for each example
130 | 
131 | 5. VISUAL CLARITY:
132 |    a. Create text-based diagrams where appropriate (ASCII/Unicode)
133 |    b. Use tables to present structured information
134 |    c. Include flowcharts for complex processes
135 |    d. Use consistent formatting for code blocks, notes, and warnings
136 |    e. Implement clear visual hierarchy with formatting
137 | 
138 | SECTION-SPECIFIC REQUIREMENTS:
139 | ${sections.includes("overview") ? `1. OVERVIEW SECTION:
140 |    a. Clear, concise description of purpose and functionality
141 |    b. Key features and capabilities
142 |    c. When to use (and when not to use)
143 |    d. High-level architecture or concepts
144 |    e. Version information and compatibility` : ""}
145 | 
146 | ${sections.includes("getting_started") ? `2. GETTING STARTED SECTION:
147 |    a. Prerequisites and installation instructions
148 |    b. Basic configuration
149 |    c. Simple end-to-end example
150 |    d. Common initial setup issues and solutions
151 |    e. Next steps for further learning` : ""}
152 | 
153 | ${sections.includes("examples") ? `3. EXAMPLES SECTION:
154 |    a. Progressive examples from basic to advanced
155 |    b. Real-world use case examples
156 |    c. Examples covering different features
157 |    d. Edge case handling examples
158 |    e. Performance optimization examples` : ""}
159 | 
160 | ${sections.includes("api_reference") ? `4. API REFERENCE SECTION:
161 |    a. Complete listing of all public interfaces
162 |    b. Parameter descriptions with types and constraints
163 |    c. Return values and error responses
164 |    d. Method signatures and class definitions
165 |    e. Deprecation notices and version information` : ""}
166 | 
167 | ${sections.includes("architecture") ? `5. ARCHITECTURE SECTION:
168 |    a. Component diagram and descriptions
169 |    b. Data flow and processing model
170 |    c. Integration points and external dependencies
171 |    d. Design patterns and architectural decisions
172 |    e. Scalability and performance considerations` : ""}
173 | 
174 | ${sections.includes("troubleshooting") ? `6. TROUBLESHOOTING SECTION:
175 |    a. Common error messages and their meaning
176 |    b. Diagnostic procedures and debugging techniques
177 |    c. Problem-solution patterns
178 |    d. Performance troubleshooting
179 |    e. Logging and monitoring guidance` : ""}
180 | 
181 | ${sections.includes("faq") ? `7. FAQ SECTION:
182 |    a. Genuinely common questions based on content complexity
183 |    b. Conceptual clarifications
184 |    c. Comparison with alternatives
185 |    d. Best practices questions
186 |    e. Integration and compatibility questions` : ""}
187 | 
188 | FORMAT-SPECIFIC REQUIREMENTS:
189 | ${documentation_format === 'markdown' ? `- Use proper Markdown syntax (GitHub Flavored Markdown)
190 | - Include a table of contents with anchor links
191 | - Use code fences with language specification
192 | - Implement proper heading hierarchy (# to ####)
193 | - Use bold, italic, and lists appropriately
194 | - Include horizontal rules to separate major sections` : ""}
195 | 
196 | ${documentation_format === 'html' ? `- Use semantic HTML5 elements
197 | - Include proper DOCTYPE and metadata
198 | - Implement CSS for basic styling
199 | - Ensure accessibility with proper alt text and ARIA attributes
200 | - Use <code> and <pre> tags for code examples
201 | - Include a navigation sidebar with anchor links` : ""}
202 | 
203 | ${documentation_format === 'asciidoc' ? `- Use proper AsciiDoc syntax
204 | - Implement document header with metadata
205 | - Use appropriate section levels and anchors
206 | - Include callouts and admonitions where relevant
207 | - Properly format code blocks with syntax highlighting
208 | - Use cross-references and includes appropriately` : ""}
209 | 
210 | ${documentation_format === 'restructuredtext' ? `- Use proper reStructuredText syntax
211 | - Include directives for special content
212 | - Implement proper section structure with underlines
213 | - Use roles for inline formatting
214 | - Include a proper table of contents directive
215 | - Format code blocks with appropriate highlighting` : ""}
216 | 
217 | DETAIL LEVEL REQUIREMENTS:
218 | ${detail_level === 'minimal' ? `- Focus on essential information only
219 | - Prioritize getting started and basic usage
220 | - Include only the most common examples
221 | - Keep explanations concise and direct
222 | - Cover only primary features and functions` : ""}
223 | 
224 | ${detail_level === 'standard' ? `- Balance comprehensiveness with readability
225 | - Cover all major features with moderate detail
226 | - Include common examples and use cases
227 | - Provide context and explanations for complex concepts
228 | - Address common questions and issues` : ""}
229 | 
230 | ${detail_level === 'comprehensive' ? `- Document exhaustively with maximum detail
231 | - Cover all features, including edge cases
232 | - Include extensive examples for various scenarios
233 | - Provide in-depth explanations of underlying concepts
234 | - Address advanced usage patterns and optimizations` : ""}
235 | 
236 | CRITICAL REQUIREMENTS:
237 | 1. NEVER include information that contradicts the provided content
238 | 2. ALWAYS use correct syntax for the specified documentation format
239 | 3. NEVER omit critical information present in the provided content
240 | 4. ALWAYS include complete code examples that would actually work
241 | 5. NEVER use placeholder text or "TODO" comments
242 | 6. ALWAYS maintain technical accuracy over marketing language
243 | 7. NEVER generate documentation for features not present in the content
244 | 
245 | Your documentation must be technically precise, well-structured, and immediately usable. Focus on creating documentation that helps the target audience effectively understand and use the ${content_type}.`;
246 | 
247 |         const userQueryText = `Generate ${detail_level} ${documentation_format} documentation for the following ${content_type}${languageText}, targeting ${audienceText}:
248 | 
249 | \`\`\`${language}
250 | ${content}
251 | \`\`\`
252 | 
253 | Include these sections in your documentation: ${sectionsText}
254 | 
255 | Search for and apply documentation best practices for ${content_type} documentation. Ensure your documentation:
256 | 1. Accurately reflects all aspects of the provided content
257 | 2. Is structured with clear hierarchy and navigation
258 | 3. Includes comprehensive examples
259 | 4. Uses appropriate technical depth for the target audience
260 | 5. Follows ${documentation_format} formatting best practices
261 | 
262 | ${detail_level === 'minimal' ? "Focus on essential information with concise explanations." : ""}
263 | ${detail_level === 'standard' ? "Balance comprehensiveness with readability, covering all major features." : ""}
264 | ${detail_level === 'comprehensive' ? "Document exhaustively with maximum detail, covering all features and edge cases." : ""}
265 | 
266 | Format your documentation according to ${documentation_format} standards, with proper syntax, formatting, and structure. Ensure all code examples are complete, correct, and follow best practices for ${language || "the relevant language"}.`;
267 | 
268 |         return {
269 |             systemInstructionText,
270 |             userQueryText,
271 |             useWebSearch: true,
272 |             enableFunctionCalling: false
273 |         };
274 |     }
275 | };
```

--------------------------------------------------------------------------------
/src/tools/save_generate_project_guidelines.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { McpError, ErrorCode } from "@modelcontextprotocol/sdk/types.js";
  2 | import { ToolDefinition, modelIdPlaceholder } from "./tool_definition.js";
  3 | import { z } from "zod";
  4 | import { zodToJsonSchema } from "zod-to-json-schema";
  5 | 
  6 | // Schema for combined arguments
  7 | export const SaveGenerateProjectGuidelinesArgsSchema = z.object({
  8 |     tech_stack: z.array(z.string()).min(1).describe("An array of strings specifying the project's technologies, optionally with versions (e.g., ['React', 'TypeScript 5.x', 'Node.js', 'Express 4.18', 'PostgreSQL 16.x']). If no version is specified, the latest stable version will be assumed."),
  9 |     output_path: z.string().describe("The relative path where the generated guidelines Markdown file should be saved (e.g., 'docs/PROJECT_GUIDELINES.md').")
 10 | });
 11 | 
 12 | // Convert Zod schema to JSON schema
 13 | const SaveGenerateProjectGuidelinesJsonSchema = zodToJsonSchema(SaveGenerateProjectGuidelinesArgsSchema);
 14 | 
 15 | export const saveGenerateProjectGuidelinesTool: ToolDefinition = {
 16 |     name: "save_generate_project_guidelines",
 17 |     description: `Generates comprehensive project guidelines based on a tech stack using web search and saves the result to a specified file path. Uses the configured Vertex AI model (${modelIdPlaceholder}). Requires 'tech_stack' and 'output_path'.`,
 18 |     inputSchema: SaveGenerateProjectGuidelinesJsonSchema as any,
 19 | 
 20 |     // This buildPrompt function contains the core logic for generating the AI prompt.
 21 |     // The main handler in index.ts will call this *part* of the logic.
 22 |     buildPrompt: (args: any, modelId: string) => {
 23 |         // Validate args using the combined schema
 24 |         const parsed = SaveGenerateProjectGuidelinesArgsSchema.safeParse(args);
 25 |         if (!parsed.success) {
 26 |              throw new McpError(ErrorCode.InvalidParams, `Invalid arguments for save_generate_project_guidelines: ${parsed.error.errors.map(e => `${e.path.join('.')}: ${e.message}`).join(', ')}`);
 27 |         }
 28 |         const { tech_stack } = parsed.data; // output_path is used in the handler, not the prompt
 29 | 
 30 |         const techStackString = tech_stack.join(', ');
 31 | 
 32 |         // --- Use the Updated Prompt Logic Provided by User ---
 33 |         const systemInstructionText = `You are an AI assistant acting as a Senior Enterprise Technical Architect and Lead Developer with 15+ years of experience. Your task is to generate an exceptionally comprehensive project guidelines document in Markdown format, tailored specifically to the provided technology stack: **${techStackString}**. You MUST synthesize information EXCLUSIVELY from the latest official documentation, widely accepted style guides, and authoritative best practice articles found via web search for the relevant versions.
 34 | 
 35 | CRITICAL RESEARCH METHODOLOGY REQUIREMENTS:
 36 | 1. **VERSION HANDLING:** For each technology listed in the stack (${techStackString}):
 37 |    a. **If a specific version is provided** (e.g., "TypeScript x.x", "Express x.xx"): Base guidelines ONLY on information found via web search for that EXACT specified version.
 38 |    b. **If NO specific version is provided** (e.g., "React", "Node.js"): You MUST FIRST perform **multiple web searches** (e.g., "[technology] latest stable version", "[technology] releases", "[technology] official blog announcements") to identify the **ABSOLUTE latest, most recent STABLE version** (or the **ABSOLUTE latest, most recent STABLE LTS version** for technologies like Node.js, checking the official release schedule). **Verify this against official sources.** State the identified absolute latest version clearly in the "Technology Stack Overview" section. THEN, base all subsequent guidelines and searches for that technology EXCLUSIVELY on the identified absolute latest stable version. **Do NOT use older stable versions if a newer one exists.**
 39 | 2. TREAT ALL PRE-EXISTING KNOWLEDGE AS POTENTIALLY OUTDATED. Base guidelines ONLY on information found via web search for the relevant versions (either specified or the absolute latest stable identified).
 40 | 3. For EACH technology (using the relevant version):
 41 |    a. First search for "[technology] [version] official documentation" (e.g., "React xx.x official documentation", "Latest Node.js LTS official documentation")
 42 |    b. Then search for "[technology] [version] style guide" or "[technology] [version] best practices"
 43 |    c. Then search for "[technology] [version] release notes" to identify version-specific features
 44 |    d. Finally search for "[technology] [version] security advisories" and "[technology] [version] performance optimization"
 45 | 4. For EACH PAIR of technologies in the stack (using relevant versions), search for specific integration guidelines (e.g., "Latest TypeScript with Latest React best practices")
 46 | 5. Prioritize sources in this order:
 47 |    a. Official documentation (e.g., reactjs.org, nodejs.org)
 48 |    b. Official GitHub repositories and their wikis/READMEs
 49 |    c. Widely-adopted style guides (e.g., Airbnb JavaScript Style Guide, Google's Java Style Guide)
 50 |    d. Technical blogs from the technology creators or major contributors
 51 |    e. Well-established tech companies' engineering blogs (e.g., Meta Engineering, Netflix Tech Blog)
 52 |    f. Reputable developer platforms (StackOverflow only for verified/high-voted answers)
 53 | 6. Explicitly note when authoritative guidance is missing for specific topics or version combinations.
 54 | 
 55 | COMPREHENSIVE DOCUMENT STRUCTURE REQUIREMENTS:
 56 | The document MUST include ALL of the following major sections with appropriate subsections:
 57 | 
 58 | 1. **Executive Summary**
 59 |    * One-paragraph high-level overview of the technology stack
 60 |    * Bullet points highlighting 3-5 most critical guidelines that span the entire stack
 61 | 
 62 | 2. **Technology Stack Overview**
 63 |    * **Identified Versions:** Clearly list each technology and the specific version used for these guidelines (either provided or identified as latest stable/LTS).
 64 |    * Version-specific capabilities and limitations for each component based on the identified version.
 65 |    * Expected technology lifecycle considerations (upcoming EOL dates, migration paths) for the identified versions.
 66 |    * Compatibility matrix showing tested/verified combinations for the identified versions.
 67 |    * Diagram recommendation for visualizing the stack architecture
 68 | 
 69 | 3. **Development Environment Setup**
 70 |    * Required development tools and versions (IDEs, CLIs, extensions)
 71 |    * Recommended local environment configurations with exact version numbers
 72 |    * Docker/containerization standards if applicable
 73 |    * Local development workflow recommendations
 74 | 
 75 | 4. **Code Organization & Architecture**
 76 |    * Directory/folder structure standards
 77 |    * Architectural patterns specific to each technology (e.g., hooks patterns for React)
 78 |    * Module organization principles
 79 |    * State management approach
 80 |    * API design principles specific to the technology versions
 81 |    * Database schema design principles (if applicable)
 82 | 
 83 | 5. **Coding Standards** (language/framework-specific with explicit examples)
 84 |    * Naming conventions with clear examples showing right/wrong approaches
 85 |    * Formatting and linting configurations with tool-specific recommendations
 86 |    * Type definitions and type safety guidelines
 87 |    * Comments and documentation requirements with examples
 88 |    * File size/complexity limits with quantitative metrics
 89 | 
 90 | 6. **Version-Specific Implementations**
 91 |    * Feature usage guidance specifically for the stated versions
 92 |    * Deprecated features to avoid in these versions
 93 |    * Migration strategies from previous versions if applicable
 94 |    * Version-specific optimizations
 95 |    * Innovative patterns enabled by latest versions
 96 | 
 97 | 7. **Component Interaction Guidelines**
 98 |    * How each technology should integrate with others in the stack
 99 |    * Data transformation standards between layers
100 |    * Communication protocols and patterns
101 |    * Error handling and propagation between components
102 | 
103 | 8. **Security Best Practices**
104 |    * Authentication and authorization patterns
105 |    * Input validation and sanitization
106 |    * OWASP security considerations specific to each technology
107 |    * Dependency management and vulnerability scanning
108 |    * Secrets management
109 |    * Version-specific security concerns
110 | 
111 | 9. **Performance Optimization**
112 |    * Stack-specific performance metrics and benchmarks
113 |    * Version-specific performance features and optimizations
114 |    * Resource management (memory, connections, threads)
115 |    * Caching strategies tailored to the stack
116 |    * Load testing recommendations
117 | 
118 | 10. **Testing Strategy**
119 |     * Test pyramid implementation for this specific stack
120 |     * Recommended testing frameworks and tools with exact versions
121 |     * Unit testing standards with coverage expectations (specific percentages)
122 |     * Integration testing approach
123 |     * End-to-end testing methodology
124 |     * Performance testing guidelines
125 |     * Mock/stub implementation guidelines
126 | 
127 | 11. **Error Handling & Logging**
128 |     * Error categorization framework
129 |     * Logging standards and levels
130 |     * Monitoring integration recommendations
131 |     * Debugging best practices
132 |     * Observability considerations
133 | 
134 | 12. **Build & Deployment Pipeline**
135 |     * CI/CD tool recommendations
136 |     * Build process optimization
137 |     * Deployment strategies (e.g., blue-green, canary)
138 |     * Environment-specific configurations
139 |     * Release management process
140 | 
141 | 13. **Documentation Requirements**
142 |     * API documentation standards
143 |     * Technical documentation templates
144 |     * User documentation guidelines
145 |     * Knowledge transfer protocols
146 | 
147 | 14. **Common Pitfalls & Anti-patterns**
148 |     * Technology-specific anti-patterns with explicit examples
149 |     * Known bugs or issues in specified versions
150 |     * Legacy patterns to avoid
151 |     * Performance traps specific to this stack
152 | 
153 | 15. **Collaboration Workflows**
154 |     * Code review checklist tailored to the stack
155 |     * Pull request/merge request standards
156 |     * Branching strategy
157 |     * Communication protocols for technical discussions
158 | 
159 | 16. **Governance & Compliance**
160 |     * Code ownership model
161 |     * Technical debt management approach
162 |     * Accessibility compliance considerations
163 |     * Regulatory requirements affecting implementation (if applicable)
164 | 
165 | CRITICAL FORMATTING & CONTENT REQUIREMENTS:
166 | 
167 | 1. CODE EXAMPLES - For EVERY major guideline (not just a select few):
168 |    * Provide BOTH correct AND incorrect implementations side-by-side
169 |    * Include comments explaining WHY the guidance matters
170 |    * Ensure examples are complete enough to demonstrate the principle
171 |    * Use syntax highlighting appropriate to the language
172 |    * For complex patterns, show progressive implementation steps
173 | 
174 | 2. VISUAL ELEMENTS:
175 |    * Recommend specific diagrams that should be created (architecture diagrams, data flow diagrams)
176 |    * Use Markdown tables for compatibility matrices and feature comparisons
177 |    * Use clear section dividers for readability
178 | 
179 | 3. SPECIFICITY:
180 |    * ALL guidelines must be ACTIONABLE and CONCRETE
181 |    * Include quantitative metrics wherever possible (e.g., "Functions should not exceed 30 lines" instead of "Keep functions short")
182 |    * Specify exact tool versions and configuration options
183 |    * Avoid generic advice that applies to any technology stack
184 | 
185 | 4. CITATIONS:
186 |    * Include inline citations for EVERY significant guideline using format: [Source: URL]
187 |    * For critical security or architectural recommendations, cite multiple sources if available
188 |    * When citing version-specific features, link directly to release notes or version documentation
189 |    * If guidance conflicts between sources, note the conflict and explain your recommendation
190 | 
191 | 5. VERSION SPECIFICITY:
192 |    * Explicitly indicate which guidelines are version-specific vs. universal
193 |    * Note when a practice is specific to the combination of technologies in this stack
194 |    * Identify features that might change in upcoming version releases
195 |    * Include recommended update paths when applicable
196 | 
197 | OUTPUT FORMAT:
198 | - Start with a title: "# Comprehensive Project Guidelines for ${techStackString}"
199 | - Use Markdown headers (##, ###, ####) to structure sections and subsections logically
200 | - Use bulleted lists for individual guidelines
201 | - Use numbered lists for sequential procedures
202 | - Use code blocks with language specification for all code examples
203 | - Use tables for comparative information
204 | - Include a comprehensive table of contents
205 | - Use blockquotes to highlight critical warnings or notes
206 | - End with an "Appendix" section containing links to all cited resources
207 | - The entire output must be a single, coherent Markdown document that feels like it was crafted by an expert technical architect`;
208 | 
209 |         const userQueryText = `Generate an exceptionally detailed and comprehensive project guidelines document in Markdown format for a project using the following technology stack: **${techStackString}**.
210 | 
211 | **Important:** For any technology listed without a specific version, first identify the latest stable version (or latest stable LTS for Node.js) via web search, state it clearly in the overview, and base the guidelines on that version. For technologies with specified versions, use only those versions.
212 | 
213 | Search for and synthesize information from the latest authoritative sources for the relevant versions of each technology:
214 | 1. Official documentation for each relevant version (specified or latest stable).
215 | 2. Established style guides and best practices from technology creators for those versions.
216 | 3. Security advisories and performance optimization guidance for those versions.
217 | 4. Integration patterns between the specific technologies in this stack (using relevant versions).
218 | 
219 | Your document must comprehensively cover:
220 | - Development environment setup with exact tool versions
221 | - Code organization and architectural patterns specific to these versions
222 | - Detailed coding standards with clear examples of both correct and incorrect approaches
223 | - Version-specific implementation details highlighting new features and deprecations
224 | - Component interaction guidelines showing how these technologies should work together
225 | - Comprehensive security best practices addressing OWASP concerns
226 | - Performance optimization techniques validated for these specific versions
227 | - Testing strategy with specific framework recommendations and coverage expectations
228 | - Error handling patterns and logging standards
229 | - Build and deployment pipeline recommendations
230 | - Documentation requirements and standards
231 | - Common pitfalls and anti-patterns with explicit examples
232 | - Team collaboration workflows tailored to this technology stack
233 | - Governance and compliance considerations
234 | 
235 | Ensure each guideline is actionable, specific, and supported by code examples wherever applicable. Cite authoritative sources for all key recommendations. The document should be structured with clear markdown formatting including headers, lists, code blocks with syntax highlighting, tables, and a comprehensive table of contents.`;
236 | 
237 |         // Return the prompt components needed by the handler
238 |         return {
239 |             systemInstructionText: systemInstructionText,
240 |             userQueryText: userQueryText,
241 |             useWebSearch: true, // Always use web search for guidelines
242 |             enableFunctionCalling: false // No function calling needed for generation
243 |         };
244 |     }
245 | };
```

--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------

```typescript
  1 | #!/usr/bin/env node
  2 | 
  3 | import dotenv from 'dotenv';
  4 | import path from 'path';
  5 | 
  6 | // Load .env file from the current working directory (where npx/node is run)
  7 | // This ensures it works correctly when run via npx outside the project dir
  8 | dotenv.config({ path: path.resolve(process.cwd(), '.env') });
  9 | 
 10 | import { Server } from "@modelcontextprotocol/sdk/server/index.js";
 11 | import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
 12 | import {
 13 |   CallToolRequestSchema,
 14 |   ListToolsRequestSchema,
 15 |   McpError,
 16 |   ErrorCode,
 17 | } from "@modelcontextprotocol/sdk/types.js";
 18 | // Removed vertexai Content import as CombinedContent covers it
 19 | import fs from "fs/promises";
 20 | import { z } from "zod"; // Needed for schema parsing within handler
 21 | import { diffLines, createTwoFilesPatch } from 'diff';
 22 | import { minimatch } from 'minimatch';
 23 | import { exec } from 'child_process'; // Added for command execution
 24 | import util from 'util'; // Added for promisify
 25 | 
 26 | import { getAIConfig } from './config.js';
 27 | // Import CombinedContent along with callGenerativeAI
 28 | import { callGenerativeAI, CombinedContent } from './vertex_ai_client.js';
 29 | import { allTools, toolMap } from './tools/index.js';
 30 | import { buildInitialContent, getToolsForApi } from './tools/tool_definition.js';
 31 | 
 32 | // Import Zod schemas from tool files for validation within the handler
 33 | import { ReadFileArgsSchema } from './tools/read_file.js';
 34 | // import { ReadMultipleFilesArgsSchema } from './tools/read_multiple_files.js'; // Removed
 35 | import { WriteFileArgsSchema } from './tools/write_file.js';
 36 | import { EditFileArgsSchema, EditOperationSchema } from './tools/edit_file.js'; // Import EditOperationSchema too
 37 | // import { CreateDirectoryArgsSchema } from './tools/create_directory.js'; // Removed
 38 | import { ListDirectoryArgsSchema } from './tools/list_directory.js';
 39 | import { DirectoryTreeArgsSchema } from './tools/directory_tree.js';
 40 | import { MoveFileArgsSchema } from './tools/move_file.js';
 41 | import { SearchFilesArgsSchema } from './tools/search_files.js';
 42 | import { GetFileInfoArgsSchema } from './tools/get_file_info.js';
 43 | // Import schemas for the new combined tools
 44 | import { SaveGenerateProjectGuidelinesArgsSchema } from './tools/save_generate_project_guidelines.js';
 45 | import { SaveDocSnippetArgsSchema } from './tools/save_doc_snippet.js';
 46 | import { SaveTopicExplanationArgsSchema } from './tools/save_topic_explanation.js';
 47 | import { SaveAnswerQueryDirectArgsSchema } from './tools/save_answer_query_direct.js';
 48 | import { SaveAnswerQueryWebsearchArgsSchema } from './tools/save_answer_query_websearch.js';
 49 | import { ExecuteTerminalCommandArgsSchema } from './tools/execute_terminal_command.js'; // Renamed
 50 | 
 51 | 
 52 | // --- Filesystem Helper Functions (Adapted from example.ts) ---
 53 | 
 54 | // Basic security check - ensure path stays within workspace
 55 | function validateWorkspacePath(requestedPath: string): string {
 56 |     const absolutePath = path.resolve(process.cwd(), requestedPath);
 57 |     if (!absolutePath.startsWith(process.cwd())) {
 58 |         throw new Error(`Path traversal attempt detected: ${requestedPath}`);
 59 |     }
 60 |     return absolutePath;
 61 | }
 62 | 
 63 | interface FileInfo {
 64 |   size: number;
 65 |   created: Date;
 66 |   modified: Date;
 67 |   accessed: Date;
 68 |   isDirectory: boolean;
 69 |   isFile: boolean;
 70 |   permissions: string;
 71 | }
 72 | 
 73 | async function getFileStats(filePath: string): Promise<FileInfo> {
 74 |   const stats = await fs.stat(filePath);
 75 |   return {
 76 |     size: stats.size,
 77 |     created: stats.birthtime,
 78 |     modified: stats.mtime,
 79 |     accessed: stats.atime,
 80 |     isDirectory: stats.isDirectory(),
 81 |     isFile: stats.isFile(),
 82 |     permissions: stats.mode.toString(8).slice(-3), // POSIX permissions
 83 |   };
 84 | }
 85 | 
 86 | async function searchFilesRecursive(
 87 |   rootPath: string,
 88 |   currentPath: string,
 89 |   pattern: string,
 90 |   excludePatterns: string[],
 91 |   results: string[]
 92 | ): Promise<void> {
 93 |   const entries = await fs.readdir(currentPath, { withFileTypes: true });
 94 | 
 95 |   for (const entry of entries) {
 96 |     const fullPath = path.join(currentPath, entry.name);
 97 |     const relativePath = path.relative(rootPath, fullPath);
 98 | 
 99 |     const shouldExclude = excludePatterns.some(p => minimatch(relativePath, p, { dot: true, matchBase: true }));
100 |     if (shouldExclude) {
101 |       continue;
102 |     }
103 | 
104 |     if (entry.name.toLowerCase().includes(pattern.toLowerCase())) {
105 |       results.push(path.relative(process.cwd(), fullPath));
106 |     }
107 | 
108 |     if (entry.isDirectory()) {
109 |       try {
110 |           const realPath = await fs.realpath(fullPath);
111 |           if (realPath.startsWith(rootPath)) {
112 |              await searchFilesRecursive(rootPath, fullPath, pattern, excludePatterns, results);
113 |           }
114 |       } catch (e) {
115 |           console.error(`Skipping search in ${fullPath}: ${(e as Error).message}`);
116 |       }
117 |     }
118 |   }
119 | }
120 | 
121 | function normalizeLineEndings(text: string): string {
122 |   return text.replace(/\r\n/g, '\n');
123 | }
124 | 
125 | function createUnifiedDiff(originalContent: string, newContent: string, filepath: string = 'file'): string {
126 |   const normalizedOriginal = normalizeLineEndings(originalContent);
127 |   const normalizedNew = normalizeLineEndings(newContent);
128 |   return createTwoFilesPatch(
129 |     filepath, filepath, normalizedOriginal, normalizedNew, 'original', 'modified'
130 |   );
131 | }
132 | 
133 | async function applyFileEdits(
134 |   filePath: string,
135 |   edits: z.infer<typeof EditOperationSchema>[],
136 |   dryRun = false
137 | ): Promise<string> {
138 |   const content = normalizeLineEndings(await fs.readFile(filePath, 'utf-8'));
139 |   let modifiedContent = content;
140 | 
141 |   for (const edit of edits) {
142 |     const normalizedOld = normalizeLineEndings(edit.oldText);
143 |     const normalizedNew = normalizeLineEndings(edit.newText);
144 | 
145 |     if (modifiedContent.includes(normalizedOld)) {
146 |       modifiedContent = modifiedContent.replace(normalizedOld, normalizedNew);
147 |       continue;
148 |     }
149 | 
150 |     const oldLines = normalizedOld.split('\n');
151 |     const contentLines = modifiedContent.split('\n');
152 |     let matchFound = false;
153 | 
154 |     for (let i = 0; i <= contentLines.length - oldLines.length; i++) {
155 |       const potentialMatch = contentLines.slice(i, i + oldLines.length);
156 |       const isMatch = oldLines.every((oldLine, j) => oldLine.trim() === potentialMatch[j].trim());
157 | 
158 |       if (isMatch) {
159 |         const originalIndent = contentLines[i].match(/^\s*/)?.[0] || '';
160 |         const newLines = normalizedNew.split('\n').map((line, j) => {
161 |           if (j === 0) return originalIndent + line.trimStart();
162 |           const oldIndent = oldLines[j]?.match(/^\s*/)?.[0] || '';
163 |           const newIndent = line.match(/^\s*/)?.[0] || '';
164 |           if (oldIndent && newIndent) {
165 |             const relativeIndent = newIndent.length - oldIndent.length;
166 |             return originalIndent + ' '.repeat(Math.max(0, relativeIndent)) + line.trimStart();
167 |           }
168 |           return line;
169 |         });
170 | 
171 |         contentLines.splice(i, oldLines.length, ...newLines);
172 |         modifiedContent = contentLines.join('\n');
173 |         matchFound = true;
174 |         break;
175 |       }
176 |     }
177 | 
178 |     if (!matchFound) {
179 |       throw new Error(`Could not find exact or whitespace-insensitive match for edit:\n${edit.oldText}`);
180 |     }
181 |   }
182 | 
183 |   const diff = createUnifiedDiff(content, modifiedContent, path.relative(process.cwd(), filePath));
184 | 
185 |   if (!dryRun) {
186 |     await fs.writeFile(filePath, modifiedContent, 'utf-8');
187 |   }
188 | 
189 |   let numBackticks = 3;
190 |   while (diff.includes('`'.repeat(numBackticks))) {
191 |     numBackticks++;
192 |   }
193 |   return `${'`'.repeat(numBackticks)}diff\n${diff}\n${'`'.repeat(numBackticks)}`;
194 | }
195 | 
196 | 
197 | interface TreeEntry {
198 |     name: string;
199 |     type: 'file' | 'directory';
200 |     children?: TreeEntry[];
201 | }
202 | 
203 | async function buildDirectoryTree(currentPath: string): Promise<TreeEntry[]> {
204 |     const entries = await fs.readdir(currentPath, {withFileTypes: true});
205 |     const result: TreeEntry[] = [];
206 | 
207 |     for (const entry of entries) {
208 |         const entryData: TreeEntry = {
209 |             name: entry.name,
210 |             type: entry.isDirectory() ? 'directory' : 'file'
211 |         };
212 | 
213 |         if (entry.isDirectory()) {
214 |             const subPath = path.join(currentPath, entry.name);
215 |              try {
216 |                 const realPath = await fs.realpath(subPath);
217 |                 if (realPath.startsWith(path.dirname(currentPath))) {
218 |                     entryData.children = await buildDirectoryTree(subPath);
219 |                 } else {
220 |                      entryData.children = [];
221 |                 }
222 |             } catch (e) {
223 |                  entryData.children = [];
224 |                  console.error(`Skipping tree build in ${subPath}: ${(e as Error).message}`);
225 |             }
226 |         }
227 |         result.push(entryData);
228 |     }
229 |     result.sort((a, b) => {
230 |         if (a.type === 'directory' && b.type === 'file') return -1;
231 |         if (a.type === 'file' && b.type === 'directory') return 1;
232 |         return a.name.localeCompare(b.name);
233 |     });
234 |     return result;
235 | }
236 | 
237 | 
238 | // Set of filesystem tool names for easy checking
239 | const filesystemToolNames = new Set([
240 |     "read_file_content", // Handles single/multiple
241 |     // "read_multiple_files_content", // Removed
242 |     "write_file_content", // Handles single/multiple
243 |     "edit_file_content",
244 |     // "create_directory", // Removed
245 |     "list_directory_contents",
246 |     "get_directory_tree",
247 |     "move_file_or_directory",
248 |     "search_filesystem",
249 |     "get_filesystem_info",
250 | ]);
251 | 
252 | 
253 | // --- MCP Server Setup ---
254 | const server = new Server(
255 |   { name: "vertex-ai-mcp-server", version: "0.5.0" },
256 |   { capabilities: { tools: {} } }
257 | );
258 | 
259 | // --- Tool Definitions Handler ---
260 | server.setRequestHandler(ListToolsRequestSchema, async () => {
261 |   // Use new config function
262 |   const config = getAIConfig();
263 |   return {
264 |       tools: allTools.map(t => ({
265 |           name: t.name,
266 |           // Inject model ID dynamically from new config structure
267 |           description: t.description.replace("${modelId}", config.modelId),
268 |           inputSchema: t.inputSchema
269 |       }))
270 |   };
271 | });
272 | 
273 | // --- Tool Call Handler ---
274 | server.setRequestHandler(CallToolRequestSchema, async (request) => {
275 |   const toolName = request.params.name;
276 |   const args = request.params.arguments ?? {};
277 | 
278 |   const toolDefinition = toolMap.get(toolName);
279 |   if (!toolDefinition) {
280 |     throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${toolName}`);
281 |   }
282 | 
283 |   try {
284 |     // --- Special Handling for Combined Tool ---
285 |     if (toolName === "save_generate_project_guidelines") {
286 |         const parsedArgs = SaveGenerateProjectGuidelinesArgsSchema.parse(args);
287 |         const { tech_stack, output_path } = parsedArgs;
288 | 
289 |         // Use new config function
290 |         const config = getAIConfig();
291 |         const { systemInstructionText, userQueryText, useWebSearch, enableFunctionCalling } = toolDefinition.buildPrompt(args, config.modelId);
292 | 
293 |         // Use new AI function call and type cast
294 |         const initialContents = buildInitialContent(systemInstructionText, userQueryText) as CombinedContent[];
295 |         const toolsForApi = getToolsForApi(enableFunctionCalling, useWebSearch);
296 | 
297 |         const generatedContent = await callGenerativeAI(
298 |             initialContents,
299 |             toolsForApi
300 |             // Config args removed
301 |         );
302 | 
303 |         const validOutputPath = validateWorkspacePath(output_path);
304 |         await fs.mkdir(path.dirname(validOutputPath), { recursive: true });
305 |         await fs.writeFile(validOutputPath, generatedContent, "utf-8");
306 | 
307 |         return {
308 |             content: [{ type: "text", text: `Successfully generated guidelines and saved to ${output_path}` }],
309 |         };
310 | 
311 |     } else if (toolName === "save_doc_snippet") {
312 |         const parsedArgs = SaveDocSnippetArgsSchema.parse(args);
313 |         const { output_path } = parsedArgs;
314 | 
315 |         const config = getAIConfig();
316 |         const { systemInstructionText, userQueryText, useWebSearch, enableFunctionCalling } = toolDefinition.buildPrompt(args, config.modelId);
317 | 
318 |         const initialContents = buildInitialContent(systemInstructionText, userQueryText) as CombinedContent[];
319 |         const toolsForApi = getToolsForApi(enableFunctionCalling, useWebSearch);
320 | 
321 |         const generatedContent = await callGenerativeAI(
322 |             initialContents,
323 |             toolsForApi
324 |         );
325 | 
326 |         const validOutputPath = validateWorkspacePath(output_path);
327 |         await fs.mkdir(path.dirname(validOutputPath), { recursive: true });
328 |         await fs.writeFile(validOutputPath, generatedContent, "utf-8");
329 | 
330 |         return {
331 |             content: [{ type: "text", text: `Successfully generated snippet and saved to ${output_path}` }],
332 |         };
333 | 
334 |     } else if (toolName === "save_topic_explanation") {
335 |         const parsedArgs = SaveTopicExplanationArgsSchema.parse(args);
336 |         const { output_path } = parsedArgs;
337 | 
338 |         const config = getAIConfig();
339 |         const { systemInstructionText, userQueryText, useWebSearch, enableFunctionCalling } = toolDefinition.buildPrompt(args, config.modelId);
340 | 
341 |         const initialContents = buildInitialContent(systemInstructionText, userQueryText) as CombinedContent[];
342 |         const toolsForApi = getToolsForApi(enableFunctionCalling, useWebSearch);
343 | 
344 |         const generatedContent = await callGenerativeAI(
345 |             initialContents,
346 |             toolsForApi
347 |         );
348 | 
349 |         const validOutputPath = validateWorkspacePath(output_path);
350 |         await fs.mkdir(path.dirname(validOutputPath), { recursive: true });
351 |         await fs.writeFile(validOutputPath, generatedContent, "utf-8");
352 | 
353 |         return {
354 |             content: [{ type: "text", text: `Successfully generated explanation and saved to ${output_path}` }],
355 |         };
356 | 
357 |     } else if (toolName === "save_answer_query_direct") {
358 |         const parsedArgs = SaveAnswerQueryDirectArgsSchema.parse(args);
359 |         const { output_path } = parsedArgs;
360 | 
361 |         const config = getAIConfig();
362 |         const { systemInstructionText, userQueryText, useWebSearch, enableFunctionCalling } = toolDefinition.buildPrompt(args, config.modelId);
363 | 
364 |         const initialContents = buildInitialContent(systemInstructionText, userQueryText) as CombinedContent[];
365 |         const toolsForApi = getToolsForApi(enableFunctionCalling, useWebSearch);
366 | 
367 |         const generatedContent = await callGenerativeAI(
368 |             initialContents,
369 |             toolsForApi
370 |         );
371 | 
372 |         const validOutputPath = validateWorkspacePath(output_path);
373 |         await fs.mkdir(path.dirname(validOutputPath), { recursive: true });
374 |         await fs.writeFile(validOutputPath, generatedContent, "utf-8");
375 | 
376 |         return {
377 |             content: [{ type: "text", text: `Successfully generated direct answer and saved to ${output_path}` }],
378 |         };
379 | 
380 |     } else if (toolName === "save_answer_query_websearch") {
381 |         const parsedArgs = SaveAnswerQueryWebsearchArgsSchema.parse(args);
382 |         const { output_path } = parsedArgs;
383 | 
384 |         const config = getAIConfig();
385 |         const { systemInstructionText, userQueryText, useWebSearch, enableFunctionCalling } = toolDefinition.buildPrompt(args, config.modelId);
386 | 
387 |         const initialContents = buildInitialContent(systemInstructionText, userQueryText) as CombinedContent[];
388 |         const toolsForApi = getToolsForApi(enableFunctionCalling, useWebSearch);
389 | 
390 |         const generatedContent = await callGenerativeAI(
391 |             initialContents,
392 |             toolsForApi
393 |         );
394 | 
395 |         const validOutputPath = validateWorkspacePath(output_path);
396 |         await fs.mkdir(path.dirname(validOutputPath), { recursive: true });
397 |         await fs.writeFile(validOutputPath, generatedContent, "utf-8");
398 | 
399 |         return {
400 |             content: [{ type: "text", text: `Successfully generated websearch answer and saved to ${output_path}` }],
401 |         };
402 | 
403 |     } // --- Filesystem Tool Execution Logic ---
404 |     else if (filesystemToolNames.has(toolName)) {
405 |       let resultText = "";
406 | 
407 |       switch (toolName) {
408 |         case "read_file_content": {
409 |           const parsed = ReadFileArgsSchema.parse(args);
410 |           if (typeof parsed.paths === 'string') {
411 |             // Handle single file read
412 |             const validPath = validateWorkspacePath(parsed.paths);
413 |             const content = await fs.readFile(validPath, "utf-8");
414 |             resultText = content;
415 |           } else {
416 |             // Handle multiple file read (similar to old read_multiple_files_content)
417 |             const results = await Promise.all(
418 |               parsed.paths.map(async (filePath: string) => {
419 |                 try {
420 |                   const validPath = validateWorkspacePath(filePath);
421 |                   const content = await fs.readFile(validPath, "utf-8");
422 |                   return `${path.relative(process.cwd(), validPath)}:\n${content}\n`;
423 |                 } catch (error) {
424 |                   const errorMessage = error instanceof Error ? error.message : String(error);
425 |                   return `${filePath}: Error - ${errorMessage}`;
426 |                 }
427 |               }),
428 |             );
429 |             resultText = results.join("\n---\n");
430 |           }
431 |           break;
432 |         }
433 |         // case "read_multiple_files_content": // Removed - logic merged into read_file_content
434 |         case "write_file_content": {
435 |           const parsed = WriteFileArgsSchema.parse(args);
436 |           // Access the 'writes' property which contains either a single object or an array
437 |           const writeOperations = Array.isArray(parsed.writes) ? parsed.writes : [parsed.writes];
438 |           const results: string[] = [];
439 | 
440 |           for (const op of writeOperations) {
441 |               try {
442 |                   const validPath = validateWorkspacePath(op.path);
443 |                   await fs.mkdir(path.dirname(validPath), { recursive: true });
444 |                   await fs.writeFile(validPath, op.content, "utf-8");
445 |                   results.push(`Successfully wrote to ${op.path}`);
446 |               } catch (error) {
447 |                   const errorMessage = error instanceof Error ? error.message : String(error);
448 |                   results.push(`Error writing to ${op.path}: ${errorMessage}`);
449 |               }
450 |           }
451 |           resultText = results.join("\n");
452 |           break;
453 |         }
454 |         case "edit_file_content": {
455 |           const parsed = EditFileArgsSchema.parse(args);
456 |           if (parsed.edits.length === 0) {
457 |              throw new McpError(ErrorCode.InvalidParams, `'edits' array cannot be empty for ${toolName}.`);
458 |           }
459 |           const validPath = validateWorkspacePath(parsed.path);
460 |           resultText = await applyFileEdits(validPath, parsed.edits, parsed.dryRun);
461 |           break;
462 |         }
463 |         // case "create_directory": // Removed
464 |         case "list_directory_contents": {
465 |           const parsed = ListDirectoryArgsSchema.parse(args);
466 |           const validPath = validateWorkspacePath(parsed.path);
467 |           const entries = await fs.readdir(validPath, { withFileTypes: true });
468 |           resultText = entries
469 |             .map((entry) => `${entry.isDirectory() ? "[DIR] " : "[FILE]"} ${entry.name}`)
470 |             .sort()
471 |             .join("\n");
472 |            if (!resultText) resultText = "(Directory is empty)";
473 |           break;
474 |         }
475 |         case "get_directory_tree": {
476 |             const parsed = DirectoryTreeArgsSchema.parse(args);
477 |             const validPath = validateWorkspacePath(parsed.path);
478 |             const treeData = await buildDirectoryTree(validPath);
479 |             resultText = JSON.stringify(treeData, null, 2);
480 |             break;
481 |         }
482 |         case "move_file_or_directory": {
483 |           const parsed = MoveFileArgsSchema.parse(args);
484 |            if (parsed.source === parsed.destination) {
485 |              throw new McpError(ErrorCode.InvalidParams, `Source and destination paths cannot be the same for ${toolName}.`);
486 |            }
487 |           const validSourcePath = validateWorkspacePath(parsed.source);
488 |           const validDestPath = validateWorkspacePath(parsed.destination);
489 |           await fs.mkdir(path.dirname(validDestPath), { recursive: true });
490 |           await fs.rename(validSourcePath, validDestPath);
491 |           resultText = `Successfully moved ${parsed.source} to ${parsed.destination}`;
492 |           break;
493 |         }
494 |         case "search_filesystem": {
495 |           const parsed = SearchFilesArgsSchema.parse(args);
496 |           const validPath = validateWorkspacePath(parsed.path);
497 |           const results: string[] = [];
498 |           await searchFilesRecursive(validPath, validPath, parsed.pattern, parsed.excludePatterns, results);
499 |           resultText = results.length > 0 ? results.join("\n") : "No matches found";
500 |           break;
501 |         }
502 |         case "get_filesystem_info": {
503 |           const parsed = GetFileInfoArgsSchema.parse(args);
504 |           const validPath = validateWorkspacePath(parsed.path);
505 |           const info = await getFileStats(validPath);
506 |           resultText = `Path: ${parsed.path}\nType: ${info.isDirectory ? 'Directory' : 'File'}\nSize: ${info.size} bytes\nCreated: ${info.created.toISOString()}\nModified: ${info.modified.toISOString()}\nAccessed: ${info.accessed.toISOString()}\nPermissions: ${info.permissions}`;
507 |           break;
508 |         }
509 |         default:
510 |           throw new McpError(ErrorCode.MethodNotFound, `Filesystem tool handler not implemented: ${toolName}`);
511 |       }
512 | 
513 |       // Return successful filesystem operation result
514 |       return {
515 |         content: [{ type: "text", text: resultText }],
516 |       };
517 | } else if (toolName === "execute_terminal_command") { // Renamed tool name check
518 |     const parsed = ExecuteTerminalCommandArgsSchema.parse(args); // Renamed schema
519 |     const execPromise = util.promisify(exec);
520 | 
521 |     const options: { cwd?: string; timeout?: number; signal?: AbortSignal } = {};
522 |         if (parsed.cwd) {
523 |             options.cwd = validateWorkspacePath(parsed.cwd); // Reuse validation
524 |         } else {
525 |             options.cwd = process.cwd(); // Default to workspace root
526 |         }
527 | 
528 |         let controller: AbortController | undefined;
529 |         if (parsed.timeout) {
530 |             controller = new AbortController();
531 |             options.signal = controller.signal;
532 |             options.timeout = parsed.timeout * 1000; // Convert seconds to milliseconds
533 |         }
534 | 
535 |         try {
536 |             // Execute the command
537 |             const { stdout, stderr } = await execPromise(parsed.command, options);
538 |             const output = `STDOUT:\n${stdout}\nSTDERR:\n${stderr}`;
539 |             return {
540 |                 content: [{ type: "text", text: output.trim() || "(No output)" }],
541 |             };
542 |         } catch (error: any) {
543 |             // Handle different error types
544 |             let errorMessage = "Command execution failed.";
545 |             if (error.signal === 'SIGTERM' || error.code === 'ABORT_ERR') {
546 |                 errorMessage = `Command timed out after ${parsed.timeout} seconds.`;
547 |             } else if (error.stderr || error.stdout) {
548 |                 errorMessage = `Command failed with exit code ${error.code || 'unknown'}.\nSTDOUT:\n${error.stdout}\nSTDERR:\n${error.stderr}`;
549 |             } else if (error instanceof Error) {
550 |                 errorMessage = `Command execution error: ${error.message}`;
551 |             }
552 |             throw new McpError(ErrorCode.InternalError, errorMessage);
553 |         } finally {
554 |              // The finally block might not be strictly necessary here as execPromise handles cleanup
555 |              // if (controller) { controller.abort(); } // Example if manual cleanup were needed
556 |         }
557 | 
558 |     } else {
559 |       // --- Generic AI Tool Logic (Non-filesystem, non-combined) ---
560 |       const config = getAIConfig(); // Use renamed config function
561 |       if (!toolDefinition.buildPrompt) {
562 |         throw new McpError(ErrorCode.MethodNotFound, `Tool ${toolName} is missing required buildPrompt logic.`);
563 |       }
564 |       const { systemInstructionText, userQueryText, useWebSearch, enableFunctionCalling } = toolDefinition.buildPrompt(args, config.modelId);
565 |       const initialContents = buildInitialContent(systemInstructionText, userQueryText) as CombinedContent[]; // Cast
566 |       const toolsForApi = getToolsForApi(enableFunctionCalling, useWebSearch);
567 | 
568 |       // Call the unified AI function
569 |       const responseText = await callGenerativeAI(
570 |           initialContents,
571 |           toolsForApi
572 |           // Config is implicitly used by callGenerativeAI now
573 |       );
574 | 
575 |       return {
576 |         content: [{ type: "text", text: responseText }],
577 |       };
578 |     }
579 | 
580 |   } catch (error) {
581 |      // Centralized error handling
582 |     if (error instanceof z.ZodError) {
583 |         throw new McpError(ErrorCode.InvalidParams, `Invalid arguments for ${toolName}: ${error.errors.map(e => `${e.path.join('.')}: ${e.message}`).join(', ')}`);
584 |     } else if (error instanceof McpError) {
585 |       throw error;
586 |     } else if (error instanceof Error && error.message.includes('ENOENT')) {
587 |          throw new McpError(ErrorCode.InvalidParams, `Path not found for tool ${toolName}: ${error.message}`);
588 |     } else {
589 |       console.error(`[${new Date().toISOString()}] Unexpected error in tool handler (${toolName}):`, error);
590 |       throw new McpError(ErrorCode.InternalError, `Unexpected server error during ${toolName}: ${(error as Error).message || "Unknown"}`);
591 |     }
592 |   }
593 | });
594 | 
595 | // --- Server Start ---
596 | async function main() { // Entry point: start the MCP server over a stdio transport.
597 |   const transport = new StdioServerTransport(); // Transport speaking the MCP protocol over stdin/stdout.
598 |   console.error(`[${new Date().toISOString()}] vertex-ai-mcp-server connecting via stdio...`); // console.error keeps logs on stderr — presumably so stdout stays free for protocol traffic.
599 |   await server.connect(transport);
600 |   console.error(`[${new Date().toISOString()}] vertex-ai-mcp-server connected.`);
601 | }
602 | 
603 | main().catch((error) => { // Startup failure handler: log the error and exit non-zero.
604 |   console.error(`[${new Date().toISOString()}] Server failed to start:`, error);
605 |   process.exit(1); // Non-zero exit code lets a supervisor/parent detect the failed start.
606 | });
607 | 
608 | // --- Graceful Shutdown ---
609 | const shutdown = async (signal: string) => { // Graceful-shutdown handler shared by SIGINT and SIGTERM.
610 |     console.error(`[${new Date().toISOString()}] Received ${signal}. Shutting down server...`);
611 |     try {
612 |       await server.close(); // Close the server connection before exiting.
613 |       console.error(`[${new Date().toISOString()}] Server shut down gracefully.`);
614 |       process.exit(0); // Clean exit once close() resolves.
615 |     } catch (shutdownError) {
616 |       console.error(`[${new Date().toISOString()}] Error during server shutdown:`, shutdownError);
617 |       process.exit(1); // Exit non-zero so callers can detect the failed shutdown.
618 |     }
619 | };
620 | process.on('SIGINT', () => shutdown('SIGINT')); // Ctrl-C in a terminal. Fire-and-forget is safe: shutdown() handles its own errors and always exits.
621 | process.on('SIGTERM', () => shutdown('SIGTERM')); // Default signal from `kill` / process managers.
622 | 
```
Page 2/2 · First · Prev · Next · Last