#
tokens: 45591/50000 9/49 files (page 2/3)
lines: off (toggle) GitHub
raw markdown copy
This is page 2 of 3. Use http://codebase.md/thealchemist6/codecompass-mcp?page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .env.example
├── .gitignore
├── config
│   ├── .eslintrc.json
│   ├── .prettierignore
│   ├── .prettierrc
│   ├── README.md
│   └── tsconfig.dev.json
├── CONTRIBUTING.md
├── docker
│   ├── docker-compose.dev.yml
│   ├── docker-compose.yml
│   ├── Dockerfile.dev
│   └── README.md
├── Dockerfile
├── docs
│   ├── API.md
│   ├── DOCKER.md
│   ├── legacy-tools
│   │   ├── chat.ts
│   │   ├── extract.ts
│   │   ├── files.ts
│   │   ├── README.md
│   │   ├── refactor.ts
│   │   ├── repository.ts
│   │   ├── template.ts
│   │   └── transform.ts
│   ├── MONITORING.md
│   ├── README.md
│   └── SETUP.md
├── examples
│   ├── basic-usage.js
│   └── basic-usage.md
├── LICENSE
├── package-lock.json
├── package.json
├── README.md
├── scripts
│   ├── docker-build.sh
│   ├── docker-logs.sh
│   ├── docker-run.sh
│   ├── monitor.js
│   └── start-mcp.sh
├── src
│   ├── index.ts
│   ├── services
│   │   ├── github.ts
│   │   ├── openai.ts
│   │   └── refactor.ts
│   ├── tools
│   │   └── consolidated.ts
│   ├── types
│   │   ├── index.ts
│   │   └── responses.ts
│   └── utils
│       ├── config.ts
│       ├── file-processor.ts
│       ├── logger.ts
│       ├── monitoring.ts
│       ├── security.ts
│       └── validation.ts
├── tests
│   └── verify-installation.sh
└── tsconfig.json
```

# Files

--------------------------------------------------------------------------------
/src/utils/security.ts:
--------------------------------------------------------------------------------

```typescript
import { SecurityCheck, SecurityIssue } from '../types/index.js';

export class SecurityService {
  /** Sliding-window request timestamps per key; backing store for rateLimit(). */
  private static rateLimitStore = new Map<string, number[]>();

  /**
   * Run heuristic, regex-based security checks over a code snippet.
   *
   * A fixed set of cross-language checks always runs (hardcoded secrets,
   * SQL injection, XSS, command injection, weak randomness); further
   * checks are added based on `language`.
   *
   * @param code - Source code to scan.
   * @param language - Language name, case-insensitive (e.g. 'TypeScript').
   * @returns Overall pass/fail, the issues found, and remediation advice.
   */
  static checkCode(code: string, language: string): SecurityCheck {
    // Each checker returns a (possibly empty) list of issues.
    const checks: SecurityIssue[][] = [
      this.checkHardcodedSecrets(code),
      this.checkSqlInjection(code),
      this.checkXssVulnerabilities(code),
      this.checkCommandInjection(code),
      this.checkInsecureRandomness(code),
    ];

    // Language-specific checks.
    switch (language.toLowerCase()) {
      case 'javascript':
      case 'typescript':
        checks.push(
          this.checkEvalUsage(code),
          this.checkInnerHtml(code),
          this.checkUnsafeRegex(code)
        );
        break;
      case 'python':
        checks.push(
          this.checkPythonEval(code),
          this.checkPickleUsage(code),
          this.checkShellCommand(code)
        );
        break;
      case 'java':
        checks.push(
          this.checkJavaDeserialization(code),
          this.checkRuntimeExec(code)
        );
        break;
    }

    const allIssues = checks.flat();

    return {
      passed: allIssues.length === 0,
      issues: allIssues,
      recommendations: this.generateRecommendations(allIssues),
    };
  }

  /**
   * Mask obviously sensitive values (API keys, tokens, database connection
   * strings, e-mail addresses, passwords/secrets) in a code snippet.
   *
   * Masking is regex-based and best-effort; it is not a substitute for
   * keeping secrets out of source in the first place.
   *
   * @param code - Source code that may contain sensitive literals.
   * @returns The code with recognized sensitive values replaced by markers.
   */
  static sanitizeCode(code: string): string {
    let sanitized = code;

    // API keys and tokens.
    sanitized = sanitized.replace(/['"](sk-[a-zA-Z0-9]{32,})['"]/g, '"<API_KEY_MASKED>"');
    sanitized = sanitized.replace(/['"](ghp_[a-zA-Z0-9]{36})['"]/g, '"<GITHUB_TOKEN_MASKED>"');
    sanitized = sanitized.replace(/['"](xoxb-[a-zA-Z0-9-]{51,})['"]/g, '"<SLACK_TOKEN_MASKED>"');

    // Database connection strings.
    sanitized = sanitized.replace(/['"](postgresql:\/\/[^'"]+)['"]/g, '"<DATABASE_URL_MASKED>"');
    sanitized = sanitized.replace(/['"](mysql:\/\/[^'"]+)['"]/g, '"<DATABASE_URL_MASKED>"');
    sanitized = sanitized.replace(/['"](mongodb:\/\/[^'"]+)['"]/g, '"<DATABASE_URL_MASKED>"');

    // E-mail addresses.
    sanitized = sanitized.replace(/['"]([\w\.-]+@[\w\.-]+\.\w+)['"]/g, '"<EMAIL_MASKED>"');

    // Passwords/secrets. Keep the matched key and separator ($1) so that
    // `password = "x"` stays an assignment and `PASSWORD: "x"` keeps its
    // casing — the previous replacement rewrote both to `password: …`,
    // corrupting the surrounding code.
    sanitized = sanitized.replace(/(password\s*[:=]\s*)['"](.*?)['"]/gi, '$1"<PASSWORD_MASKED>"');
    sanitized = sanitized.replace(/(secret\s*[:=]\s*)['"](.*?)['"]/gi, '$1"<SECRET_MASKED>"');

    return sanitized;
  }

  /**
   * Conservative safety check for user-supplied relative paths.
   *
   * Rejects parent-directory references, POSIX absolute paths, characters
   * invalid on common filesystems, and null bytes.
   * NOTE(review): Windows absolute paths ('C:\x') are only rejected via the
   * ':' in the character class — confirm that is intended.
   */
  static isPathSafe(path: string): boolean {
    if (path.includes('..')) return false;     // directory traversal
    if (path.startsWith('/')) return false;    // absolute path
    if (/[<>:"|?*]/.test(path)) return false;  // dangerous characters
    if (path.includes('\0')) return false;     // null bytes
    return true;
  }

  /** Allow outbound requests only to GitHub (web or API) over HTTPS. */
  static validateApiAccess(url: string): boolean {
    return url.startsWith('https://api.github.com/') || url.startsWith('https://github.com/');
  }

  /**
   * Sliding-window, in-memory rate limiter.
   *
   * @param key - Identifier to rate-limit on (e.g. user id or IP).
   * @param limit - Maximum requests allowed per window (default 100).
   * @param window - Window length in milliseconds (default 60 000).
   * @returns true if this request is allowed; false if the limit is hit.
   */
  static rateLimit(key: string, limit: number = 100, window: number = 60000): boolean {
    const now = Date.now();
    const windowStart = now - window;

    // Drop timestamps that have aged out of the window.
    const validRequests = (this.rateLimitStore.get(key) || []).filter(
      (time: number) => time > windowStart
    );

    if (validRequests.length >= limit) {
      return false;
    }

    validRequests.push(now);
    this.rateLimitStore.set(key, validRequests);

    return true;
  }

  /**
   * Shared matcher: emit one copy of `issue` (with file set to 'current')
   * for every pattern in `patterns` that matches `code`.
   */
  private static scanPatterns(
    code: string,
    patterns: RegExp[],
    issue: Pick<SecurityIssue, 'type' | 'description' | 'suggestion'>
  ): SecurityIssue[] {
    const found: SecurityIssue[] = [];
    for (const pattern of patterns) {
      if (pattern.test(code)) {
        found.push({ ...issue, file: 'current' });
      }
    }
    return found;
  }

  /** Detect well-known credential formats embedded as string literals. */
  private static checkHardcodedSecrets(code: string): SecurityIssue[] {
    const issues: SecurityIssue[] = [];

    const patterns = [
      { pattern: /['"](sk-[a-zA-Z0-9]{32,})['"]/, name: 'OpenAI API Key' },
      { pattern: /['"](ghp_[a-zA-Z0-9]{36})['"]/, name: 'GitHub Personal Access Token' },
      { pattern: /['"](xoxb-[a-zA-Z0-9-]{51,})['"]/, name: 'Slack Bot Token' },
      { pattern: /['"](AIza[0-9A-Za-z-_]{35})['"]/, name: 'Google API Key' },
      { pattern: /['"](AKIA[0-9A-Z]{16})['"]/, name: 'AWS Access Key' },
      { pattern: /['"](ya29\.[0-9A-Za-z\-_]+)['"]/, name: 'Google OAuth Token' },
    ];

    for (const { pattern, name } of patterns) {
      if (pattern.test(code)) {
        issues.push({
          type: 'high',
          description: `Hardcoded ${name} found in code`,
          file: 'current',
          suggestion: `Store ${name} in environment variables or secure configuration`,
        });
      }
    }

    return issues;
  }

  /** String-concatenated SQL query heuristics. */
  private static checkSqlInjection(code: string): SecurityIssue[] {
    return this.scanPatterns(
      code,
      [
        /query\s*\(\s*['"]\s*SELECT\s.*?\+.*?['"]\s*\)/i,
        /execute\s*\(\s*['"]\s*SELECT\s.*?\+.*?['"]\s*\)/i,
        /sql\s*=\s*['"]\s*SELECT\s.*?\+.*?['"]/i,
      ],
      {
        type: 'high',
        description: 'Potential SQL injection vulnerability',
        suggestion: 'Use parameterized queries or prepared statements',
      }
    );
  }

  /**
   * DOM-based XSS heuristics. The eval() pattern here overlaps
   * checkEvalUsage for JS/TS, so such code is reported by both checkers —
   * this duplication is preserved intentionally.
   */
  private static checkXssVulnerabilities(code: string): SecurityIssue[] {
    return this.scanPatterns(
      code,
      [
        /dangerouslySetInnerHTML/,
        /innerHTML\s*=\s*.*?\+/,
        /document\.write\s*\(/,
        /eval\s*\(/,
      ],
      {
        type: 'medium',
        description: 'Potential XSS vulnerability',
        suggestion: 'Sanitize user input and use safe DOM manipulation methods',
      }
    );
  }

  /** Shell/process invocations built from concatenated strings. */
  private static checkCommandInjection(code: string): SecurityIssue[] {
    return this.scanPatterns(
      code,
      [
        /exec\s*\(\s*.*?\+.*?\)/,
        /system\s*\(\s*.*?\+.*?\)/,
        /spawn\s*\(\s*.*?\+.*?\)/,
        /execSync\s*\(\s*.*?\+.*?\)/,
      ],
      {
        type: 'high',
        description: 'Potential command injection vulnerability',
        suggestion: 'Validate and sanitize input before passing to system commands',
      }
    );
  }

  /** Non-cryptographic RNG usage (unsafe for tokens, keys, etc.). */
  private static checkInsecureRandomness(code: string): SecurityIssue[] {
    return this.scanPatterns(
      code,
      [/Math\.random\(\)/, /random\.random\(\)/, /rand\(\)/],
      {
        type: 'medium',
        description: 'Insecure random number generation',
        suggestion: 'Use cryptographically secure random number generators',
      }
    );
  }

  /** JS/TS: direct eval() usage. */
  private static checkEvalUsage(code: string): SecurityIssue[] {
    return this.scanPatterns(code, [/eval\s*\(/], {
      type: 'high',
      description: 'Use of eval() function',
      suggestion: 'Avoid eval() and use safer alternatives like JSON.parse() or Function constructor',
    });
  }

  /** JS/TS: assignment to innerHTML. */
  private static checkInnerHtml(code: string): SecurityIssue[] {
    return this.scanPatterns(code, [/innerHTML\s*=/], {
      type: 'medium',
      description: 'Direct use of innerHTML',
      suggestion: 'Use textContent or sanitize HTML content',
    });
  }

  /** JS/TS: literal regex shapes prone to catastrophic backtracking (ReDoS). */
  private static checkUnsafeRegex(code: string): SecurityIssue[] {
    return this.scanPatterns(
      code,
      [/\(\.\*\)\+/, /\(\.\+\)\+/, /\(\.\*\)\*/, /\(\.\+\)\*/],
      {
        type: 'medium',
        description: 'Potential ReDoS (Regular Expression Denial of Service) vulnerability',
        suggestion: 'Review regex patterns for potential catastrophic backtracking',
      }
    );
  }

  /** Python: dynamic code execution primitives. */
  private static checkPythonEval(code: string): SecurityIssue[] {
    return this.scanPatterns(
      code,
      [/eval\s*\(/, /exec\s*\(/, /compile\s*\(/],
      {
        type: 'high',
        description: 'Use of dangerous Python eval/exec functions',
        suggestion: 'Use safer alternatives like ast.literal_eval() or avoid dynamic code execution',
      }
    );
  }

  /** Python: unpickling, which executes arbitrary code on untrusted data. */
  private static checkPickleUsage(code: string): SecurityIssue[] {
    return this.scanPatterns(code, [/pickle\.loads?\s*\(/], {
      type: 'high',
      description: 'Use of pickle.load() or pickle.loads()',
      suggestion: 'Avoid pickle for untrusted data, use JSON or other safe serialization formats',
    });
  }

  /** Python: subprocess/os calls that go through a shell. */
  private static checkShellCommand(code: string): SecurityIssue[] {
    return this.scanPatterns(
      code,
      [
        /subprocess\.call\s*\(\s*.*shell\s*=\s*True/,
        /os\.system\s*\(/,
        /subprocess\.run\s*\(\s*.*shell\s*=\s*True/,
      ],
      {
        type: 'high',
        description: 'Use of shell commands with potential injection risk',
        suggestion: 'Use subprocess without shell=True or validate input thoroughly',
      }
    );
  }

  /** Java: native deserialization of untrusted streams. */
  private static checkJavaDeserialization(code: string): SecurityIssue[] {
    return this.scanPatterns(code, [/ObjectInputStream|readObject\s*\(/], {
      type: 'high',
      description: 'Java deserialization vulnerability',
      suggestion: 'Validate serialized data or use safer serialization methods',
    });
  }

  /** Java: Runtime.exec command execution. */
  private static checkRuntimeExec(code: string): SecurityIssue[] {
    return this.scanPatterns(code, [/Runtime\.getRuntime\(\)\.exec\s*\(/], {
      type: 'high',
      description: 'Use of Runtime.exec() for command execution',
      suggestion: 'Use ProcessBuilder with proper input validation',
    });
  }

  /**
   * Turn a list of issues into prioritized, human-readable next steps.
   * Severity buckets are summarized first, followed by general hygiene
   * recommendations that always apply when any issue was found.
   */
  private static generateRecommendations(issues: SecurityIssue[]): string[] {
    const recommendations: string[] = [];

    if (issues.length === 0) {
      recommendations.push('No security issues detected');
      return recommendations;
    }

    const highIssues = issues.filter(issue => issue.type === 'high');
    const mediumIssues = issues.filter(issue => issue.type === 'medium');
    const lowIssues = issues.filter(issue => issue.type === 'low');

    if (highIssues.length > 0) {
      recommendations.push(`Address ${highIssues.length} high-priority security issues immediately`);
    }

    if (mediumIssues.length > 0) {
      recommendations.push(`Review ${mediumIssues.length} medium-priority security issues`);
    }

    if (lowIssues.length > 0) {
      recommendations.push(`Consider addressing ${lowIssues.length} low-priority security issues`);
    }

    recommendations.push('Implement security linting in your CI/CD pipeline');
    recommendations.push('Regular security audits and dependency updates');
    recommendations.push('Use environment variables for sensitive configuration');

    return recommendations;
  }
}
```

--------------------------------------------------------------------------------
/src/utils/file-processor.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from 'zod';
import { getConfig } from './config.js';

// Result of processing one file: on success, `content` (and optionally
// `metadata`) are set; on failure, `error` carries a machine-readable code.
export interface FileProcessingResult {
  success: boolean;
  filePath: string;
  content?: string;
  metadata?: FileMetadata;
  error?: {
    code: string;
    message: string;
    details?: any;
  };
}

// Metadata derived from a path (and optionally content); no file-system access.
export interface FileMetadata {
  name: string;                // basename of the path
  extension: string;           // lower-cased, with the dot (e.g. '.ts'); '' if none
  size: number;                // UTF-8 byte length of the content, 0 if unknown
  type: 'text' | 'binary' | 'image' | 'archive' | 'unknown';
  mimeType?: string;
  encoding?: string;           // set to 'utf8' only for text files with content
  lineCount?: number;          // set only for text files with content
  language?: string;           // set when the extension maps to a known language
  lastModified?: string;
  checksum?: string;
}

// Zod schema rejecting paths that could escape the workspace: parent
// references, absolute paths, and double slashes.
// NOTE(review): after backslashes are normalized to '/', the '..\\' check
// can never match — kept as redundant defense; confirm before removing.
const FilePathSchema = z.string().refine(
  (path) => {
    // Security validation: prevent path traversal
    const normalizedPath = path.replace(/\\/g, '/');
    return !normalizedPath.includes('../') && 
           !normalizedPath.includes('..\\') &&
           !normalizedPath.startsWith('/') &&
           !normalizedPath.includes('//');
  },
  { message: 'Invalid file path: potential security risk detected' }
);

// Options shared by processSingleFile and batchProcessFiles.
export interface BatchProcessingOptions {
  maxConcurrent: number;       // max files processed in parallel per batch
  continueOnError: boolean;    // false = abort the whole batch on first failure
  validatePaths: boolean;      // false = skip path-safety validation
  includeMetadata: boolean;    // true = attach FileMetadata to each result
  maxFileSize: number;         // per-file size limit in bytes
  allowedExtensions?: string[];  // whitelist of extensions (with dot), e.g. ['.ts']
  excludePatterns?: string[];    // regex source strings tested against the path
}

// File type detection based on extension.
// Maps a lower-cased file extension (with leading dot) to a MIME type; used
// by extractFileMetadata. Unknown extensions fall back to
// 'application/octet-stream' at the lookup site.
const FILE_TYPE_MAP: Record<string, string> = {
  // Text files
  '.txt': 'text/plain',
  '.md': 'text/markdown',
  '.json': 'application/json',
  '.yaml': 'application/yaml',
  '.yml': 'application/yaml',
  '.xml': 'application/xml',
  '.csv': 'text/csv',
  '.log': 'text/plain',
  
  // Code files
  '.js': 'text/javascript',
  '.jsx': 'text/javascript',
  '.ts': 'text/typescript',
  '.tsx': 'text/typescript',
  '.py': 'text/python',
  '.java': 'text/java',
  '.cpp': 'text/cpp',
  '.c': 'text/c',
  '.h': 'text/c',
  '.go': 'text/go',
  '.rs': 'text/rust',
  '.php': 'text/php',
  '.rb': 'text/ruby',
  '.swift': 'text/swift',
  '.kt': 'text/kotlin',
  '.scala': 'text/scala',
  '.clj': 'text/clojure',
  '.html': 'text/html',
  '.css': 'text/css',
  '.scss': 'text/scss',
  '.sass': 'text/sass',
  '.less': 'text/less',
  '.sql': 'text/sql',
  '.sh': 'text/shell',
  '.bash': 'text/shell',
  '.zsh': 'text/shell',
  '.fish': 'text/shell',
  '.ps1': 'text/powershell',
  '.r': 'text/r',
  '.m': 'text/matlab',
  '.pl': 'text/perl',
  '.lua': 'text/lua',
  '.dart': 'text/dart',
  '.elm': 'text/elm',
  '.ex': 'text/elixir',
  '.exs': 'text/elixir',
  '.erl': 'text/erlang',
  '.hrl': 'text/erlang',
  '.fs': 'text/fsharp',
  '.fsx': 'text/fsharp',
  '.ml': 'text/ocaml',
  '.mli': 'text/ocaml',
  '.hs': 'text/haskell',
  '.lhs': 'text/haskell',
  '.jl': 'text/julia',
  '.nim': 'text/nim',
  '.nims': 'text/nim',
  '.cr': 'text/crystal',
  '.d': 'text/d',
  '.zig': 'text/zig',
  '.v': 'text/v',
  '.vsh': 'text/v',
  
  // Configuration files
  '.toml': 'application/toml',
  '.ini': 'text/plain',
  '.cfg': 'text/plain',
  '.conf': 'text/plain',
  '.env': 'text/plain',
  '.properties': 'text/plain',
  
  // Documentation
  '.rst': 'text/restructuredtext',
  '.adoc': 'text/asciidoc',
  '.tex': 'text/latex',
  
  // Images
  '.jpg': 'image/jpeg',
  '.jpeg': 'image/jpeg',
  '.png': 'image/png',
  '.gif': 'image/gif',
  '.bmp': 'image/bmp',
  '.svg': 'image/svg+xml',
  '.webp': 'image/webp',
  '.ico': 'image/x-icon',
  
  // Archives
  '.zip': 'application/zip',
  '.tar': 'application/tar',
  '.gz': 'application/gzip',
  '.7z': 'application/x-7z-compressed',
  '.rar': 'application/vnd.rar',
  '.bz2': 'application/bzip2',
  '.xz': 'application/xz',
  
  // Binary
  '.exe': 'application/octet-stream',
  '.dll': 'application/octet-stream',
  '.so': 'application/octet-stream',
  '.dylib': 'application/octet-stream',
  '.bin': 'application/octet-stream',
};

// Language detection based on file extension.
// Maps a lower-cased extension to a language identifier for statistics and
// display; extensions absent here simply yield undefined.
const LANGUAGE_MAP: Record<string, string> = {
  '.js': 'javascript',
  '.jsx': 'javascript',
  '.ts': 'typescript',
  '.tsx': 'typescript',
  '.py': 'python',
  '.java': 'java',
  '.cpp': 'cpp',
  '.c': 'c',
  '.h': 'c',
  '.go': 'go',
  '.rs': 'rust',
  '.php': 'php',
  '.rb': 'ruby',
  '.swift': 'swift',
  '.kt': 'kotlin',
  '.scala': 'scala',
  '.clj': 'clojure',
  '.html': 'html',
  '.css': 'css',
  '.scss': 'scss',
  '.sass': 'sass',
  '.less': 'less',
  '.sql': 'sql',
  '.sh': 'shell',
  '.bash': 'shell',
  '.zsh': 'shell',
  '.fish': 'shell',
  '.ps1': 'powershell',
  '.r': 'r',
  '.m': 'matlab',  // NOTE(review): '.m' is also Objective-C; mapped to MATLAB here
  '.pl': 'perl',
  '.lua': 'lua',
  '.dart': 'dart',
  '.elm': 'elm',
  '.ex': 'elixir',
  '.exs': 'elixir',
  '.erl': 'erlang',
  '.hrl': 'erlang',
  '.fs': 'fsharp',
  '.fsx': 'fsharp',
  '.ml': 'ocaml',
  '.mli': 'ocaml',
  '.hs': 'haskell',
  '.lhs': 'haskell',
  '.jl': 'julia',
  '.nim': 'nim',
  '.nims': 'nim',
  '.cr': 'crystal',
  '.d': 'd',
  '.zig': 'zig',
  '.v': 'v',  // NOTE(review): '.v' is also Verilog/Coq; mapped to the V language here
  '.vsh': 'v',
};

// Map a MIME type onto one of the coarse buckets used by FileMetadata.type.
// Checks run in priority order: text, image, archive, binary, else unknown.
function categorizeFileType(mimeType: string): 'text' | 'binary' | 'image' | 'archive' | 'unknown' {
  const isTextLike =
    mimeType.startsWith('text/') ||
    mimeType.includes('json') ||
    mimeType.includes('xml') ||
    mimeType.includes('yaml');
  if (isTextLike) {
    return 'text';
  }
  if (mimeType.startsWith('image/')) {
    return 'image';
  }
  if (['zip', 'tar', 'compress'].some((token) => mimeType.includes(token))) {
    return 'archive';
  }
  if (['octet-stream', 'binary'].some((token) => mimeType.includes(token))) {
    return 'binary';
  }
  return 'unknown';
}

// Validate a file path against the security schema (no traversal, no
// absolute paths, no double slashes). Returns { valid: true } on success,
// otherwise { valid: false, error } with the first validation message.
export function validateFilePath(filePath: string): { valid: boolean; error?: string } {
  const parsed = FilePathSchema.safeParse(filePath);
  if (parsed.success) {
    return { valid: true };
  }
  return {
    valid: false,
    error: parsed.error.errors[0]?.message ?? 'Invalid file path',
  };
}

// Extract file metadata
export function extractFileMetadata(filePath: string, content?: string): FileMetadata {
  const name = filePath.split('/').pop() || filePath;
  const extensionMatch = name.match(/\.[^.]+$/);
  const extension = extensionMatch ? extensionMatch[0].toLowerCase() : '';
  
  const mimeType = FILE_TYPE_MAP[extension] || 'application/octet-stream';
  const type = categorizeFileType(mimeType);
  const language = LANGUAGE_MAP[extension];
  
  const metadata: FileMetadata = {
    name,
    extension,
    size: content ? Buffer.byteLength(content, 'utf8') : 0,
    type,
    mimeType,
    language,
  };
  
  // Add line count for text files
  if (type === 'text' && content) {
    metadata.lineCount = content.split('\n').length;
    metadata.encoding = 'utf8';
  }
  
  return metadata;
}

// Process a single file with error handling
export async function processSingleFile(
  filePath: string,
  content: string,
  options: Partial<BatchProcessingOptions> = {}
): Promise<FileProcessingResult> {
  const config = getConfig();
  
  try {
    // Validate file path if requested
    if (options.validatePaths !== false) {
      const validation = validateFilePath(filePath);
      if (!validation.valid) {
        return {
          success: false,
          filePath,
          error: {
            code: 'INVALID_FILE_PATH',
            message: validation.error || 'Invalid file path',
          },
        };
      }
    }
    
    // Check file size
    const fileSize = Buffer.byteLength(content, 'utf8');
    const maxSize = options.maxFileSize || config.limits.maxFileSize;
    
    if (fileSize > maxSize) {
      return {
        success: false,
        filePath,
        error: {
          code: 'FILE_TOO_LARGE',
          message: `File size (${fileSize} bytes) exceeds maximum allowed size (${maxSize} bytes)`,
          details: { fileSize, maxSize },
        },
      };
    }
    
    // Check allowed extensions
    if (options.allowedExtensions && options.allowedExtensions.length > 0) {
      const metadata = extractFileMetadata(filePath);
      if (!options.allowedExtensions.includes(metadata.extension)) {
        return {
          success: false,
          filePath,
          error: {
            code: 'EXTENSION_NOT_ALLOWED',
            message: `File extension '${metadata.extension}' is not allowed`,
            details: { extension: metadata.extension, allowedExtensions: options.allowedExtensions },
          },
        };
      }
    }
    
    // Check exclude patterns
    if (options.excludePatterns && options.excludePatterns.length > 0) {
      const shouldExclude = options.excludePatterns.some(pattern => {
        const regex = new RegExp(pattern);
        return regex.test(filePath);
      });
      
      if (shouldExclude) {
        return {
          success: false,
          filePath,
          error: {
            code: 'FILE_EXCLUDED',
            message: `File matches exclude pattern`,
            details: { filePath, excludePatterns: options.excludePatterns },
          },
        };
      }
    }
    
    // Extract metadata if requested
    const metadata = options.includeMetadata ? extractFileMetadata(filePath, content) : undefined;
    
    return {
      success: true,
      filePath,
      content,
      metadata,
    };
    
  } catch (error) {
    return {
      success: false,
      filePath,
      error: {
        code: 'PROCESSING_ERROR',
        message: error instanceof Error ? error.message : 'Unknown processing error',
        details: error,
      },
    };
  }
}

// Batch process multiple files
export async function batchProcessFiles(
  files: Array<{ path: string; content: string }>,
  options: Partial<BatchProcessingOptions> = {}
): Promise<{
  results: FileProcessingResult[];
  summary: {
    total: number;
    successful: number;
    failed: number;
    errors: Array<{ filePath: string; error: string }>;
  };
}> {
  const config = getConfig();
  const maxConcurrent = options.maxConcurrent || config.limits.maxConcurrentRequests;
  const continueOnError = options.continueOnError !== false;
  
  const results: FileProcessingResult[] = [];
  const errors: Array<{ filePath: string; error: string }> = [];
  
  // Process files in batches to respect concurrency limits
  for (let i = 0; i < files.length; i += maxConcurrent) {
    const batch = files.slice(i, i + maxConcurrent);
    
    const batchPromises = batch.map(async (file) => {
      try {
        const result = await processSingleFile(file.path, file.content, options);
        
        if (!result.success && result.error) {
          errors.push({
            filePath: file.path,
            error: result.error.message,
          });
          
          if (!continueOnError) {
            throw new Error(`Processing failed for ${file.path}: ${result.error.message}`);
          }
        }
        
        return result;
      } catch (error) {
        const errorResult: FileProcessingResult = {
          success: false,
          filePath: file.path,
          error: {
            code: 'BATCH_PROCESSING_ERROR',
            message: error instanceof Error ? error.message : 'Unknown batch processing error',
            details: error,
          },
        };
        
        errors.push({
          filePath: file.path,
          error: errorResult.error!.message,
        });
        
        if (!continueOnError) {
          throw error;
        }
        
        return errorResult;
      }
    });
    
    const batchResults = await Promise.all(batchPromises);
    results.push(...batchResults);
  }
  
  const successful = results.filter(r => r.success).length;
  const failed = results.filter(r => !r.success).length;
  
  return {
    results,
    summary: {
      total: files.length,
      successful,
      failed,
      errors,
    },
  };
}

// Keep only files whose extension (per extractFileMetadata) is in the
// allowed list. Extensions include the leading dot, e.g. '.ts'.
export function filterFilesByExtension(
  files: Array<{ path: string; content: string }>,
  allowedExtensions: string[]
): Array<{ path: string; content: string }> {
  const allowed = new Set(allowedExtensions);
  return files.filter((file) => allowed.has(extractFileMetadata(file.path).extension));
}

// Aggregate statistics over successful results that carry metadata:
// total byte size, total line count, and per-language / per-type counts.
// Failed results and results without metadata are ignored.
export function getFileStatistics(results: FileProcessingResult[]): {
  totalSize: number;
  totalLines: number;
  languageDistribution: Record<string, number>;
  typeDistribution: Record<string, number>;
} {
  let totalSize = 0;
  let totalLines = 0;
  const languageDistribution: Record<string, number> = {};
  const typeDistribution: Record<string, number> = {};

  for (const result of results) {
    const meta = result.success ? result.metadata : undefined;
    if (!meta) {
      continue;
    }

    totalSize += meta.size;
    totalLines += meta.lineCount ?? 0;

    if (meta.language) {
      languageDistribution[meta.language] = (languageDistribution[meta.language] ?? 0) + 1;
    }
    typeDistribution[meta.type] = (typeDistribution[meta.type] ?? 0) + 1;
  }

  return { totalSize, totalLines, languageDistribution, typeDistribution };
}
```

--------------------------------------------------------------------------------
/docs/SETUP.md:
--------------------------------------------------------------------------------

```markdown
# CodeCompass MCP Setup Guide

## 🚀 **Quick Start**

### **Option 1: Docker Deployment (Recommended)**
```bash
# Clone the repository
git clone https://github.com/your-org/codecompass-mcp.git
cd codecompass-mcp

# Set up environment variables
cp .env.example .env
# Edit .env with your API keys

# Build and run with Docker
./scripts/docker-build.sh
./scripts/docker-run.sh --env-file .env
```

### **Option 2: Local Development**
```bash
# Install dependencies
npm install

# Set up environment
export GITHUB_TOKEN=your_github_token
export OPENROUTER_API_KEY=your_openrouter_key

# Build and run
npm run build
npm run dev
```

### **Option 3: Global Installation**
```bash
# Install globally for system-wide access
npm install -g codecompass-mcp

# Run from anywhere
codecompass-mcp --help
```

## 🔧 **Configuration**

### **Required Environment Variables**
```bash
# GitHub API access (required for repository analysis)
GITHUB_TOKEN=ghp_your_github_token_here

# OpenRouter API access (required for AI-powered tools)
OPENROUTER_API_KEY=sk-or-v1-your_openrouter_key_here
```

### **Optional Configuration**
```bash
# AI Model Configuration
OPENAI_MODEL=anthropic/claude-3.5-sonnet    # Default AI model
OPENROUTER_API_URL=https://openrouter.ai/api/v1  # Custom API endpoint

# Response Management
MAX_RESPONSE_TOKENS=25000                    # Maximum response size
MAX_FILE_CONTENT_LENGTH=5000                 # Maximum file content per response
CHUNK_SIZE=medium                            # Chunking strategy (small/medium/large)

# Performance Tuning
MAX_CONCURRENT_REQUESTS=10                   # Concurrent processing limit
MAX_FILE_SIZE=10485760                       # Maximum file size (10MB)
RATE_LIMIT_REQUESTS=1000                     # Rate limit per window
RATE_LIMIT_WINDOW=3600000                    # Rate limit window (1 hour)

# Logging Configuration
LOG_LEVEL=info                               # Logging level (debug/info/warn/error)
NODE_ENV=production                          # Environment mode
```

### **Configuration File (Optional)**
Create a `config.json` file for persistent configuration:
```json
{
  "github": {
    "token": "your_github_token",
    "apiUrl": "https://api.github.com"
  },
  "openrouter": {
    "apiKey": "your_openrouter_key",
    "defaultModel": "anthropic/claude-3.5-sonnet"
  },
  "response": {
    "maxTokens": 25000,
    "maxFileContentLength": 5000,
    "chunkSizes": {
      "small": { "filesPerChunk": 5, "fileContent": 1000 },
      "medium": { "filesPerChunk": 10, "fileContent": 2000 },
      "large": { "filesPerChunk": 20, "fileContent": 5000 }
    }
  },
  "logging": {
    "level": "info",
    "enableTimestamps": true,
    "enableColors": true
  }
}
```

## 🔑 **API Key Setup**

### **GitHub Token**
1. Go to [GitHub Settings → Developer settings → Personal access tokens](https://github.com/settings/tokens)
2. Click "Generate new token (classic)"
3. Set expiration and select scopes:
   - `repo` (for private repositories)
   - `public_repo` (for public repositories)
   - `read:org` (for organization repositories)
4. Copy the token and set as `GITHUB_TOKEN`

**Rate Limits:**
- **Authenticated**: 5,000 requests/hour
- **Unauthenticated**: 60 requests/hour

### **OpenRouter API Key**
1. Go to [OpenRouter](https://openrouter.ai/)
2. Sign up/login and go to [API Keys](https://openrouter.ai/keys)
3. Create a new API key
4. Copy the key and set as `OPENROUTER_API_KEY`

**Available Models:**
- `anthropic/claude-3.5-sonnet` (recommended)
- `openai/gpt-4` 
- `google/gemini-pro`
- `switchpoint/router` (automatic model selection)

## 🐳 **Docker Deployment**

### **Docker Compose (Recommended)**
```yaml
# docker-compose.yml
version: '3.8'
services:
  codecompass-mcp:
    build: .
    container_name: codecompass-mcp
    restart: unless-stopped
    environment:
      - GITHUB_TOKEN=${GITHUB_TOKEN}
      - OPENROUTER_API_KEY=${OPENROUTER_API_KEY}
      - NODE_ENV=production
      - LOG_LEVEL=info
    ports:
      - "3000:3000"
    volumes:
      - ./data:/app/data:ro
    healthcheck:
      test: ["CMD", "node", "-e", "console.log('Health check')"]
      interval: 30s
      timeout: 10s
      retries: 3
```

### **Docker Build Options**
```bash
# Production build
./scripts/docker-build.sh

# Development build with hot reload
./scripts/docker-build.sh --dev

# Custom tag and registry
./scripts/docker-build.sh -t v1.0.0 -r your-registry.com --push

# With build arguments
./scripts/docker-build.sh --build-arg NODE_ENV=production
```

### **Docker Run Options**
```bash
# Basic run
./scripts/docker-run.sh

# With environment file
./scripts/docker-run.sh --env-file .env

# Interactive mode
./scripts/docker-run.sh --interactive

# With custom configuration
./scripts/docker-run.sh \
  -e GITHUB_TOKEN=your_token \
  -e OPENROUTER_API_KEY=your_key \
  -e LOG_LEVEL=debug
```

## 🖥️ **MCP Client Integration**

### **Claude Desktop Integration**
Add to your Claude Desktop configuration:
```json
{
  "mcpServers": {
    "codecompass": {
      "command": "docker",
      "args": [
        "exec", "-i", "codecompass-mcp",
        "node", "build/index.js"
      ],
      "env": {
        "GITHUB_TOKEN": "your_github_token",
        "OPENROUTER_API_KEY": "your_openrouter_key"
      }
    }
  }
}
```

### **Claude Code Integration**
```bash
# Add MCP server to Claude Code
claude mcp add codecompass-docker -s user -- \
  docker exec -i codecompass-mcp node build/index.js
```

### **Other MCP Clients**
The server is compatible with any MCP client that supports the JSON-RPC protocol:
- **Cline**: VS Code extension
- **Continue**: VS Code/JetBrains extension
- **Custom clients**: Using the MCP SDK

## 🔍 **Verification**

### **Health Check**
```bash
# Test server health
curl -X POST http://localhost:3000/health \
  -H "Content-Type: application/json" \
  -d '{"name": "health_check", "arguments": {"options": {"include_metrics": true}}}'

# Or using the monitoring script
./scripts/monitor.js
```

### **Tool Testing**
```bash
# Test repository analysis
echo '{"jsonrpc": "2.0", "id": 1, "method": "tools/call", "params": {"name": "fetch_repository_data", "arguments": {"url": "https://github.com/microsoft/typescript"}}}' | \
  docker exec -i codecompass-mcp node build/index.js
```

### **Performance Testing**
```bash
# Run performance tests
npm run test:performance

# Monitor resource usage
docker stats codecompass-mcp

# Check logs
./scripts/docker-logs.sh -f --timestamps
```

## 📊 **Monitoring Setup**

### **Real-time Dashboard**
```bash
# Start monitoring dashboard
./scripts/monitor.js --watch

# Export metrics
./scripts/monitor.js --export > metrics.json

# View specific metrics
./scripts/monitor.js --export | jq '.metrics.toolUsage'
```

### **Health Monitoring**
```bash
# Comprehensive health check
{
  "name": "health_check",
  "arguments": {
    "checks": ["api-limits", "monitoring", "configuration"],
    "options": {
      "include_metrics": true,
      "include_insights": true,
      "include_logs": true
    }
  }
}
```

### **Log Analysis**
```bash
# View structured logs
docker logs codecompass-mcp | jq .

# Filter by log level
docker logs codecompass-mcp | jq 'select(.level == "ERROR")'

# Search for specific events
docker logs codecompass-mcp | jq 'select(.message | contains("Request started"))'
```

## 🛠️ **Development Setup**

### **Local Development**
```bash
# Clone and setup
git clone https://github.com/TheAlchemist6/codecompass-mcp.git
cd codecompass-mcp

# Install dependencies
npm install

# Set up environment
cp .env.example .env
# Edit .env with your configuration

# Start development server
npm run dev

# Run tests
npm run test

# Type checking
npm run type-check

# Linting
npm run lint
```

### **Hot Reload Development**
```bash
# Start with hot reload
npm run dev:watch

# Or with Docker
./scripts/docker-build.sh --dev
./scripts/docker-run.sh --interactive -v $(pwd):/app
```

### **Testing**
```bash
# Run all tests
npm test

# Run specific test suites
npm run test:unit
npm run test:integration
npm run test:performance

# Test coverage
npm run test:coverage
```

## 🔧 **Troubleshooting**

### **Common Error Messages & Solutions**

#### **1. "API key missing" or "Neither GITHUB_TOKEN nor OPENROUTER_API_KEY is set"**
**Cause**: Environment variables not properly configured.

**Solution**:
```bash
# Check if your .env file exists and has content
cat .env

# Verify tokens are set correctly
echo $GITHUB_TOKEN | cut -c1-10  # Should show: ghp_xxxxxx
echo $OPENROUTER_API_KEY | cut -c1-10  # Should show: sk-or-v1-x

# Fix: Edit your .env file with real tokens
nano .env
```

**Expected .env content:**
```bash
GITHUB_TOKEN=ghp_your_actual_40_character_token_here
OPENROUTER_API_KEY=sk-or-v1-your_actual_key_here
```

#### **2. "repository 'https://github.com/...' not found" (404 Error)**
**Cause**: Repository is private or doesn't exist.

**Solution**:
```bash
# Test GitHub token permissions
curl -H "Authorization: token $GITHUB_TOKEN" https://api.github.com/user

# Expected response: Your GitHub user info
# If error: Check token has 'repo' scope at github.com/settings/tokens
```

#### **3. "Container codecompass-mcp not found"**
**Cause**: Docker container not running.

**Solution**:
```bash
# Check container status
docker ps -a

# If not running, start it
./scripts/docker-run.sh --env-file .env

# If failed to start, check logs
docker logs codecompass-mcp
```

#### **4. "Permission denied" or "EACCES" errors**
**Cause**: File permissions issue.

**Solution**:
```bash
# Fix script permissions
chmod +x scripts/*.sh

# Fix Docker permissions (Linux)
sudo usermod -aG docker $USER
newgrp docker
```

#### **5. "Port already in use" or "EADDRINUSE"**
**Cause**: Port 3000 already occupied.

**Solution**:
```bash
# Check what's using port 3000
lsof -i :3000

# Kill the process or use different port
export MCP_PORT=3001
./scripts/docker-run.sh --env-file .env
```

#### **6. "Request timeout" errors**
**Cause**: Large repository or slow network.

**Solution**:
```bash
# Increase timeouts in .env
echo "REQUEST_TIMEOUT=60000" >> .env

# Use chunking for large repos
echo "CHUNK_MODE=true" >> .env
```

#### **7. "Docker build failed" or "npm install failed"**
**Cause**: Network issues or missing dependencies.

**Solution**:
```bash
# Clear Docker cache and rebuild
docker system prune -f
./scripts/docker-build.sh --no-cache

# For local development
rm -rf node_modules package-lock.json
npm install
```

### **Debug Mode**
```bash
# Enable verbose logging
export LOG_LEVEL=debug

# Run with debug output
DEBUG=codecompass:* ./scripts/docker-run.sh --env-file .env

# Check logs location
./scripts/docker-logs.sh -f --timestamps
```

### **Health Check Commands**
```bash
# Quick health check
curl -X POST http://localhost:3000/health

# Detailed health check with metrics
echo '{"jsonrpc": "2.0", "id": 1, "method": "tools/call", "params": {"name": "health_check", "arguments": {"options": {"include_metrics": true}}}}' | docker exec -i codecompass-mcp node build/index.js
```

### **Getting Help**
If you're still having issues:

1. **Check GitHub Issues**: [github.com/TheAlchemist6/codecompass-mcp/issues](https://github.com/TheAlchemist6/codecompass-mcp/issues)
2. **Include in your issue**:
   - Operating system and version
   - Docker version (`docker --version`)
   - Node.js version (`node --version`)
   - Complete error message
   - Steps to reproduce

3. **Useful diagnostic commands**:
```bash
# System info
docker --version
node --version
npm --version

# Container logs
docker logs codecompass-mcp --tail 50

# Environment check
env | grep -E "(GITHUB|OPENROUTER|MCP)"
```

### **Debug Mode (Local Development)**
```bash
# Enable debug logging
export LOG_LEVEL=debug

# Run with debug output
DEBUG=codecompass:* npm run dev

# Docker debug mode
./scripts/docker-run.sh -e LOG_LEVEL=debug
```

### **Performance Optimization**
```bash
# Optimize for large repositories
export CHUNK_MODE=true
export CHUNK_SIZE=large
export MAX_CONCURRENT_REQUESTS=20

# Reduce response sizes
export MAX_RESPONSE_TOKENS=15000
export MAX_FILE_CONTENT_LENGTH=2000
```

## 🔄 **Updates and Maintenance**

### **Updating**
```bash
# Pull latest changes
git pull origin main

# Rebuild and restart
./scripts/docker-build.sh
./scripts/docker-run.sh --remove-existing
```

### **Backup Configuration**
```bash
# Backup environment configuration
cp .env .env.backup

# Export Docker configuration
docker inspect codecompass-mcp > container-config.json
```

### **Monitoring Health**
```bash
# Set up automated health checks
echo "*/5 * * * * curl -f http://localhost:3000/health || echo 'Health check failed'" | crontab -

# Monitor logs for errors
tail -f $(docker inspect codecompass-mcp | jq -r '.[0].LogPath') | grep ERROR
```

## 🎯 **Next Steps**

1. **Test the setup** with a small repository
2. **Configure monitoring** for your environment
3. **Integrate with your MCP client** (Claude Desktop, VS Code, etc.)
4. **Customize configuration** for your specific needs
5. **Set up automated backups** for important data

## 📚 **Additional Resources**

- [API Documentation](API.md) - Complete tool reference
- [Docker Guide](DOCKER.md) - Advanced Docker configuration
- [Monitoring Guide](MONITORING.md) - Observability setup
- [Contributing Guide](../CONTRIBUTING.md) - Development guidelines
- [Examples](../examples/) - Usage examples and templates

For support, please check the [Issues](https://github.com/TheAlchemist6/codecompass-mcp/issues) page or create a new issue with detailed information about your setup and the problem you're experiencing.
```

--------------------------------------------------------------------------------
/examples/basic-usage.md:
--------------------------------------------------------------------------------

```markdown
# CodeCompass MCP Usage Examples

This document provides practical examples of how to use the CodeCompass MCP server's 18 tools.

## 🔍 Core Data Tools

### 1. Repository Analysis

```javascript
// Basic repository analysis
{
  "tool": "fetch_repository_data",
  "arguments": {
    "url": "https://github.com/facebook/react",
    "options": {
      "include_structure": true,
      "include_dependencies": true,
      "include_key_files": true,
      "max_files": 50
    }
  }
}

// For large repositories, control response size
{
  "tool": "fetch_repository_data",
  "arguments": {
    "url": "https://github.com/facebook/react",
    "options": {
      "include_structure": true,
      "include_dependencies": true,
      "include_key_files": true,
      "max_files": 30,
      "max_response_tokens": 15000,
      "max_file_content_length": 500
    }
  }
}

// For large repositories, use chunking (recommended)
{
  "tool": "fetch_repository_data",
  "arguments": {
    "url": "https://github.com/facebook/react",
    "options": {
      "chunk_mode": true,
      "chunk_index": 0,
      "chunk_size": "medium"
    }
  }
}

// Get next chunk
{
  "tool": "fetch_repository_data",
  "arguments": {
    "url": "https://github.com/facebook/react",
    "options": {
      "chunk_mode": true,
      "chunk_index": 1,
      "chunk_size": "medium"
    }
  }
}

// Expected response (regular mode):
{
  "success": true,
  "data": {
    "info": {
      "name": "react",
      "description": "The library for web and native user interfaces",
      "language": "JavaScript",
      "stars": 220000,
      "forks": 45000
    },
    "structure": {
      "fileCount": 850,
      "lineCount": 125000,
      "keyFiles": {
        "package.json": "...",
        "README.md": "..."
      }
    },
    "dependencies": {
      "production": 5,
      "development": 45
    }
  }
}

// Expected response (chunked mode):
{
  "success": true,
  "data": {
    "info": { /* repository info */ },
    "structure": {
      "fileCount": 850,
      "lineCount": 125000,
      "keyFiles": {
        "package.json": "...",
        "README.md": "...",
        "src/index.js": "..."
        // Only 20 files in this chunk
      }
    },
    "dependencies": { /* dependency analysis */ },
    "architecture": { /* architecture analysis */ },
    "chunkInfo": {
      "chunkIndex": 0,
      "chunkSize": "medium",
      "totalFiles": 85,
      "totalChunks": 5,
      "filesInChunk": 20,
      "hasMore": true,
      "nextChunkIndex": 1
    }
  }
}
```

### 2. Advanced Repository Search

```javascript
// Search for React hooks
{
  "tool": "search_repository",
  "arguments": {
    "url": "https://github.com/facebook/react",
    "query": "useState",
    "search_type": "function",
    "options": {
      "file_extensions": [".js", ".jsx", ".ts", ".tsx"],
      "include_context": true,
      "max_results": 20
    }
  }
}

// Search with regex pattern
{
  "tool": "search_repository",
  "arguments": {
    "url": "https://github.com/facebook/react",
    "query": "use[A-Z]\\w+",
    "search_type": "regex",
    "options": {
      "case_sensitive": false,
      "include_context": true
    }
  }
}
```

### 3. File Content Retrieval

```javascript
// Basic file retrieval
{
  "tool": "get_file_content",
  "arguments": {
    "url": "https://github.com/facebook/react",
    "file_paths": [
      "package.json",
      "README.md",
      "packages/react/src/React.js"
    ],
    "options": {
      "max_size": 50000,
      "include_metadata": true
    }
  }
}

// Advanced batch processing with filtering
{
  "tool": "get_file_content",
  "arguments": {
    "url": "https://github.com/facebook/react",
    "file_paths": [
      "src/index.js",
      "src/components/Button.js",
      "src/utils/helpers.js",
      "test/Button.test.js"
    ],
    "options": {
      "max_concurrent": 10,
      "continue_on_error": true,
      "include_metadata": true,
      "file_extensions": [".js", ".jsx", ".ts", ".tsx"],
      "exclude_patterns": ["node_modules", "\\.test\\.", "\\.spec\\."]
    }
  }
}

// Expected response with rich metadata:
{
  "success": true,
  "data": {
    "files": {
      "src/index.js": {
        "content": "import React from 'react'...",
        "metadata": {
          "name": "index.js",
          "extension": ".js",
          "size": 1024,
          "type": "text",
          "mimeType": "text/javascript",
          "language": "javascript",
          "lineCount": 42,
          "encoding": "utf8"
        },
        "size": 1024,
        "truncated": false
      }
    },
    "summary": {
      "total": 4,
      "successful": 3,
      "failed": 1,
      "fetchErrors": 0,
      "statistics": {
        "totalSize": 3072,
        "totalLines": 126,
        "languageDistribution": {
          "javascript": 3
        },
        "typeDistribution": {
          "text": 3
        }
      }
    }
  }
}
```

### 4. Code Structure Analysis

```javascript
// Analyze code structure
{
  "tool": "analyze_code_structure",
  "arguments": {
    "url": "https://github.com/facebook/react",
    "options": {
      "include_functions": true,
      "include_classes": true,
      "include_complexity": true,
      "languages": ["javascript", "typescript"]
    }
  }
}
```

## 🤖 AI-Enhanced Tools

### 1. AI Code Review

```javascript
// Comprehensive AI code review
{
  "tool": "ai_code_review",
  "arguments": {
    "url": "https://github.com/your-username/your-repo",
    "review_focus": ["security", "performance", "maintainability"],
    "options": {
      "ai_model": "auto",
      "severity_threshold": "medium",
      "include_examples": true
    }
  }
}

// Review specific files
{
  "tool": "ai_code_review",
  "arguments": {
    "url": "https://github.com/your-username/your-repo",
    "file_paths": ["src/auth.js", "src/api.js"],
    "review_focus": ["security"],
    "options": {
      "ai_model": "anthropic/claude-3.5-sonnet",
      "severity_threshold": "high"
    }
  }
}
```

### 2. AI Code Explanation

```javascript
// Get overview explanation
{
  "tool": "ai_explain_code",
  "arguments": {
    "url": "https://github.com/sindresorhus/is-online",
    "explanation_type": "overview",
    "options": {
      "ai_model": "auto",
      "target_audience": "intermediate",
      "include_examples": true,
      "include_diagrams": true
    }
  }
}

// Detailed architectural explanation
{
  "tool": "ai_explain_code",
  "arguments": {
    "url": "https://github.com/facebook/react",
    "explanation_type": "architecture",
    "options": {
      "ai_model": "anthropic/claude-3-opus",
      "target_audience": "advanced",
      "focus_on_patterns": true
    }
  }
}

// Tutorial-style explanation
{
  "tool": "ai_explain_code",
  "arguments": {
    "url": "https://github.com/simple-example/todo-app",
    "explanation_type": "tutorial",
    "options": {
      "target_audience": "beginner",
      "include_examples": true
    }
  }
}
```

### 3. AI Refactoring Suggestions

```javascript
// Modernization suggestions
{
  "tool": "ai_refactor_suggestions",
  "arguments": {
    "url": "https://github.com/legacy-app/old-codebase",
    "refactoring_goals": ["modernize", "performance", "maintainability"],
    "options": {
      "ai_model": "auto",
      "include_code_examples": true,
      "estimate_effort": true
    }
  }
}

// Framework migration suggestions
{
  "tool": "ai_refactor_suggestions",
  "arguments": {
    "url": "https://github.com/jquery-app/legacy-frontend",
    "refactoring_goals": ["modernize"],
    "target_framework": "react",
    "options": {
      "ai_model": "anthropic/claude-3.5-sonnet",
      "priority_level": "high"
    }
  }
}
```

## 🔧 Code Transformation Tools

### 1. Code Transformation

```javascript
// Modernize JavaScript code
{
  "tool": "transform_code",
  "arguments": {
    "code": "var userName = 'John'; function greet() { return 'Hello ' + userName; }",
    "transformations": [
      {
        "type": "modernize",
        "options": {
          "target_es_version": "ES2020"
        }
      }
    ],
    "language": "javascript",
    "options": {
      "preserve_comments": true,
      "validate_syntax": true
    }
  }
}

// Convert to TypeScript
{
  "tool": "transform_code",
  "arguments": {
    "code": "function calculateTotal(items) { return items.reduce((sum, item) => sum + item.price, 0); }",
    "transformations": [
      {
        "type": "modernize"
      }
    ],
    "language": "javascript",
    "target_language": "typescript"
  }
}
```

### 2. Component Extraction

```javascript
// Extract React components
{
  "tool": "extract_components",
  "arguments": {
    "url": "https://github.com/react-app/components",
    "extraction_types": ["components", "hooks", "utilities"],
    "options": {
      "min_reusability_score": 75,
      "framework": "react",
      "include_examples": true
    }
  }
}

// Extract utility functions
{
  "tool": "extract_components",
  "arguments": {
    "url": "https://github.com/utils-library/helpers",
    "extraction_types": ["functions", "utilities"],
    "options": {
      "min_reusability_score": 60,
      "include_dependencies": true
    }
  }
}
```

### 3. Code Structure Adaptation

```javascript
// Migrate to React from Vue
{
  "tool": "adapt_code_structure",
  "arguments": {
    "url": "https://github.com/vue-app/frontend",
    "target_structure": {
      "framework": "react",
      "pattern": "mvc"
    },
    "options": {
      "preserve_logic": true,
      "update_imports": true,
      "generate_config": true
    }
  }
}

// Restructure for microservices
{
  "tool": "adapt_code_structure",
  "arguments": {
    "url": "https://github.com/monolith-app/backend",
    "target_structure": {
      "pattern": "microservices",
      "folder_structure": {
        "services/": "Individual service modules",
        "shared/": "Shared utilities and types",
        "gateway/": "API gateway configuration"
      }
    }
  }
}
```

## 📊 Analysis Tools

### 1. Architecture Analysis

```javascript
// Analyze architectural patterns
{
  "tool": "analyze_architecture",
  "arguments": {
    "url": "https://github.com/enterprise-app/backend",
    "options": {
      "pattern_types": ["mvc", "clean", "hexagonal"],
      "include_frameworks": true,
      "confidence_threshold": 0.8
    }
  }
}
```

### 2. Implementation Comparison

```javascript
// Compare different implementations
{
  "tool": "compare_implementations",
  "arguments": {
    "implementations": [
      {
        "name": "React Implementation",
        "url": "https://github.com/team-a/react-solution",
        "focus_areas": ["performance", "maintainability"]
      },
      {
        "name": "Vue Implementation",
        "url": "https://github.com/team-b/vue-solution",
        "focus_areas": ["performance", "maintainability"]
      }
    ],
    "comparison_criteria": ["performance", "maintainability", "security", "complexity"],
    "options": {
      "include_metrics": true,
      "include_recommendations": true
    }
  }
}
```

### 3. Code Quality Validation

```javascript
// Comprehensive quality check
{
  "tool": "validate_code_quality",
  "arguments": {
    "url": "https://github.com/production-app/codebase",
    "validation_types": ["security", "performance", "best-practices"],
    "options": {
      "severity_level": "medium",
      "include_fixes": true,
      "framework_specific": true
    }
  }
}
```

## 🔄 Utility Tools

### 1. Batch Processing

```javascript
// Process multiple operations
{
  "tool": "batch_process",
  "arguments": {
    "operations": [
      {
        "id": "health",
        "tool": "health_check",
        "params": {
          "checks": ["api-limits", "system-health"]
        },
        "priority": 1
      },
      {
        "id": "analyze",
        "tool": "analyze_code_structure",
        "params": {
          "url": "https://github.com/example/repo"
        },
        "priority": 2
      },
      {
        "id": "metrics",
        "tool": "calculate_metrics",
        "params": {
          "url": "https://github.com/example/repo"
        },
        "priority": 3
      }
    ],
    "options": {
      "max_concurrent": 2,
      "fail_fast": false
    }
  }
}
```

### 2. Health Check

```javascript
// Basic health check
{
  "tool": "health_check",
  "arguments": {
    "checks": ["api-limits", "system-health"],
    "options": {
      "include_metrics": true
    }
  }
}

// Comprehensive health check
{
  "tool": "health_check",
  "arguments": {
    "checks": ["api-limits", "cache-status", "system-health", "dependencies"],
    "options": {
      "include_metrics": true,
      "include_diagnostics": true
    }
  }
}
```

## 📈 Quality Metrics

### 1. Calculate Metrics

```javascript
// Basic quality metrics
{
  "tool": "calculate_metrics",
  "arguments": {
    "url": "https://github.com/example/repo",
    "options": {
      "metrics": ["complexity", "maintainability", "security"],
      "include_file_level": true
    }
  }
}

// Comprehensive metrics with trends
{
  "tool": "calculate_metrics",
  "arguments": {
    "url": "https://github.com/example/repo",
    "options": {
      "metrics": ["complexity", "maintainability", "duplication", "security"],
      "include_file_level": true,
      "include_trend_analysis": true
    }
  }
}
```

### 2. Dependency Analysis

```javascript
// Security-focused dependency analysis
{
  "tool": "analyze_dependencies",
  "arguments": {
    "url": "https://github.com/production-app/backend",
    "options": {
      "include_security_scan": true,
      "include_version_analysis": true,
      "check_outdated": true
    }
  }
}
```

## 🔄 Workflow Examples

### 1. Complete Repository Analysis

```javascript
// Step 1: Get repository overview
{
  "tool": "fetch_repository_data",
  "arguments": {
    "url": "https://github.com/new-project/analysis-target"
  }
}

// Step 2: Analyze code structure
{
  "tool": "analyze_code_structure",
  "arguments": {
    "url": "https://github.com/new-project/analysis-target",
    "options": {
      "include_complexity": true
    }
  }
}

// Step 3: Check dependencies
{
  "tool": "analyze_dependencies",
  "arguments": {
    "url": "https://github.com/new-project/analysis-target",
    "options": {
      "include_security_scan": true
    }
  }
}

// Step 4: Get AI insights
{
  "tool": "ai_explain_code",
  "arguments": {
    "url": "https://github.com/new-project/analysis-target",
    "explanation_type": "overview",
    "options": {
      "ai_model": "auto"
    }
  }
}
```

### 2. Code Review Workflow

```javascript
// Step 1: Quality validation
{
  "tool": "validate_code_quality",
  "arguments": {
    "url": "https://github.com/team/pull-request-branch",
    "validation_types": ["security", "performance", "best-practices"]
  }
}

// Step 2: AI code review
{
  "tool": "ai_code_review",
  "arguments": {
    "url": "https://github.com/team/pull-request-branch",
    "review_focus": ["security", "performance", "maintainability"],
    "options": {
      "ai_model": "anthropic/claude-3.5-sonnet",
      "severity_threshold": "medium"
    }
  }
}

// Step 3: Calculate metrics
{
  "tool": "calculate_metrics",
  "arguments": {
    "url": "https://github.com/team/pull-request-branch",
    "options": {
      "metrics": ["complexity", "maintainability"]
    }
  }
}
```

### 3. Refactoring Planning

```javascript
// Step 1: Analyze current architecture
{
  "tool": "analyze_architecture",
  "arguments": {
    "url": "https://github.com/legacy-app/monolith"
  }
}

// Step 2: Get AI refactoring suggestions
{
  "tool": "ai_refactor_suggestions",
  "arguments": {
    "url": "https://github.com/legacy-app/monolith",
    "refactoring_goals": ["modernize", "performance", "maintainability"],
    "options": {
      "ai_model": "auto",
      "estimate_effort": true
    }
  }
}

// Step 3: Plan structure adaptation
{
  "tool": "adapt_code_structure",
  "arguments": {
    "url": "https://github.com/legacy-app/monolith",
    "target_structure": {
      "framework": "react",
      "pattern": "clean"
    }
  }
}
```

## 🎯 Best Practices

### 1. Model Selection

```javascript
// Use auto for most cases
{
  "options": {
    "ai_model": "auto"
  }
}

// Use specific models for specialized tasks
{
  "options": {
    "ai_model": "anthropic/claude-3-opus"  // For complex analysis
  }
}

// Cost-effective for batch operations
{
  "options": {
    "ai_model": "openai/gpt-4o-mini"  // For large-scale processing
  }
}
```

### 2. Rate Limit Management

```javascript
// Check limits before large operations
{
  "tool": "health_check",
  "arguments": {
    "checks": ["api-limits"]
  }
}

// Use batch processing for multiple operations
{
  "tool": "batch_process",
  "arguments": {
    "operations": [/* multiple operations */],
    "options": {
      "max_concurrent": 2  // Respect rate limits
    }
  }
}
```

### 3. Chunking Best Practices

```javascript
// Function to process all chunks
async function processRepositoryInChunks(url, chunkSize = 'medium') {
  let chunkIndex = 0;
  let hasMore = true;
  const allFiles = {};
  
  while (hasMore) {
    const response = await callTool('fetch_repository_data', {
      url,
      options: {
        chunk_mode: true,
        chunk_index: chunkIndex,
        chunk_size: chunkSize
      }
    });
    
    if (response.success) {
      // Merge files from this chunk
      Object.assign(allFiles, response.data.structure.keyFiles);
      
      // Check if there are more chunks
      hasMore = response.data.chunkInfo.hasMore;
      chunkIndex = response.data.chunkInfo.nextChunkIndex;
    } else {
      break;
    }
  }
  
  return allFiles;
}

// Usage
const allFiles = await processRepositoryInChunks('https://github.com/facebook/react');
```

### 4. Error Handling

```javascript
// Always check response success
const response = await callTool('fetch_repository_data', params);
if (!response.success) {
  console.error('Tool failed:', response.error);
  // Handle error appropriately
}
```

This comprehensive guide covers the main usage patterns for all 18 tools in the CodeCompass MCP server. Each example includes realistic parameters and expected response formats to help you get started quickly.
```

--------------------------------------------------------------------------------
/src/services/openai.ts:
--------------------------------------------------------------------------------

```typescript
import OpenAI from 'openai';
import { GitHubRepoInfo, ChatContext, RefactoringPlan, ArchitectureExplanation } from '../types/index.js';

/**
 * Thin service wrapper around the OpenAI SDK, configured to talk to the
 * OpenRouter API (baseURL `https://openrouter.ai/api/v1`).
 *
 * Responsibilities visible in this class:
 * - Holds a mutable runtime configuration (API key, model, system prompt),
 *   seeded from environment variables in the constructor.
 * - Performs task-aware model selection (`selectModel`) and emits
 *   human-readable speed/cost warnings (`generateModelWarning`).
 * - Exposes chat / refactor / review / explain helpers that each build a
 *   prompt and call `chat.completions.create`.
 */
export class OpenAIService {
  private openai: OpenAI;
  private currentConfig: {
    apiKey: string;
    model: string;
    systemPrompt: string;
  };
  
  // Model characteristics for intelligent selection and warnings.
  // `recommended` flags the models the auto-selector is willing to favor.
  private modelCharacteristics = {
    'anthropic/claude-3.5-sonnet': { speed: 'fast', cost: 'medium', quality: 'high', recommended: true },
    'anthropic/claude-3-opus': { speed: 'slow', cost: 'high', quality: 'highest', recommended: false },
    'anthropic/claude-3-haiku': { speed: 'fastest', cost: 'low', quality: 'good', recommended: true },
    'openai/gpt-4o': { speed: 'fast', cost: 'medium', quality: 'high', recommended: true },
    'openai/gpt-4o-mini': { speed: 'fastest', cost: 'low', quality: 'good', recommended: true },
    'openai/gpt-4-turbo': { speed: 'medium', cost: 'medium', quality: 'high', recommended: true },
    'openai/o1-mini': { speed: 'slow', cost: 'high', quality: 'highest', recommended: false },
    'openai/o1-preview': { speed: 'slowest', cost: 'highest', quality: 'highest', recommended: false },
    'meta-llama/llama-3.1-405b-instruct': { speed: 'medium', cost: 'high', quality: 'high', recommended: false },
  };

  constructor() {
    // OPENROUTER_API_KEY takes precedence; OPENAI_API_KEY is the fallback.
    this.currentConfig = {
      apiKey: process.env.OPENROUTER_API_KEY || process.env.OPENAI_API_KEY || '',
      model: process.env.OPENAI_MODEL || 'anthropic/claude-3.5-sonnet',
      systemPrompt: `You are CodeCompass AI, an expert assistant specialized in code analysis, refactoring, and architectural guidance. Your primary role is to help developers understand, analyze, and refactor code from GitHub repositories for integration into their own projects.

Core Capabilities:
1. Code Analysis: Understand code structure, patterns, and dependencies
2. Refactoring Guidance: Suggest improvements and modernization strategies
3. Architecture Explanation: Explain design patterns and architectural decisions
4. Integration Support: Help adapt code for different projects and frameworks
5. Best Practices: Recommend coding standards and optimization techniques

Response Guidelines:
- Provide clear, actionable advice with code examples
- Focus on practical solutions that can be implemented
- Consider maintainability, performance, and scalability
- Explain the reasoning behind recommendations
- Suggest multiple approaches when appropriate
- Include potential risks and trade-offs

Communication Style:
- Be concise but comprehensive
- Use code examples to illustrate points
- Structure responses with clear headings
- Prioritize actionable insights over theoretical discussions`,
    };

    this.openai = new OpenAI({
      apiKey: this.currentConfig.apiKey,
      baseURL: 'https://openrouter.ai/api/v1',
      defaultHeaders: {
        'HTTP-Referer': 'https://github.com/codecompass/codecompass-mcp',
        'X-Title': 'CodeCompass MCP Server',
      },
    });
  }

  /**
   * Patch the runtime configuration. A new API key rebuilds the underlying
   * OpenAI client (the SDK captures the key at construction time); model and
   * system-prompt updates take effect on the next request.
   */
  updateConfig(config: { apiKey?: string; model?: string; systemPrompt?: string }) {
    if (config.apiKey) {
      this.currentConfig.apiKey = config.apiKey;
      this.openai = new OpenAI({
        apiKey: config.apiKey,
        baseURL: 'https://openrouter.ai/api/v1',
        defaultHeaders: {
          'HTTP-Referer': 'https://github.com/codecompass/codecompass-mcp',
          'X-Title': 'CodeCompass MCP Server',
        },
      });
    }
    if (config.model) this.currentConfig.model = config.model;
    if (config.systemPrompt) this.currentConfig.systemPrompt = config.systemPrompt;
  }

  /** Extract a human-readable message from an unknown thrown value. */
  private static errorMessage(e: unknown): string {
    return e instanceof Error ? e.message : String(e);
  }

  /**
   * Intelligent model selection based on task type.
   *
   * An explicit model (anything other than 'auto') always wins. Otherwise
   * batch jobs get the cheapest/fastest recommended model, and interactive
   * requests get a per-task default, falling back to the configured model.
   */
  private selectModel(requestedModel: string | undefined, taskType: 'review' | 'explain' | 'refactor', isBatchJob: boolean = false): string {
    if (requestedModel && requestedModel !== 'auto') {
      return requestedModel;
    }

    // Auto model selection logic
    if (isBatchJob) {
      // For batch jobs, prefer faster, cost-effective models
      return 'openai/gpt-4o-mini';
    }

    // Task-specific model selection
    switch (taskType) {
      case 'review':
        return 'anthropic/claude-3.5-sonnet'; // Good balance of quality and speed
      case 'explain':
        return 'openai/gpt-4o'; // Good for explanations
      case 'refactor':
        return 'anthropic/claude-3.5-sonnet'; // Good for strategic thinking
      default:
        return this.currentConfig.model;
    }
  }

  /**
   * Build a user-facing warning string for slow/expensive models, or
   * undefined when the model is unknown or unremarkable. Batch jobs use a
   * lower threshold (warn on 'slow'/'high') than interactive requests
   * (warn only on 'slowest'/'highest').
   */
  private generateModelWarning(model: string, isBatchJob: boolean = false): string | undefined {
    const characteristics = this.modelCharacteristics[model as keyof typeof this.modelCharacteristics];
    if (!characteristics) return undefined;

    const warnings = [];
    
    if (isBatchJob) {
      if (characteristics.speed === 'slow' || characteristics.speed === 'slowest') {
        warnings.push(`⚠️ Model ${model} is ${characteristics.speed} - batch job may take significant time`);
      }
      if (characteristics.cost === 'high' || characteristics.cost === 'highest') {
        warnings.push(`💰 Model ${model} has ${characteristics.cost} cost - batch job may be expensive`);
      }
    } else {
      if (characteristics.speed === 'slowest') {
        warnings.push(`⚠️ Model ${model} is very slow - expect longer response times`);
      }
      if (characteristics.cost === 'highest') {
        warnings.push(`💰 Model ${model} has highest cost - consider alternatives for frequent use`);
      }
    }

    return warnings.length > 0 ? warnings.join('\n') : undefined;
  }

  /** Log model selection as a single JSON line for audit/debug. */
  private logModelSelection(model: string, requestedModel: string | undefined, taskType: string, isBatchJob: boolean) {
    const timestamp = new Date().toISOString();
    const logEntry = {
      timestamp,
      taskType,
      requestedModel: requestedModel || 'auto',
      selectedModel: model,
      isBatchJob,
      characteristics: this.modelCharacteristics[model as keyof typeof this.modelCharacteristics]
    };
    
    console.log(`[MODEL_SELECTION] ${JSON.stringify(logEntry)}`);
  }

  /**
   * Free-form chat about a repository.
   *
   * Builds: system prompt, optional repository context + prior conversation
   * history (from `context`), then the user message.
   *
   * NOTE(review): `url` is currently unused here — repository identity comes
   * from `context.repositoryUrl` via `buildContextMessage`. Kept for
   * interface compatibility; confirm against callers before removing.
   *
   * @throws Error when no API key is configured or the API call fails.
   */
  async chatWithRepository(url: string, message: string, context?: ChatContext, model?: string, isBatchJob: boolean = false): Promise<{ content: string; modelUsed: string; warning?: string }> {
    if (!this.currentConfig.apiKey) {
      throw new Error('OpenRouter API key not configured. Please set OPENROUTER_API_KEY environment variable.');
    }

    // Select model intelligently
    const modelToUse = this.selectModel(model, 'explain', isBatchJob);
    
    // Log model selection
    this.logModelSelection(modelToUse, model, 'explain_code', isBatchJob);
    
    // Generate warning if needed
    const warning = this.generateModelWarning(modelToUse, isBatchJob);

    try {
      // The SDK's message-param union is version-sensitive; keep this loose
      // rather than pinning to a specific SDK type.
      const messages: any[] = [
        { role: 'system', content: this.currentConfig.systemPrompt },
      ];

      // Add context if provided
      if (context) {
        const contextMessage = this.buildContextMessage(context);
        messages.push({ role: 'system', content: contextMessage });

        // Add conversation history
        if (context.conversationHistory) {
          messages.push(...context.conversationHistory.map(msg => ({
            role: msg.role,
            content: msg.content,
          })));
        }
      }

      messages.push({ role: 'user', content: message });

      const response = await this.openai.chat.completions.create({
        model: modelToUse,
        messages,
        max_tokens: 2000,
        temperature: 0.7,
      });

      // Guard the index access: an empty choices array or null content falls
      // back to the apology message instead of throwing a TypeError.
      const content = response.choices[0]?.message?.content || 'I apologize, but I couldn\'t generate a response.';
      
      return {
        content,
        modelUsed: modelToUse,
        warning
      };
    } catch (error: unknown) {
      console.error('OpenAI API error:', error);
      throw new Error(`Failed to generate response: ${OpenAIService.errorMessage(error)}`);
    }
  }

  /**
   * Generate a free-text refactoring plan for a repository, tailored to the
   * target project's framework/language/constraints and optional goals.
   *
   * @throws Error when no API key is configured or the API call fails.
   */
  async suggestRefactoringPlan(url: string, targetProject: any, goals?: string[], model?: string, isBatchJob: boolean = false): Promise<{ content: string; modelUsed: string; warning?: string }> {
    if (!this.currentConfig.apiKey) {
      throw new Error('OpenRouter API key not configured. Please set OPENROUTER_API_KEY environment variable.');
    }

    // Select model intelligently
    const modelToUse = this.selectModel(model, 'refactor', isBatchJob);
    
    // Log model selection
    this.logModelSelection(modelToUse, model, 'refactor_suggestions', isBatchJob);
    
    // Generate warning if needed
    const warning = this.generateModelWarning(modelToUse, isBatchJob);

    const prompt = `
    Analyze the repository at ${url} and create a comprehensive refactoring plan.
    
    Target Project Context:
    - Framework: ${targetProject.framework}
    - Language: ${targetProject.language || 'Not specified'}
    - Constraints: ${targetProject.constraints?.join(', ') || 'None specified'}
    - Timeline: ${targetProject.timeline || 'Not specified'}
    
    Refactoring Goals:
    ${goals?.map(goal => `- ${goal}`).join('\n') || '- General modernization and improvement'}
    
    Please provide a detailed refactoring plan that includes:
    1. Executive Summary
    2. Phase-by-phase breakdown
    3. Time estimates for each phase
    4. Risk assessment
    5. Success metrics
    6. Recommended tools and resources
    
    Format the response as a structured plan with clear sections and actionable items.
    `;

    try {
      const response = await this.openai.chat.completions.create({
        model: modelToUse,
        messages: [
          { role: 'system', content: this.currentConfig.systemPrompt },
          { role: 'user', content: prompt },
        ],
        max_tokens: 1800,
        temperature: 0.7,
      });

      const content = response.choices[0]?.message?.content || 'Failed to generate refactoring plan.';
      
      return {
        content,
        modelUsed: modelToUse,
        warning
      };
    } catch (error: unknown) {
      throw new Error(`Failed to generate refactoring plan: ${OpenAIService.errorMessage(error)}`);
    }
  }

  /**
   * Produce a prose architecture explanation for a repository. When
   * `repositoryInfo` is provided, its metadata, file tree, and the first
   * 1000 characters of each key file are inlined into the prompt as context.
   *
   * Uses the configured default model (no task-based selection here).
   *
   * @throws Error when no API key is configured or the API call fails.
   */
  async explainArchitecture(url: string, repositoryInfo?: GitHubRepoInfo): Promise<string> {
    if (!this.currentConfig.apiKey) {
      throw new Error('OpenRouter API key not configured. Please set OPENROUTER_API_KEY environment variable.');
    }

    let contextInfo = '';
    if (repositoryInfo) {
      contextInfo = `
      Repository Information:
      - Name: ${repositoryInfo.name}
      - Description: ${repositoryInfo.description || 'No description'}
      - Primary Language: ${repositoryInfo.language || 'Unknown'}
      - File Count: ${repositoryInfo.fileCount}
      - Key Files: ${Object.keys(repositoryInfo.keyFiles).join(', ')}
      
      File Structure:
      ${JSON.stringify(repositoryInfo.fileTree, null, 2)}
      
      Key File Contents:
      ${Object.entries(repositoryInfo.keyFiles).map(([path, content]) => 
        `--- ${path} ---\n${content.substring(0, 1000)}${content.length > 1000 ? '...' : ''}`
      ).join('\n\n')}
      `;
    }

    const prompt = `
    Explain the architecture of the repository at ${url} in detail.
    
    ${contextInfo}
    
    Please provide a comprehensive architectural explanation that covers:
    1. Overall Architecture Overview
    2. Design Patterns Used
    3. Project Structure and Organization
    4. Data Flow and Components
    5. Dependencies and External Integrations
    6. Strengths and Potential Improvements
    7. Scalability Considerations
    8. Recommendations for Different Use Cases
    
    Make the explanation accessible to developers who want to understand and potentially adapt this architecture for their own projects.
    `;

    try {
      const response = await this.openai.chat.completions.create({
        model: this.currentConfig.model,
        messages: [
          { role: 'system', content: this.currentConfig.systemPrompt },
          { role: 'user', content: prompt },
        ],
        max_tokens: 1800,
        temperature: 0.7,
      });

      return response.choices[0]?.message?.content || 'Failed to generate architecture explanation.';
    } catch (error: unknown) {
      throw new Error(`Failed to explain architecture: ${OpenAIService.errorMessage(error)}`);
    }
  }

  /**
   * Review a code snippet, optionally focusing on specific areas.
   *
   * @throws Error when no API key is configured or the API call fails.
   */
  async generateCodeReview(code: string, language: string, focusAreas?: string[], model?: string, isBatchJob: boolean = false): Promise<{ content: string; modelUsed: string; warning?: string }> {
    if (!this.currentConfig.apiKey) {
      throw new Error('OpenRouter API key not configured. Please set OPENROUTER_API_KEY environment variable.');
    }

    // Select model intelligently
    const modelToUse = this.selectModel(model, 'review', isBatchJob);
    
    // Log model selection
    this.logModelSelection(modelToUse, model, 'code_review', isBatchJob);
    
    // Generate warning if needed
    const warning = this.generateModelWarning(modelToUse, isBatchJob);

    const prompt = `
    Please provide a comprehensive code review for the following ${language} code.
    
    ${focusAreas ? `Focus Areas: ${focusAreas.join(', ')}` : ''}
    
    Code to review:
    \`\`\`${language}
    ${code}
    \`\`\`
    
    Please provide feedback on:
    1. Code Quality and Best Practices
    2. Potential Bugs and Issues
    3. Performance Optimizations
    4. Security Considerations
    5. Maintainability and Readability
    6. Refactoring Suggestions
    7. Testing Recommendations
    
    Format your response with clear sections and provide specific code examples for improvements.
    `;

    try {
      const response = await this.openai.chat.completions.create({
        model: modelToUse,
        messages: [
          { role: 'system', content: this.currentConfig.systemPrompt },
          { role: 'user', content: prompt },
        ],
        max_tokens: 1800,
        temperature: 0.7,
      });

      const content = response.choices[0]?.message?.content || 'Failed to generate code review.';
      
      return {
        content,
        modelUsed: modelToUse,
        warning
      };
    } catch (error: unknown) {
      throw new Error(`Failed to generate code review: ${OpenAIService.errorMessage(error)}`);
    }
  }

  /**
   * Generate a structured refactoring plan as JSON.
   *
   * FIX: models routinely wrap JSON output in markdown code fences
   * (```json … ```), which previously made `JSON.parse(content)` fail and
   * forced the degraded fallback structure. We now extract the first
   * `{ … }` region from the response before parsing; the fallback is kept
   * for responses that contain no parseable object at all.
   *
   * NOTE(review): the parsed object is returned unvalidated as
   * `RefactoringPlan` — the model's output shape is trusted.
   *
   * @throws Error when no API key is configured, the API call fails, or
   *   the response contains no content.
   */
  async generateRefactoringPlan(repositoryInfo: GitHubRepoInfo, goals: string[]): Promise<RefactoringPlan> {
    if (!this.currentConfig.apiKey) {
      throw new Error('OpenRouter API key not configured. Please set OPENROUTER_API_KEY environment variable.');
    }

    const prompt = `
    Create a detailed refactoring plan for the following repository:
    
    Repository: ${repositoryInfo.name}
    Description: ${repositoryInfo.description}
    Language: ${repositoryInfo.language}
    File Count: ${repositoryInfo.fileCount}
    
    Goals: ${goals.join(', ')}
    
    Please provide a JSON response with the following structure:
    {
      "overview": "Brief overview of the refactoring plan",
      "phases": [
        {
          "name": "Phase name",
          "description": "Phase description",
          "tasks": [
            {
              "name": "Task name",
              "description": "Task description",
              "type": "extract|transform|modernize|optimize|test",
              "files": ["file1.js", "file2.js"],
              "estimatedTimeHours": 4,
              "priority": "low|medium|high"
            }
          ],
          "estimatedTimeHours": 16,
          "dependencies": ["Phase 1", "Phase 2"]
        }
      ],
      "estimatedTimeHours": 40,
      "risks": ["Risk 1", "Risk 2"],
      "recommendations": ["Recommendation 1", "Recommendation 2"]
    }
    `;

    try {
      const response = await this.openai.chat.completions.create({
        model: this.currentConfig.model,
        messages: [
          { role: 'system', content: this.currentConfig.systemPrompt },
          { role: 'user', content: prompt },
        ],
        max_tokens: 2000,
        temperature: 0.7,
      });

      const content = response.choices[0]?.message?.content;
      if (!content) {
        throw new Error('No response generated');
      }

      // Strip any surrounding prose/markdown fences: take the span from the
      // first '{' to the last '}' (greedy match) before attempting to parse.
      const jsonText = content.match(/\{[\s\S]*\}/)?.[0] ?? content;

      try {
        return JSON.parse(jsonText);
      } catch {
        // If JSON parsing fails, return a basic structure
        return {
          overview: content.substring(0, 200) + '...',
          phases: [],
          estimatedTimeHours: 0,
          risks: ['Failed to parse detailed plan'],
          recommendations: ['Review the generated plan manually'],
        };
      }
    } catch (error: unknown) {
      throw new Error(`Failed to generate refactoring plan: ${OpenAIService.errorMessage(error)}`);
    }
  }

  /**
   * Generate a greeting for a repository-analysis chat session.
   *
   * Never throws: with no API key it returns a canned setup message, and on
   * any API failure it falls back to a generic static greeting.
   */
  async generateWelcomeMessage(repositoryInfo: GitHubRepoInfo): Promise<string> {
    if (!this.currentConfig.apiKey) {
      // Return a default welcome message if no API key is configured
      return `Hello! I've analyzed the ${repositoryInfo.name} repository and I'm ready to help you understand and refactor the codebase.

⚠️ **Setup Required**: To enable AI-powered features, please configure your OpenAI API key.

Repository Overview:
- **${repositoryInfo.name}** by ${repositoryInfo.owner}
- Language: ${repositoryInfo.language || 'Multiple'}
- Files: ${repositoryInfo.fileCount}
- Stars: ${repositoryInfo.stars}

I can help you with:
- Code analysis and understanding
- Refactoring suggestions
- Architecture explanations
- Component extraction
- Integration guidance

What would you like to know about this repository?`;
    }

    const prompt = `
    Generate a welcoming introduction message for a repository analysis chat. The repository is:
    - Name: ${repositoryInfo.name}
    - Description: ${repositoryInfo.description || 'No description'}
    - Primary Language: ${repositoryInfo.language || 'Unknown'}
    - ${repositoryInfo.fileCount} files
    - ${repositoryInfo.stars} stars
    
    Create a friendly, professional greeting that:
    1. Welcomes the user
    2. Briefly summarizes what I found in the repository
    3. Mentions what I can help with regarding refactoring and code analysis
    4. Asks what they'd like to know
    
    Keep it concise (3-4 sentences) and engaging.
    `;

    try {
      const response = await this.openai.chat.completions.create({
        model: this.currentConfig.model,
        messages: [
          { role: 'system', content: this.currentConfig.systemPrompt },
          { role: 'user', content: prompt },
        ],
        max_tokens: 300,
        temperature: 0.8,
      });

      return response.choices[0]?.message?.content || 
        `Hello! I've analyzed the ${repositoryInfo.name} repository. I can help you understand the codebase, suggest refactoring opportunities, and guide you through integrating components into your own projects. What would you like to explore?`;
    } catch {
      return `Hello! I've analyzed the ${repositoryInfo.name} repository. I can help you understand the codebase, suggest refactoring opportunities, and guide you through integrating components into your own projects. What would you like to explore?`;
    }
  }

  /** Flatten a ChatContext into a plain-text system message. */
  private buildContextMessage(context: ChatContext): string {
    let contextMessage = `Repository Context: ${context.repositoryUrl}\n`;
    
    if (context.currentFile) {
      contextMessage += `Current File: ${context.currentFile}\n`;
    }
    
    if (context.selectedCode) {
      contextMessage += `Selected Code:\n\`\`\`\n${context.selectedCode}\n\`\`\`\n`;
    }
    
    if (context.refactoringGoals && context.refactoringGoals.length > 0) {
      contextMessage += `Refactoring Goals: ${context.refactoringGoals.join(', ')}\n`;
    }
    
    return contextMessage;
  }
}
```

--------------------------------------------------------------------------------
/src/tools/consolidated.ts:
--------------------------------------------------------------------------------

```typescript
import { Tool } from '@modelcontextprotocol/sdk/types.js';

/**
 * Streamlined tool definitions for CodeCompass MCP Server
 * Rationalized to 12 atomic, composable tools with clear boundaries
 * Each tool does one thing well with no overlapping responsibilities
 */

export const consolidatedTools: Tool[] = [
  // Core Data Tools (6 tools)
  {
    name: 'get_repository_info',
    description: '📊 Get basic repository metadata, statistics, and key information. Atomic tool focused purely on repository-level data without file content analysis.',
    inputSchema: {
      type: 'object',
      properties: {
        url: {
          type: 'string',
          description: 'GitHub repository URL (e.g., https://github.com/owner/repo)',
        },
        options: {
          type: 'object',
          properties: {
            include_stats: {
              type: 'boolean',
              description: 'Include repository statistics (stars, forks, etc.)',
              default: true,
            },
            include_languages: {
              type: 'boolean',
              description: 'Include language breakdown',
              default: true,
            },
            include_topics: {
              type: 'boolean',
              description: 'Include repository topics and tags',
              default: true,
            },
          },
        },
      },
      required: ['url'],
    },
  },

  {
    name: 'get_file_tree',
    description: '🌳 Get complete directory structure and file listing with filtering options. Focused on file system structure without content analysis.',
    inputSchema: {
      type: 'object',
      properties: {
        url: {
          type: 'string',
          description: 'GitHub repository URL',
        },
        options: {
          type: 'object',
          properties: {
            max_depth: {
              type: 'number',
              description: 'Maximum directory depth to traverse',
              default: 10,
            },
            include_hidden: {
              type: 'boolean',
              description: 'Include hidden files and directories',
              default: false,
            },
            file_extensions: {
              type: 'array',
              items: { type: 'string' },
              description: 'Filter by file extensions (e.g., [".js", ".ts"])',
            },
            exclude_paths: {
              type: 'array',
              items: { type: 'string' },
              description: 'Paths to exclude from listing',
              default: ['node_modules', 'dist', 'build', '.git'],
            },
            include_file_info: {
              type: 'boolean',
              description: 'Include file metadata (size, modified date)',
              default: true,
            },
          },
        },
      },
      required: ['url'],
    },
  },

  {
    name: 'search_repository',
    description: 'Search for patterns, text, functions, or classes across the entire repository with advanced filtering options.',
    inputSchema: {
      type: 'object',
      properties: {
        url: {
          type: 'string',
          description: 'GitHub repository URL',
        },
        query: {
          type: 'string',
          description: 'Search query (supports regex patterns)',
        },
        search_type: {
          type: 'string',
          enum: ['text', 'regex', 'function', 'class', 'variable', 'import'],
          description: 'Type of search to perform',
          default: 'text',
        },
        options: {
          type: 'object',
          properties: {
            case_sensitive: {
              type: 'boolean',
              description: 'Case sensitive search',
              default: false,
            },
            file_extensions: {
              type: 'array',
              items: { type: 'string' },
              description: 'File extensions to search in',
            },
            exclude_paths: {
              type: 'array',
              items: { type: 'string' },
              description: 'Paths to exclude from search',
              default: ['node_modules', 'dist', 'build', '.git'],
            },
            max_results: {
              type: 'number',
              description: 'Maximum number of results',
              default: 100,
            },
            include_context: {
              type: 'boolean',
              description: 'Include surrounding code context',
              default: true,
            },
          },
        },
      },
      required: ['url', 'query'],
    },
  },

  {
    name: 'get_file_content',
    description: '📁 Retrieve content of specific files with smart truncation and batch processing capabilities.\n\n⚠️ FEATURES:\n• Batch processing with concurrent file retrieval\n• Automatic file validation and security checks\n• Rich metadata extraction (file type, language, size, line count)\n• Configurable processing limits and error handling\n• Support for multiple file formats with type detection',
    inputSchema: {
      type: 'object',
      properties: {
        url: {
          type: 'string',
          description: 'GitHub repository URL',
        },
        file_paths: {
          type: 'array',
          items: { type: 'string' },
          description: 'Paths to files to retrieve (supports batch processing)',
        },
        options: {
          type: 'object',
          properties: {
            max_size: {
              type: 'number',
              description: 'Maximum file size in bytes',
              default: 100000,
            },
            include_metadata: {
              type: 'boolean',
              description: 'Include file metadata (size, modified date, etc.)',
              default: false,
            },
            truncate_large_files: {
              type: 'boolean',
              description: 'Truncate files larger than max_size',
              default: true,
            },
            max_concurrent: {
              type: 'number',
              description: 'Maximum concurrent file processing',
              default: 5,
              minimum: 1,
              maximum: 20,
            },
            continue_on_error: {
              type: 'boolean',
              description: 'Continue processing other files if one fails',
              default: true,
            },
            file_extensions: {
              type: 'array',
              items: { type: 'string' },
              description: 'Only process files with these extensions (e.g., [".js", ".ts"])',
            },
            exclude_patterns: {
              type: 'array',
              items: { type: 'string' },
              description: 'Exclude files matching these regex patterns',
            },
            format: {
              type: 'string',
              enum: ['raw', 'parsed', 'summary'],
              description: 'Format for file content',
              default: 'raw',
            },
          },
        },
      },
      required: ['url', 'file_paths'],
    },
  },

  {
    name: 'analyze_codebase',
    description: '🔬 Comprehensive codebase analysis combining structure, architecture, and metrics. Provides unified view of code organization, design patterns, complexity, and quality indicators.',
    inputSchema: {
      type: 'object',
      properties: {
        url: {
          type: 'string',
          description: 'GitHub repository URL',
        },
        file_paths: {
          type: 'array',
          items: { type: 'string' },
          description: 'Specific files to analyze (optional - analyzes all code files if not specified)',
        },
        analysis_types: {
          type: 'array',
          items: {
            type: 'string',
            enum: ['structure', 'architecture', 'metrics', 'patterns', 'complexity'],
          },
          description: 'Types of analysis to perform',
          default: ['structure', 'architecture', 'metrics'],
        },
        options: {
          type: 'object',
          properties: {
            include_functions: {
              type: 'boolean',
              description: 'Include function analysis',
              default: true,
            },
            include_classes: {
              type: 'boolean',
              description: 'Include class analysis',
              default: true,
            },
            include_imports: {
              type: 'boolean',
              description: 'Include import/dependency analysis',
              default: true,
            },
            include_complexity: {
              type: 'boolean',
              description: 'Include complexity metrics',
              default: true,
            },
            include_patterns: {
              type: 'boolean',
              description: 'Include design pattern detection',
              default: true,
            },
            include_components: {
              type: 'boolean',
              description: 'Include reusable component identification',
              default: false,
            },
            languages: {
              type: 'array',
              items: { type: 'string' },
              description: 'Programming languages to analyze',
            },
            confidence_threshold: {
              type: 'number',
              description: 'Minimum confidence score for pattern detection',
              default: 0.7,
            },
          },
        },
      },
      required: ['url'],
    },
  },

  {
    name: 'analyze_dependencies',
    description: '📦 Comprehensive dependency analysis including external packages, internal dependencies, security vulnerabilities, and version conflicts.',
    inputSchema: {
      type: 'object',
      properties: {
        url: {
          type: 'string',
          description: 'GitHub repository URL',
        },
        options: {
          type: 'object',
          properties: {
            include_dev_dependencies: {
              type: 'boolean',
              description: 'Include development dependencies',
              default: true,
            },
            include_security_scan: {
              type: 'boolean',
              description: 'Include security vulnerability scanning',
              default: true,
            },
            include_version_analysis: {
              type: 'boolean',
              description: 'Include version conflict analysis',
              default: true,
            },
            check_outdated: {
              type: 'boolean',
              description: 'Check for outdated packages',
              default: true,
            },
          },
        },
      },
      required: ['url'],
    },
  },

  // AI-Enhanced Tools (3 tools)
  {
    // AI-enhanced review tool: merges LLM-driven analysis with rule-based validation.
    name: 'review_code',
    description: '🔍 Comprehensive code review combining AI insights with rule-based validation. Provides intelligent analysis, security scanning, and actionable recommendations.',
    inputSchema: {
      type: 'object',
      properties: {
        url: {
          type: 'string',
          description: 'GitHub repository URL',
        },
        file_paths: {
          type: 'array',
          items: { type: 'string' },
          description: 'Specific files to review (optional - reviews key files if not specified)',
        },
        // Selects the review engine; 'combined' runs both the AI and rule-based passes.
        review_mode: {
          type: 'string',
          enum: ['ai', 'rules', 'combined'],
          description: 'Review approach: AI-powered, rule-based, or combined',
          default: 'combined',
        },
        review_focus: {
          type: 'array',
          items: {
            type: 'string',
            enum: ['security', 'performance', 'maintainability', 'best-practices', 'bugs', 'accessibility'],
          },
          description: 'Areas to focus the review on',
          default: ['security', 'performance', 'maintainability'],
        },
        options: {
          type: 'object',
          properties: {
            ai_model: {
              type: 'string',
              description: 'AI model to use for analysis (OpenRouter models). Use "auto" for intelligent model selection',
              default: 'auto',
            },
            // Findings below this severity are filtered out of the report.
            severity_threshold: {
              type: 'string',
              enum: ['low', 'medium', 'high', 'critical'],
              description: 'Minimum severity level to report',
              default: 'medium',
            },
            include_fixes: {
              type: 'boolean',
              description: 'Include suggested fixes',
              default: true,
            },
            include_examples: {
              type: 'boolean',
              description: 'Include code examples in suggestions',
              default: true,
            },
            language_specific: {
              type: 'boolean',
              description: 'Include language-specific best practices',
              default: true,
            },
            framework_specific: {
              type: 'boolean',
              description: 'Include framework-specific checks',
              default: true,
            },
          },
        },
      },
      required: ['url'],
    },
  },

  {
    // AI-enhanced explanation tool: turns repository analysis into prose documentation.
    name: 'explain_code',
    description: '📚 AI-powered code explanation generating human-readable documentation, tutorials, and architectural insights. Transforms technical analysis into accessible explanations.',
    inputSchema: {
      type: 'object',
      properties: {
        url: {
          type: 'string',
          description: 'GitHub repository URL',
        },
        file_paths: {
          type: 'array',
          items: { type: 'string' },
          description: 'Specific files to explain (optional - explains key files if not specified)',
        },
        explanation_type: {
          type: 'string',
          enum: ['overview', 'detailed', 'architecture', 'tutorial', 'integration'],
          description: 'Type of explanation to generate',
          default: 'overview',
        },
        options: {
          type: 'object',
          properties: {
            ai_model: {
              type: 'string',
              description: 'AI model to use for explanation (OpenRouter models). Use "auto" for intelligent model selection',
              default: 'auto',
            },
            // Controls vocabulary and depth of the generated explanation.
            target_audience: {
              type: 'string',
              enum: ['beginner', 'intermediate', 'advanced'],
              description: 'Target audience for explanation',
              default: 'intermediate',
            },
            include_examples: {
              type: 'boolean',
              description: 'Include code examples in explanations',
              default: true,
            },
            include_diagrams: {
              type: 'boolean',
              description: 'Include ASCII diagrams where helpful',
              default: true,
            },
            focus_on_patterns: {
              type: 'boolean',
              description: 'Focus on design patterns and architecture',
              default: true,
            },
          },
        },
      },
      required: ['url'],
    },
  },

  {
    // AI-enhanced suggestion tool: proposes refactoring and modernization strategies.
    name: 'suggest_improvements',
    description: '💡 AI-powered improvement suggestions providing strategic refactoring recommendations, modernization plans, and architectural enhancements.',
    inputSchema: {
      type: 'object',
      properties: {
        url: {
          type: 'string',
          description: 'GitHub repository URL',
        },
        file_paths: {
          type: 'array',
          items: { type: 'string' },
          description: 'Specific files to analyze for improvements (optional - analyzes key files if not specified)',
        },
        improvement_goals: {
          type: 'array',
          items: {
            type: 'string',
            enum: ['modernize', 'performance', 'maintainability', 'security', 'readability', 'testability'],
          },
          description: 'Goals for improvement suggestions',
          default: ['modernize', 'maintainability'],
        },
        // Optional; when set, suggestions are tailored toward this framework.
        target_framework: {
          type: 'string',
          description: 'Target framework for improvement suggestions',
        },
        options: {
          type: 'object',
          properties: {
            ai_model: {
              type: 'string',
              description: 'AI model to use for suggestions (OpenRouter models). Use "auto" for intelligent model selection',
              default: 'auto',
            },
            include_code_examples: {
              type: 'boolean',
              description: 'Include before/after code examples',
              default: true,
            },
            priority_level: {
              type: 'string',
              enum: ['low', 'medium', 'high'],
              description: 'Minimum priority level for suggestions',
              default: 'medium',
            },
            estimate_effort: {
              type: 'boolean',
              description: 'Include effort estimates for improvement tasks',
              default: true,
            },
          },
        },
      },
      required: ['url'],
    },
  },

  // Transformation Tools (1 tool)
  {
    // Transformation tool: applies an ordered list of syntax/structural transformations
    // directly to a code string (no repository URL required).
    name: 'transform_code',
    description: '🔧 Apply code transformations including syntax changes, structural reorganization, framework migration, and modernization. Combines syntax-level and structural changes.',
    inputSchema: {
      type: 'object',
      properties: {
        code: {
          type: 'string',
          description: 'Source code to transform',
        },
        transformations: {
          type: 'array',
          items: {
            type: 'object',
            properties: {
              type: {
                type: 'string',
                enum: ['naming', 'modernize', 'framework', 'performance', 'security', 'structure', 'migration'],
                description: 'Type of transformation',
              },
              options: {
                type: 'object',
                description: 'Transformation-specific options',
              },
            },
            required: ['type'],
          },
          description: 'List of transformations to apply',
        },
        language: {
          type: 'string',
          description: 'Programming language of the code',
        },
        // Only used by language-conversion transformations.
        target_language: {
          type: 'string',
          description: 'Target language (for language conversion)',
        },
        // Only used by framework-migration transformations.
        target_framework: {
          type: 'string',
          description: 'Target framework (for framework migration)',
        },
        options: {
          type: 'object',
          properties: {
            preserve_comments: {
              type: 'boolean',
              description: 'Preserve code comments',
              default: true,
            },
            preserve_logic: {
              type: 'boolean',
              description: 'Preserve business logic during transformation',
              default: true,
            },
            update_imports: {
              type: 'boolean',
              description: 'Update import paths automatically',
              default: true,
            },
            include_instructions: {
              type: 'boolean',
              description: 'Include transformation instructions',
              default: true,
            },
            validate_syntax: {
              type: 'boolean',
              description: 'Validate syntax after transformation',
              default: true,
            },
          },
        },
      },
      required: ['code', 'transformations', 'language'],
    },
  },


  // Utility Tools (1 tool)
  {
    // Utility tool: server/operations health probe. Note required is empty,
    // so this tool can be invoked with no arguments at all.
    name: 'health_check',
    description: '🏥 System Health Check - Monitor server health, performance, and operational metrics. Provides comprehensive monitoring dashboard with real-time insights.',
    inputSchema: {
      type: 'object',
      properties: {
        checks: {
          type: 'array',
          items: {
            type: 'string',
            enum: ['api-limits', 'system-health', 'monitoring', 'dependencies', 'configuration'],
          },
          description: 'Types of health checks to perform',
          default: ['api-limits', 'system-health', 'monitoring'],
        },
        // All extra payload sections default to false to keep the response small.
        options: {
          type: 'object',
          properties: {
            include_metrics: {
              type: 'boolean',
              description: 'Include comprehensive system metrics in response',
              default: false,
            },
            include_insights: {
              type: 'boolean',
              description: 'Include performance insights and recommendations',
              default: false,
            },
            include_logs: {
              type: 'boolean',
              description: 'Include recent log entries',
              default: false,
            },
            include_diagnostics: {
              type: 'boolean',
              description: 'Include diagnostic information',
              default: false,
            },
          },
        },
      },
      required: [],
    },
  },
];
```

--------------------------------------------------------------------------------
/docs/API.md:
--------------------------------------------------------------------------------

```markdown
# CodeCompass MCP API Reference

This document provides comprehensive documentation for all 18 tools available in the CodeCompass MCP server.

## Response Format

All tools return responses in this standardized format:

```json
{
  "success": true,
  "data": { /* tool-specific data */ },
  "metadata": {
    "processing_time": 1234,
    "rate_limit_remaining": 4999,
    "cache_hit": false,
    "truncated": false,  // Present when response is truncated
    "truncationReason": "Response size exceeded maximum token limit",
    "maxTokens": 25000,
    "estimatedTokens": 18500,
    "suggestion": "Use max_response_tokens and max_file_content_length options to control response size"
  },
  "error": {  // Only present if success: false
    "code": "ERROR_CODE",
    "message": "Error description",
    "details": { /* additional error info */ }
  }
}
```

### Response Size Management

The server offers two approaches to handle large repository responses:

#### 1. **Truncation Mode (Default)**
Automatically truncates responses when they exceed token limits:
- **Default limit**: 25,000 tokens (approximately 100KB of JSON)
- **Token estimation**: 1 token ≈ 4 characters in JSON format
- **Truncation order**: File contents → File tree depth → Metadata

#### 2. **Chunking Mode (Recommended for Large Repos)**
Splits large responses into multiple manageable chunks:
- **No data loss**: Access all repository content across multiple requests
- **Chunk sizes**: "small" (~10k tokens), "medium" (~20k tokens), "large" (~40k tokens)
- **Pagination**: Use `chunk_index` to navigate through chunks

**Control options**:
- `max_response_tokens`: Set custom token limit (1,000 - 100,000)
- `max_file_content_length`: Limit individual file content length (100 - 10,000 chars)
- `chunk_mode`: Enable chunked responses (true/false)
- `chunk_index`: Specify which chunk to retrieve (0-based)
- `chunk_size`: Choose chunk size ("small", "medium", "large")

When truncation occurs, the response includes truncation metadata. When chunking is enabled, the response includes chunk navigation information.

## Core Data Tools

### 1. `fetch_repository_data`

**Description**: Comprehensive repository analysis and metadata retrieval.

**Parameters**:
```json
{
  "url": "https://github.com/owner/repo",
  "options": {
    "include_structure": true,
    "include_dependencies": true,
    "include_key_files": true,
    "max_files": 50,
    "file_extensions": [".js", ".ts"],
    "max_response_tokens": 25000,
    "max_file_content_length": 1000
  }
}
```

**Option Details**:
- `include_structure` (boolean): Include file tree structure (default: true)
- `include_dependencies` (boolean): Include dependency analysis (default: true)
- `include_key_files` (boolean): Include key file contents (README, package.json, etc.) (default: true)
- `max_files` (number): Maximum number of files to analyze (default: 50)
- `file_extensions` (array): File extensions to focus on (e.g., [".js", ".ts"])
- `max_response_tokens` (number): Maximum response size in tokens (default: 25000, range: 1000-100000)
- `max_file_content_length` (number): Maximum content length per file (default: 1000, range: 100-10000)
- `chunk_mode` (boolean): Enable chunked responses for large repositories (default: false)
- `chunk_index` (number): Chunk index to retrieve (0-based, use with chunk_mode) (default: 0)
- `chunk_size` (string): Chunk size - "small", "medium", or "large" (default: "medium")

**Response**:
```json
{
  "success": true,
  "data": {
    "info": {
      "name": "repository-name",
      "description": "Repository description",
      "language": "JavaScript",
      "owner": "owner-name",
      "stars": 1234,
      "forks": 567,
      "created_at": "2024-01-01T00:00:00Z"
    },
    "structure": {
      "fileCount": 150,
      "lineCount": 10000,
      "fileTree": { /* file structure */ },
      "keyFiles": { /* key file contents */ }
    },
    "dependencies": { /* dependency analysis */ },
    "architecture": { /* architecture analysis */ }
  }
}
```

### 2. `search_repository`

**Description**: Advanced search within repositories with filtering and context.

**Parameters**:
```json
{
  "url": "https://github.com/owner/repo",
  "query": "search pattern",
  "search_type": "text|regex|function|class|variable|import",
  "options": {
    "case_sensitive": false,
    "file_extensions": [".js", ".ts"],
    "exclude_paths": ["node_modules", "dist"],
    "max_results": 100,
    "include_context": true
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "results": [
      {
        "file": "path/to/file.js",
        "line": 42,
        "match": "matched code",
        "context": "surrounding code context"
      }
    ],
    "total_matches": 15,
    "files_searched": 100
  }
}
```

### 3. `get_file_content`

**Description**: Advanced batch file retrieval with smart truncation, security validation, and rich metadata extraction.

**Parameters**:
```json
{
  "url": "https://github.com/owner/repo",
  "file_paths": ["README.md", "package.json", "src/index.js"],
  "options": {
    "max_size": 100000,
    "include_metadata": true,
    "truncate_large_files": true,
    "format": "raw|parsed|summary"
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "README.md": {
      "content": "file content",
      "size": 1234,
      "truncated": false
    },
    "package.json": {
      "content": "{ \"name\": \"example\" }",
      "size": 567,
      "truncated": false
    }
  }
}
```

### 4. `analyze_code_structure`

**Description**: Technical code structure analysis with complexity metrics.

**Parameters**:
```json
{
  "url": "https://github.com/owner/repo",
  "file_paths": ["src/index.js"],  // optional
  "options": {
    "include_functions": true,
    "include_classes": true,
    "include_imports": true,
    "include_complexity": true,
    "languages": ["javascript", "typescript"]
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "functions": [
      {
        "name": "functionName",
        "file": "src/index.js",
        "line": 10,
        "complexity": 5,
        "parameters": ["param1", "param2"]
      }
    ],
    "classes": [
      {
        "name": "ClassName",
        "file": "src/class.js",
        "line": 1,
        "methods": ["method1", "method2"]
      }
    ],
    "imports": [
      {
        "module": "react",
        "type": "default",
        "file": "src/component.js"
      }
    ],
    "complexity": {
      "cyclomatic": 25,
      "cognitive": 30,
      "overall": "medium"
    }
  }
}
```

### 5. `analyze_dependencies`

**Description**: Dependency analysis and security scanning.

**Parameters**:
```json
{
  "url": "https://github.com/owner/repo",
  "options": {
    "include_dev_dependencies": true,
    "include_security_scan": true,
    "include_version_analysis": true,
    "check_outdated": true
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "dependencies": {
      "production": [
        {
          "name": "react",
          "version": "^18.0.0",
          "type": "runtime"
        }
      ],
      "development": [
        {
          "name": "jest",
          "version": "^29.0.0",
          "type": "testing"
        }
      ]
    },
    "security": {
      "vulnerabilities": [
        {
          "package": "package-name",
          "severity": "high",
          "description": "Vulnerability description"
        }
      ]
    },
    "outdated": [
      {
        "package": "package-name",
        "current": "1.0.0",
        "latest": "2.0.0"
      }
    ]
  }
}
```

### 6. `calculate_metrics`

**Description**: Quantitative code quality metrics and technical debt analysis.

**Parameters**:
```json
{
  "url": "https://github.com/owner/repo",
  "options": {
    "metrics": ["complexity", "maintainability", "duplication", "security"],
    "include_file_level": true,
    "include_trend_analysis": false
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "overall": {
      "complexity": 6.5,
      "maintainability": 75,
      "duplication": 15,
      "security": 85,
      "technical_debt": "medium"
    },
    "file_level": {
      "src/index.js": {
        "complexity": 8.2,
        "maintainability": 70,
        "lines": 150
      }
    },
    "recommendations": [
      "Reduce complexity in src/index.js",
      "Address code duplication in utils/"
    ]
  }
}
```

## Code Transformation Tools

### 7. `transform_code`

**Description**: Syntax modernization and language conversion.

**Parameters**:
```json
{
  "code": "var x = 5; function test() { return x + 1; }",
  "transformations": [
    {
      "type": "naming|modernize|framework|performance|security|structure|migration",
      "options": { /* transformation-specific options */ }
    }
  ],
  "language": "javascript",
  "target_language": "typescript",  // optional
  "options": {
    "preserve_comments": true,
    "include_instructions": true,
    "validate_syntax": true
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "transformed_code": "const x = 5; const test = () => x + 1;",
    "transformations_applied": [
      {
        "type": "modernize",
        "description": "Converted var to const, function to arrow function"
      }
    ],
    "syntax_valid": true,
    "instructions": "Code has been modernized to use ES6+ features"
  }
}
```

### 8. `extract_components`

**Description**: Component extraction with reusability scoring.

**Parameters**:
```json
{
  "url": "https://github.com/owner/repo",
  "extraction_types": ["components", "functions", "utilities", "hooks", "types"],
  "options": {
    "min_reusability_score": 60,
    "include_dependencies": true,
    "include_examples": true,
    "framework": "react"
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "components": [
      {
        "name": "Button",
        "file": "src/components/Button.js",
        "reusability_score": 85,
        "dependencies": ["react"],
        "props": ["onClick", "children", "variant"],
        "example_usage": "<Button onClick={handleClick}>Click me</Button>"
      }
    ],
    "functions": [
      {
        "name": "formatDate",
        "file": "src/utils/date.js",
        "reusability_score": 90,
        "parameters": ["date", "format"],
        "return_type": "string"
      }
    ]
  }
}
```

### 9. `adapt_code_structure`

**Description**: Framework migration and architectural restructuring.

**Parameters**:
```json
{
  "url": "https://github.com/owner/repo",
  "target_structure": {
    "framework": "react|vue|angular|express",
    "pattern": "mvc|mvvm|clean",
    "folder_structure": { /* custom structure */ }
  },
  "options": {
    "preserve_logic": true,
    "update_imports": true,
    "generate_config": true
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "migration_plan": {
      "steps": [
        {
          "action": "move",
          "from": "src/components/",
          "to": "components/",
          "reason": "Framework convention"
        }
      ],
      "estimated_effort": "medium",
      "breaking_changes": ["Import paths", "Config structure"]
    },
    "file_mappings": {
      "src/index.js": "src/main.js",
      "src/App.js": "src/App.vue"
    },
    "config_files": {
      "package.json": "{ \"scripts\": { \"dev\": \"vite\" } }",
      "vite.config.js": "export default { /* config */ }"
    }
  }
}
```

### 10. `generate_project_template`

**Description**: Template generation from analysis.

**Parameters**:
```json
{
  "url": "https://github.com/owner/repo",
  "template_type": "starter|library|microservice|fullstack|component-library",
  "options": {
    "project_name": "my-new-project",
    "framework": "react",
    "language": "typescript",
    "include_tests": true,
    "include_docs": true,
    "include_ci": true,
    "package_manager": "npm|yarn|pnpm|bun"
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "template": {
      "name": "my-new-project",
      "structure": {
        "src/": "Source code directory",
        "tests/": "Test files",
        "docs/": "Documentation"
      },
      "files": {
        "package.json": "{ \"name\": \"my-new-project\" }",
        "README.md": "# My New Project",
        "tsconfig.json": "{ \"compilerOptions\": {} }"
      }
    },
    "setup_instructions": [
      "npm install",
      "npm run build",
      "npm test"
    ]
  }
}
```

## Analysis Tools

### 11. `analyze_architecture`

**Description**: Design patterns, layering, and scalability analysis.

**Parameters**:
```json
{
  "url": "https://github.com/owner/repo",
  "options": {
    "pattern_types": ["mvc", "mvvm", "clean", "hexagonal"],
    "include_frameworks": true,
    "include_conventions": true,
    "confidence_threshold": 0.7
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "patterns": [
      {
        "type": "mvc",
        "confidence": 0.85,
        "evidence": ["Controllers found", "Models directory", "View components"],
        "implementation": "Well-structured MVC with clear separation"
      }
    ],
    "frameworks": [
      {
        "name": "React",
        "version": "18.x",
        "usage": "Frontend framework"
      }
    ],
    "scalability": {
      "score": 75,
      "strengths": ["Modular architecture", "Good separation of concerns"],
      "concerns": ["Monolithic structure", "Tight coupling in utils"]
    }
  }
}
```

### 12. `compare_implementations`

**Description**: Multi-repository comparison and benchmarking.

**Parameters**:
```json
{
  "implementations": [
    {
      "name": "Implementation A",
      "url": "https://github.com/owner/repo-a",
      "focus_areas": ["performance", "security"]
    },
    {
      "name": "Implementation B", 
      "url": "https://github.com/owner/repo-b"
    }
  ],
  "comparison_criteria": ["performance", "maintainability", "security", "complexity"],
  "options": {
    "include_metrics": true,
    "include_recommendations": true
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "comparison": {
      "performance": {
        "Implementation A": 85,
        "Implementation B": 70,
        "winner": "Implementation A"
      },
      "maintainability": {
        "Implementation A": 75,
        "Implementation B": 90,
        "winner": "Implementation B"
      }
    },
    "recommendations": [
      "Implementation A has better performance optimizations",
      "Implementation B follows better coding practices"
    ],
    "summary": "Implementation A is better for performance-critical applications"
  }
}
```

### 13. `validate_code_quality`

**Description**: Rule-based validation and standards compliance.

**Parameters**:
```json
{
  "url": "https://github.com/owner/repo",
  "validation_types": ["security", "performance", "best-practices", "accessibility"],
  "options": {
    "severity_level": "low|medium|high|critical",
    "include_fixes": true,
    "framework_specific": true
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "validation_results": {
      "security": {
        "score": 85,
        "issues": [
          {
            "type": "XSS vulnerability",
            "severity": "high",
            "file": "src/component.js",
            "line": 42,
            "fix": "Use proper sanitization"
          }
        ]
      },
      "performance": {
        "score": 70,
        "issues": [
          {
            "type": "Large bundle size",
            "severity": "medium",
            "suggestion": "Implement code splitting"
          }
        ]
      }
    },
    "overall_score": 78,
    "certification": "Good"
  }
}
```

## Utility Tools

### 14. `batch_process`

**Description**: Parallel execution of multiple operations.

**Parameters**:
```json
{
  "operations": [
    {
      "id": "op1",
      "tool": "health_check",
      "params": {},
      "priority": 5
    },
    {
      "id": "op2",
      "tool": "fetch_repository_data",
      "params": {
        "url": "https://github.com/owner/repo"
      },
      "priority": 8
    }
  ],
  "options": {
    "max_concurrent": 3,
    "fail_fast": false,
    "include_progress": true
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "operations": [
      {
        "type": "health_check",
        "id": "op1"
      }
    ],
    "results": [
      {
        "id": "op1",
        "success": true,
        "data": { /* operation result */ },
        "processingTime": 1200
      }
    ],
    "totalTime": 5000,
    "successCount": 2,
    "failureCount": 0
  }
}
```

### 15. `health_check`

**Description**: Server health, API limits, and system monitoring.

**Parameters**:
```json
{
  "checks": ["api-limits", "system-health", "monitoring", "dependencies"],
  "options": {
    "include_metrics": true,
    "include_diagnostics": false
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "status": "healthy",
    "timestamp": "2024-01-17T23:41:31.079Z",
    "checks": {
      "api-limits": {
        "github": {
          "remaining": 4950,
          "limit": 5000,
          "reset": "2024-01-18T00:00:00Z"
        },
        "openrouter": {
          "status": "healthy"
        }
      },
      "system-health": {
        "status": "healthy",
        "memory": {
          "used": 128,
          "total": 512
        },
        "uptime": 3600
      }
    },
    "metrics": {
      "uptime": 3600,
      "memory": {
        "rss": 50331648,
        "heapTotal": 20971520,
        "heapUsed": 15728640
      },
      "version": "1.0.0"
    }
  }
}
```

## AI-Enhanced Tools

### 16. `review_code`

**Description**: AI-powered code review with intelligent insights.

**Parameters**:
```json
{
  "url": "https://github.com/owner/repo",
  "file_paths": ["src/index.js"],  // optional
  "review_focus": ["security", "performance", "maintainability", "best-practices"],
  "options": {
    "ai_model": "auto|anthropic/claude-3.5-sonnet|openai/gpt-4o",
    "severity_threshold": "low|medium|high",
    "include_examples": true,
    "language_specific": true
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "repository": {
      "name": "repo-name",
      "description": "Repository description",
      "language": "JavaScript",
      "owner": "owner-name"
    },
    "review": {
      "files_reviewed": ["src/index.js"],
      "focus_areas": ["security", "performance"],
      "ai_model_used": "anthropic/claude-3.5-sonnet",
      "ai_model_requested": "auto",
      "analysis": "Comprehensive AI-generated review content...",
      "severity_threshold": "medium",
      "timestamp": "2024-01-17T23:41:31.079Z",
      "model_warning": null
    },
    "recommendations": {
      "priority_fixes": [
        "Fix XSS vulnerability in line 42",
        "Optimize database queries in user service"
      ],
      "suggestions": [
        "Consider implementing caching",
        "Add input validation"
      ],
      "best_practices": [
        "Use TypeScript for better type safety",
        "Add comprehensive tests"
      ]
    }
  }
}
```

### 17. `explain_code`

**Description**: AI-generated explanations and documentation.

**Parameters**:
```json
{
  "url": "https://github.com/owner/repo",
  "file_paths": ["src/index.js"],  // optional
  "explanation_type": "overview|detailed|architecture|tutorial|integration",
  "options": {
    "ai_model": "auto|anthropic/claude-3.5-sonnet|openai/gpt-4o",
    "target_audience": "beginner|intermediate|advanced",
    "include_examples": true,
    "include_diagrams": true,
    "focus_on_patterns": true
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "repository": {
      "name": "repo-name",
      "description": "Repository description",
      "language": "JavaScript",
      "owner": "owner-name"
    },
    "explanation": {
      "type": "overview",
      "files_analyzed": ["src/index.js"],
      "ai_model_used": "openai/gpt-4o",
      "ai_model_requested": "auto",
      "target_audience": "intermediate",
      "content": "Detailed AI-generated explanation...",
      "timestamp": "2024-01-17T23:41:31.079Z",
      "model_warning": null
    },
    "metadata": {
      "file_count": 5,
      "total_lines": 1200
    }
  }
}
```

### 18. `suggest_improvements`

**Description**: AI-powered refactoring strategies and recommendations.

**Parameters**:
```json
{
  "url": "https://github.com/owner/repo",
  "file_paths": ["src/index.js"],  // optional
  "improvement_goals": ["modernize", "performance", "maintainability", "security"],
  "target_framework": "react|vue|angular",  // optional
  "options": {
    "ai_model": "auto|anthropic/claude-3.5-sonnet|openai/gpt-4o",
    "include_code_examples": true,
    "priority_level": "low|medium|high",
    "estimate_effort": true
  }
}
```

**Response**:
```json
{
  "success": true,
  "data": {
    "repository": {
      "name": "repo-name",
      "description": "Repository description",
      "language": "JavaScript",
      "owner": "owner-name"
    },
    "refactoring": {
      "goals": ["modernize", "performance"],
      "target_framework": "react",
      "files_analyzed": ["src/index.js"],
      "ai_model_used": "anthropic/claude-3.5-sonnet",
      "ai_model_requested": "auto",
      "suggestions": "Detailed AI-generated refactoring plan...",
      "priority_level": "medium",
      "timestamp": "2024-01-17T23:41:31.079Z",
      "model_warning": null
    },
    "metadata": {
      "file_count": 5,
      "total_lines": 1200,
      "estimated_effort": "2-3 days for experienced developer"
    }
  }
}
```

## Error Handling

### Error Codes

- `INVALID_URL`: Invalid GitHub repository URL
- `REPOSITORY_NOT_FOUND`: Repository does not exist or is private
- `RATE_LIMIT_EXCEEDED`: API rate limit exceeded
- `PROCESSING_ERROR`: General processing error
- `VALIDATION_ERROR`: Input validation failed
- `OPENROUTER_ERROR`: OpenRouter API error
- `NETWORK_ERROR`: Network connectivity issue

### Error Response Format

```json
{
  "success": false,
  "error": {
    "code": "RATE_LIMIT_EXCEEDED",
    "message": "GitHub API rate limit exceeded",
    "details": {
      "limit": 5000,
      "remaining": 0,
      "reset": "2024-01-18T00:00:00Z"
    }
  },
  "metadata": {
    "processing_time": 1234
  }
}
```

## Rate Limiting

### GitHub API
- **Unauthenticated**: 60 requests/hour
- **With token**: 5,000 requests/hour
- **GraphQL**: 5,000 points/hour

### OpenRouter API
- Varies by model and subscription plan
- Check OpenRouter documentation for specific limits

### Best Practices
- Use GitHub token for better rate limits
- Implement exponential backoff
- Monitor rate limits with `health_check`
- Cache responses when possible

## Model Selection Guide

### Auto Selection Logic
- **Code Review**: `anthropic/claude-3.5-sonnet`
- **Explanation**: `openai/gpt-4o`
- **Refactoring**: `anthropic/claude-3.5-sonnet`
- **Batch Jobs**: `openai/gpt-4o-mini`

### Model Characteristics
- **Speed**: `fastest` → `slowest`
- **Cost**: `low` → `highest`
- **Quality**: `good` → `highest`
- **Recommended**: Production-ready models

### Model Transparency
All AI responses include:
- `ai_model_used`: Actual model used
- `ai_model_requested`: Requested model
- `model_warning`: Performance/cost warnings

This comprehensive API reference covers all 18 tools with detailed parameters, responses, and usage examples.
```

--------------------------------------------------------------------------------
/src/index.ts:
--------------------------------------------------------------------------------

```typescript
#!/usr/bin/env node

import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import {
  CallToolRequestSchema,
  ErrorCode,
  ListToolsRequestSchema,
  McpError,
} from '@modelcontextprotocol/sdk/types.js';
import { GitHubService } from './services/github.js';
import { RefactorService } from './services/refactor.js';
import { OpenAIService } from './services/openai.js';
import { consolidatedTools } from './tools/consolidated.js';
import { ToolResponse, ErrorCodes } from './types/responses.js';
import { getConfig } from './utils/config.js';
import { batchProcessFiles, processSingleFile, validateFilePath, extractFileMetadata, getFileStatistics } from './utils/file-processor.js';
import { log, createPerformanceTimer } from './utils/logger.js';
import { monitoring, monitorTool } from './utils/monitoring.js';
import { z } from 'zod';

// Initialize configuration (reads environment/defaults once at startup)
const config = getConfig();

// Log server startup. API credentials are reported only as presence booleans
// (hasGitHubToken / hasOpenRouterKey) — raw values are never logged.
log.info('CodeCompass MCP Server starting up', {
  nodeVersion: process.version,
  nodeEnv: process.env.NODE_ENV,
  configSummary: {
    hasGitHubToken: !!config.github.token,
    hasOpenRouterKey: !!config.openrouter.apiKey,
    defaultModel: config.openrouter.defaultModel,
    maxResponseTokens: config.response.maxTokens,
    logLevel: config.logging.level,
  },
});

// MCP server instance. Tool definitions are advertised via the
// ListToolsRequestSchema handler registered further below.
const server = new Server(
  {
    name: 'codecompass-mcp',
    version: '1.0.0',
  },
  {
    capabilities: {
      tools: {},
    },
  }
);

// Initialize services with configuration (each reads its own settings from env/config)
const githubService = new GitHubService();
const refactorService = new RefactorService();
const openaiService = new OpenAIService();

// Helper function to create standardized responses
function createResponse<T>(data: T, error?: any, metadata?: any): ToolResponse<T> {
  const response: ToolResponse<T> = {
    success: !error,
    metadata: {
      processing_time: Date.now(),
      ...metadata,
    },
  };

  if (error) {
    // Enhanced error handling with contextual messages
    const errorMessage = error.message || 'An error occurred';
    const errorCode = error.code || ErrorCodes.PROCESSING_ERROR;
    
    // Add contextual suggestions based on error type
    let suggestion = '';
    if (errorMessage.includes('rate limit')) {
      suggestion = 'Try reducing request frequency or adding GitHub token for higher limits.';
    } else if (errorMessage.includes('not found') || errorMessage.includes('404')) {
      suggestion = 'Verify the repository URL is correct and the repository is publicly accessible.';
    } else if (errorMessage.includes('timeout')) {
      suggestion = 'Use chunking mode for large repositories: set chunk_mode=true in options.';
    } else if (errorMessage.includes('token')) {
      suggestion = 'Check your GitHub and OpenRouter API tokens in environment variables.';
    } else if (errorMessage.includes('permission') || errorMessage.includes('403')) {
      suggestion = 'Ensure your GitHub token has the necessary permissions for this repository.';
    }
    
    response.error = {
      code: errorCode,
      message: errorMessage,
      details: error.details || error,
      suggestion: suggestion || 'Check the API documentation for more details.',
      timestamp: new Date().toISOString(),
      context: {
        tool: metadata?.tool || 'unknown',
        url: metadata?.url || 'unknown'
      }
    };
  } else {
    response.data = data;
  }

  return response;
}

/**
 * Slice a successful response's key files into a fixed-size chunk.
 *
 * Only responses carrying `data.structure.keyFiles` are chunked; everything
 * else passes through untouched. Per-chunk limits (files per chunk, max file
 * content length) come from `config.response.chunkSizes`, defaulting to the
 * 'medium' profile when `chunkSize` is unrecognized. Chunk navigation info is
 * attached under `data.chunkInfo`.
 */
function chunkResponse<T>(response: ToolResponse<T>, chunkIndex: number, chunkSize: string): ToolResponse<T> {
  if (!response.success || !response.data) {
    return response;
  }

  const payload = response.data as any;
  const result = { ...response };
  const nextData = { ...payload };

  // Resolve per-chunk limits from configuration.
  const sizeTable = config.response.chunkSizes;
  const limits = sizeTable[chunkSize as keyof typeof sizeTable] || sizeTable.medium;

  if (payload.structure?.keyFiles) {
    const entries = Object.entries(payload.structure.keyFiles);
    const start = chunkIndex * limits.filesPerChunk;
    const end = start + limits.filesPerChunk;
    const slice = entries.slice(start, end);

    // Rebuild keyFiles for just this chunk, clipping oversized file bodies.
    nextData.structure = {
      ...payload.structure,
      keyFiles: Object.fromEntries(slice.map(([name, body]) => [
        name,
        typeof body === 'string' && body.length > limits.fileContent
          ? body.substring(0, limits.fileContent) + '\n\n... [Content truncated for chunking] ...'
          : body
      ]))
    };

    // Navigation metadata so callers can page through remaining chunks.
    nextData.chunkInfo = {
      chunkIndex,
      chunkSize,
      totalFiles: entries.length,
      totalChunks: Math.ceil(entries.length / limits.filesPerChunk),
      filesInChunk: slice.length,
      hasMore: end < entries.length,
      nextChunkIndex: end < entries.length ? chunkIndex + 1 : null
    };
  }

  result.data = nextData;
  return result;
}

/**
 * Truncate an oversized response while preserving its structure (fallback for
 * non-chunked mode).
 *
 * Size is estimated from the pretty-printed JSON length (1 token ≈ 4 chars).
 * If the estimate fits within `maxTokens`, the response is returned as-is.
 * Otherwise, key-file contents are clipped to `maxFileContentLength`, the
 * file tree is capped at depth 3 / 50 entries per level, and truncation
 * metadata is attached.
 *
 * Fix: the previous version shallow-copied the response and then mutated
 * `data.structure.keyFiles` / `data.structure.fileTree` in place, silently
 * modifying the caller's object. The mutated parts are now copied first.
 */
function truncateResponse<T>(response: ToolResponse<T>, maxTokens: number = 25000, maxFileContentLength: number = 1000): ToolResponse<T> {
  const jsonString = JSON.stringify(response, null, 2);

  // Rough token estimation: 1 token ≈ 4 characters
  const estimatedTokens = jsonString.length / 4;

  if (estimatedTokens <= maxTokens) {
    return response;
  }

  const truncatedResponse: ToolResponse<T> = { ...response };

  if (truncatedResponse.success && truncatedResponse.data) {
    // Copy the layers we are about to modify so the input stays untouched.
    const data = { ...(truncatedResponse.data as any) };

    if (data.structure) {
      data.structure = { ...data.structure };

      // Truncate file contents first
      if (data.structure.keyFiles) {
        const keyFiles: Record<string, any> = { ...data.structure.keyFiles };
        Object.keys(keyFiles).forEach(filename => {
          if (keyFiles[filename] && keyFiles[filename].length > maxFileContentLength) {
            keyFiles[filename] = keyFiles[filename].substring(0, maxFileContentLength) +
              '\n\n... [Content truncated due to size limits] ...';
          }
        });
        data.structure.keyFiles = keyFiles;
      }

      // Truncate file tree if still too large (builds new nodes, no mutation)
      if (data.structure.fileTree) {
        const truncateFileTree = (tree: any[], maxDepth: number = 3, currentDepth: number = 0): any[] => {
          if (currentDepth >= maxDepth) {
            return [{ name: '...', type: 'truncated', message: 'Directory tree truncated due to size limits' }];
          }

          return tree.slice(0, 50).map(node => ({
            ...node,
            children: node.children ? truncateFileTree(node.children, maxDepth, currentDepth + 1) : undefined
          }));
        };

        data.structure.fileTree = truncateFileTree(data.structure.fileTree);
      }
    }

    // Attach truncation warning metadata (on a copy, never on the input).
    data.metadata = { ...(data.metadata || {}) };
    data.metadata.truncated = true;
    data.metadata.truncationReason = 'Response size exceeded maximum token limit';
    data.metadata.maxTokens = maxTokens;
    data.metadata.estimatedTokens = Math.round(estimatedTokens);
    data.metadata.suggestion = 'Use chunk_mode=true for complete data access or adjust max_response_tokens and max_file_content_length';

    truncatedResponse.data = data;
  }

  return truncatedResponse;
}

/**
 * Wrap a ToolResponse as MCP text content.
 *
 * When `chunkMode` is set, the response is paged via chunkResponse();
 * otherwise it is size-capped via truncateResponse(). The processed response
 * is serialized as pretty-printed JSON in a single text content item.
 */
function formatToolResponse<T>(
  response: ToolResponse<T>, 
  maxTokens: number = 25000, 
  maxFileContentLength: number = 1000,
  chunkMode: boolean = false,
  chunkIndex: number = 0,
  chunkSize: string = 'medium'
) {
  const processed = chunkMode
    ? chunkResponse(response, chunkIndex, chunkSize)
    : truncateResponse(response, maxTokens, maxFileContentLength);

  return {
    content: [
      {
        type: 'text',
        text: JSON.stringify(processed, null, 2),
      },
    ],
  };
}

// Advertise the consolidated tool catalog to MCP clients.
server.setRequestHandler(ListToolsRequestSchema, async () => {
  return {
    tools: consolidatedTools,
  };
});

// Central tool dispatcher. Every request is bracketed by monitoring
// start/complete calls so per-tool latency and error rates are tracked.
// Failures are reported to monitoring AND returned to the caller as a
// standardized error envelope (not re-thrown as a raw MCP error), except for
// unknown tool names which surface as McpError.MethodNotFound.
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const { name, arguments: args } = request.params;
  const requestId = monitoring.generateRequestId();
  const startTime = Date.now();
  
  // Start monitoring this request
  monitoring.startRequest(name, requestId);

  try {
    let result;
    switch (name) {
      // Core Data Tools (6 tools)
      case 'get_repository_info':
        result = await handleGetRepositoryInfo(args);
        break;
      case 'get_file_tree':
        result = await handleGetFileTree(args);
        break;
      case 'search_repository':
        result = await handleSearchRepository(args);
        break;
      case 'get_file_content':
        result = await handleGetFileContent(args);
        break;
      case 'analyze_dependencies':
        result = await handleAnalyzeDependencies(args);
        break;
      case 'analyze_codebase':
        result = await handleAnalyzeCodebase(args);
        break;

      // AI-Enhanced Tools (3 tools)
      case 'review_code':
        result = await handleReviewCode(args);
        break;
      case 'explain_code':
        result = await handleExplainCode(args);
        break;
      case 'suggest_improvements':
        result = await handleSuggestImprovements(args);
        break;

      // Transformation Tools (1 tool)
      case 'transform_code':
        result = await handleTransformCode(args);
        break;

      // Utility Tools (1 tool)
      case 'health_check':
        result = await handleHealthCheck(args);
        break;

      default:
        throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${name}`);
    }
    
    // Mark request as successful
    monitoring.completeRequest(name, startTime, true, undefined, requestId);
    return result;
    
  } catch (error) {
    // Mark request as failed, then return a structured error response.
    monitoring.completeRequest(name, startTime, false, (error as Error).message, requestId);
    
    const response = createResponse(null, error);
    return formatToolResponse(response);
  }
});

// Tool handlers using CodeCompass-main core functionality
/**
 * Return repository metadata; stats, languages, and topics sections are
 * included only when the corresponding options flags are set.
 */
async function handleGetRepositoryInfo(args: any) {
  const { url, options = {} } = args;
  
  try {
    const info = await githubService.getRepositoryInfo(url);

    // Base metadata is always present.
    const repository: any = {
      name: info.name,
      description: info.description,
      owner: info.owner,
      language: info.language,
      defaultBranch: info.defaultBranch,
      createdAt: info.createdAt,
      updatedAt: info.updatedAt,
      license: info.license,
    };

    if (options.include_stats) {
      repository.stats = {
        stars: info.stars,
        fileCount: info.fileCount,
        lineCount: info.lineCount,
      };
    }
    if (options.include_languages) {
      repository.languages = info.languages;
    }
    if (options.include_topics) {
      repository.topics = [];  // Add topics to GitHubRepoInfo interface if needed
    }

    return formatToolResponse(createResponse({ repository }, null, { tool: 'get_repository_info', url }));
  } catch (error) {
    return formatToolResponse(createResponse(null, error, { tool: 'get_repository_info', url }));
  }
}

/**
 * Return the repository file tree plus the effective listing options
 * (defaults applied) as metadata.
 */
async function handleGetFileTree(args: any) {
  const { url, options = {} } = args;
  
  try {
    const tree = await githubService.getFileTree(url);

    const payload = {
      file_tree: tree,
      metadata: {
        max_depth: options.max_depth || 10,
        include_hidden: options.include_hidden || false,
        total_files: tree.length,
        filtered_extensions: options.file_extensions || null,
        excluded_paths: options.exclude_paths || ['node_modules', 'dist', 'build', '.git']
      }
    };

    return formatToolResponse(createResponse(payload, null, { tool: 'get_file_tree', url }));
  } catch (error) {
    return formatToolResponse(createResponse(null, error, { tool: 'get_file_tree', url }));
  }
}

// Legacy handler - remove after testing
/**
 * Fetch repository metadata plus full analysis (dependencies, architecture)
 * in one call, honoring the caller's size-control options.
 */
async function handleFetchRepositoryData(args: any) {
  try {
    const { url, options = {} } = args;

    // Size-control knobs fall back to configured defaults.
    const maxTokens = options.max_response_tokens || config.response.maxTokens;
    const maxFileContentLength = options.max_file_content_length || config.response.maxFileContentLength;
    const chunkMode = options.chunk_mode || false;
    const chunkIndex = options.chunk_index || 0;
    const chunkSize = options.chunk_size || 'medium';

    const info = await githubService.getRepositoryInfo(url);
    const analysis = await githubService.analyzeRepository(url);

    const payload = {
      info,
      structure: {
        fileCount: info.fileCount,
        lineCount: info.lineCount,
        fileTree: info.fileTree,
        keyFiles: info.keyFiles,
      },
      dependencies: analysis.dependencies,
      architecture: analysis.architecture,
    };

    return formatToolResponse(createResponse(payload), maxTokens, maxFileContentLength, chunkMode, chunkIndex, chunkSize);
  } catch (error) {
    return formatToolResponse(createResponse(null, error, { tool: 'fetch_repository_data', url: args.url }));
  }
}

/**
 * Search within a repository's content.
 *
 * Fix: the previous implementation also called getRepositoryInfo() here, but
 * the result was never used — that extra (rate-limited) API call is removed.
 */
async function handleSearchRepository(args: any) {
  try {
    const { url, query, search_type = 'text', options = {} } = args;

    const searchResults = await githubService.searchInRepository(url, query, {
      type: search_type,
      ...options,
    });

    const response = createResponse(searchResults);
    return formatToolResponse(response);
  } catch (error) {
    const response = createResponse(null, error, { tool: 'search_repository', url: args.url, query: args.query });
    return formatToolResponse(response);
  }
}

/**
 * Fetch and process the content of specific repository files.
 *
 * Pipeline: (1) validate all requested paths up-front (fail fast on any
 * invalid path), (2) fetch each file from GitHub, collecting per-file fetch
 * errors, (3) batch-process the fetched contents (size limits, metadata,
 * filtering), (4) merge processing results with fetch errors and attach
 * aggregate statistics.
 */
async function handleGetFileContent(args: any) {
  try {
    const { url, file_paths, options = {} } = args;
    
    // Validate file paths first
    const pathValidationErrors: string[] = [];
    for (const filePath of file_paths) {
      const validation = validateFilePath(filePath);
      if (!validation.valid) {
        pathValidationErrors.push(`${filePath}: ${validation.error}`);
      }
    }
    
    if (pathValidationErrors.length > 0) {
      throw new Error(`Invalid file paths detected:\n${pathValidationErrors.join('\n')}`);
    }
    
    // Fetch file contents from GitHub; individual failures are collected
    // rather than aborting the whole request.
    const fileContents: Array<{ path: string; content: string }> = [];
    const fetchErrors: Record<string, string> = {};
    
    for (const filePath of file_paths) {
      try {
        const content = await githubService.getFileContent(url, filePath);
        fileContents.push({ path: filePath, content });
      } catch (error: any) {
        fetchErrors[filePath] = error.message;
      }
    }
    
    // Process files using batch processing
    const batchOptions = {
      maxConcurrent: options.max_concurrent || config.limits.maxConcurrentRequests,
      continueOnError: options.continue_on_error !== false,
      validatePaths: false, // Already validated above
      includeMetadata: options.include_metadata !== false,
      maxFileSize: options.max_size || config.limits.maxFileSize,
      allowedExtensions: options.file_extensions,
      excludePatterns: options.exclude_patterns,
    };
    
    const batchResult = await batchProcessFiles(fileContents, batchOptions);
    
    // Combine results with fetch errors
    const results: Record<string, any> = {};
    
    // Add successful and failed processing results
    batchResult.results.forEach(result => {
      if (result.success) {
        results[result.filePath] = {
          content: result.content,
          metadata: result.metadata,
          size: result.metadata?.size || 0,
          truncated: result.metadata?.size ? result.metadata.size > (options.max_size || config.limits.maxFileSize) : false,
        };
      } else {
        results[result.filePath] = {
          error: result.error?.message || 'Processing failed',
          details: result.error?.details,
        };
      }
    });
    
    // Add fetch errors
    Object.entries(fetchErrors).forEach(([filePath, error]) => {
      results[filePath] = {
        error: `Failed to fetch: ${error}`,
      };
    });
    
    // Add processing statistics (over successfully processed files only)
    const statistics = getFileStatistics(batchResult.results.filter(r => r.success));
    
    const response = createResponse({
      files: results,
      summary: {
        ...batchResult.summary,
        fetchErrors: Object.keys(fetchErrors).length,
        statistics,
      },
    });
    
    return formatToolResponse(response);
  } catch (error) {
    const response = createResponse(null, error, { tool: 'get_file_content', url: args.url });
    return formatToolResponse(response);
  }
}

/**
 * Analyze code structure for the given repository (optionally restricted to
 * specific files).
 */
async function handleAnalyzeCodebase(args: any) {
  try {
    const { url, file_paths, options = {} } = args;
    const analysis = await githubService.analyzeCodeStructure(url, file_paths, options);
    return formatToolResponse(createResponse(analysis));
  } catch (error) {
    return formatToolResponse(createResponse(null, error));
  }
}

/**
 * Analyze the repository's dependency graph.
 * Note: `options` is accepted for interface consistency but not forwarded.
 */
async function handleAnalyzeDependencies(args: any) {
  try {
    const { url, options = {} } = args;
    const dependencies = await githubService.analyzeDependencies(url);
    return formatToolResponse(createResponse(dependencies));
  } catch (error) {
    return formatToolResponse(createResponse(null, error));
  }
}

/**
 * Compute repository code metrics via the GitHub service.
 */
async function handleCalculateMetrics(args: any) {
  try {
    const { url, options = {} } = args;
    const metrics = await githubService.calculateMetrics(url, options);
    return formatToolResponse(createResponse(metrics));
  } catch (error) {
    return formatToolResponse(createResponse(null, error));
  }
}

/**
 * Apply a set of code transformations (optionally cross-language) via the
 * refactor service.
 */
async function handleTransformCode(args: any) {
  try {
    const { code, transformations, language, target_language, options = {} } = args;
    const transformed = await refactorService.transformCode(
      code,
      transformations,
      language,
      target_language,
      options
    );
    return formatToolResponse(createResponse(transformed));
  } catch (error) {
    return formatToolResponse(createResponse(null, error));
  }
}

/**
 * Extract reusable pieces (components/functions/utilities) from a repository.
 * NOTE(review): `options` is accepted in args but not forwarded to the
 * service — confirm this is intended.
 */
async function handleExtractComponents(args: any) {
  try {
    const { url, extraction_types = ['components', 'functions', 'utilities'] } = args;
    const components = await refactorService.extractReusableComponents(url, extraction_types);
    return formatToolResponse(createResponse(components));
  } catch (error) {
    return formatToolResponse(createResponse(null, error));
  }
}

/**
 * Adapt a repository's code to a target project structure.
 */
async function handleAdaptCodeStructure(args: any) {
  try {
    const { url, target_structure, options = {} } = args;
    const adapted = await refactorService.adaptCodeStructure(url, target_structure, options);
    return formatToolResponse(createResponse(adapted));
  } catch (error) {
    return formatToolResponse(createResponse(null, error));
  }
}

/**
 * Generate boilerplate/template code of the requested type from a repository.
 */
async function handleGenerateProjectTemplate(args: any) {
  try {
    const { url, template_type, options = {} } = args;
    const template = await refactorService.generateBoilerplate(url, template_type, options);
    return formatToolResponse(createResponse(template));
  } catch (error) {
    return formatToolResponse(createResponse(null, error));
  }
}

/**
 * Produce an architecture analysis of the repository.
 */
async function handleAnalyzeArchitecture(args: any) {
  try {
    const { url, options = {} } = args;
    const architecture = await githubService.analyzeArchitecturePublic(url, options);
    return formatToolResponse(createResponse(architecture));
  } catch (error) {
    return formatToolResponse(createResponse(null, error));
  }
}

/**
 * Compare multiple repository implementations against the given criteria.
 */
async function handleCompareImplementations(args: any) {
  try {
    const { implementations, comparison_criteria, options = {} } = args;
    const comparison = await githubService.compareRepositories(implementations, comparison_criteria, options);
    return formatToolResponse(createResponse(comparison));
  } catch (error) {
    return formatToolResponse(createResponse(null, error));
  }
}

/**
 * Run the requested code-quality validations against a repository.
 */
async function handleValidateCodeQuality(args: any) {
  try {
    const { url, validation_types, options = {} } = args;
    const validation = await githubService.validateCodeQuality(url, validation_types, options);
    return formatToolResponse(createResponse(validation));
  } catch (error) {
    return formatToolResponse(createResponse(null, error));
  }
}

async function handleBatchProcess(args: any) {
  try {
    const { operations, options = {} } = args;
    
    const results = [];
    const startTime = Date.now();
    
    for (const operation of operations) {
      const operationStartTime = Date.now();
      try {
        // Recursively call the appropriate handler
        const result = await server.request(
          { method: 'tools/call', params: { name: operation.tool, arguments: operation.params } } as any,
          {} as any
        );
        
        results.push({
          id: operation.id,
          success: true,
          data: result,
          processingTime: Date.now() - operationStartTime,
        });
      } catch (error: any) {
        results.push({
          id: operation.id,
          success: false,
          error: error.message,
          processingTime: Date.now() - operationStartTime,
        });
      }
    }
    
    const batchResult = {
      operations: operations.map((op: any) => ({ type: op.tool, params: op.params, id: op.id })),
      results,
      totalTime: Date.now() - startTime,
      successCount: results.filter(r => r.success).length,
      failureCount: results.filter(r => !r.success).length,
    };
    
    const response = createResponse(batchResult);
    return formatToolResponse(response);
  } catch (error) {
    const response = createResponse(null, error);
    return formatToolResponse(response);
  }
}

/**
 * Report server health, combining the monitoring subsystem's status with
 * optional named checks ('api-limits', 'monitoring', 'dependencies',
 * 'configuration'). Options can additionally attach metrics, performance
 * insights, and the most recent log entries.
 */
async function handleHealthCheck(args: any) {
  try {
    const { checks = ['api-limits', 'system-health', 'monitoring'], options = {} } = args;
    
    // Get comprehensive health status from monitoring system
    const monitoringHealth = monitoring.getHealthStatus();
    const serverMetrics = monitoring.getMetrics();
    
    const health: any = {
      status: monitoringHealth.status,
      timestamp: new Date().toISOString(),
      checks: { ...monitoringHealth.checks },
      metrics: options.include_metrics ? {
        uptime: serverMetrics.uptime,
        memory: serverMetrics.memory,
        version: '1.0.0',
        requestCount: serverMetrics.requestCount,
        errorCount: serverMetrics.errorCount,
        averageResponseTime: serverMetrics.responseTime.average,
        toolUsage: serverMetrics.toolUsage,
      } : undefined,
    };
    
    // Add additional checks based on requested types
    for (const check of checks) {
      switch (check) {
        case 'api-limits':
          // GitHub rate-limit probe; failure is captured, not thrown.
          try {
            health.checks[check] = await githubService.checkApiLimits();
          } catch (error: any) {
            health.checks[check] = { status: 'error', error: error.message };
          }
          break;
        case 'monitoring':
          // Error rate and heap usage are reported as integer percentages.
          health.checks[check] = {
            status: 'healthy',
            totalRequests: serverMetrics.requestCount,
            errorRate: serverMetrics.requestCount > 0 ? Math.round((serverMetrics.errorCount / serverMetrics.requestCount) * 100) : 0,
            uptime: serverMetrics.uptime,
            memoryUsage: Math.round((serverMetrics.memory.heapUsed / serverMetrics.memory.heapTotal) * 100),
          };
          break;
        case 'dependencies':
          // Placeholder: no real dependency probe is performed yet.
          health.checks[check] = { status: 'healthy' };
          break;
        case 'configuration':
          // Reports presence of secrets only — never the values themselves.
          health.checks[check] = {
            status: 'healthy',
            hasGitHubToken: !!config.github.token,
            hasOpenRouterKey: !!config.openrouter.apiKey,
            logLevel: config.logging.level,
            maxResponseTokens: config.response.maxTokens,
          };
          break;
      }
    }
    
    // Add performance insights if requested
    if (options.include_insights) {
      const insights = monitoring.getPerformanceInsights();
      health.insights = insights;
    }
    
    // Add recent logs if requested (last 10 buffered entries)
    if (options.include_logs) {
      const logBuffer = log.getLogBuffer();
      health.recentLogs = logBuffer.slice(-10);
    }
    
    const response = createResponse(health);
    return formatToolResponse(response);
  } catch (error) {
    const response = createResponse(null, error);
    return formatToolResponse(response);
  }
}

// AI-Enhanced Tool Handlers
/**
 * Run an AI-assisted code review over selected files (or, if no file_paths
 * are given, the repository's key files).
 *
 * Fixes: removed the unused `focusAreas` local, and per-file fetches are now
 * issued in parallel instead of awaiting each one sequentially. Unreadable
 * files are still skipped (best effort), as before.
 */
async function handleReviewCode(args: any) {
  try {
    const { url, file_paths, review_focus = ['security', 'performance', 'maintainability'], options = {} } = args;
    
    // Get repository info and code content
    const repoInfo = await githubService.getRepositoryInfo(url);
    let filesToReview: Record<string, string> = {};
    
    if (file_paths && file_paths.length > 0) {
      // Fetch requested files in parallel; Promise.all preserves input order.
      const fetched = await Promise.all(
        file_paths.map(async (filePath: string) => {
          try {
            const content = await githubService.getFileContent(url, filePath);
            return { filePath, content };
          } catch {
            return null; // Skip files that can't be fetched
          }
        })
      );
      for (const entry of fetched) {
        if (entry) {
          filesToReview[entry.filePath] = entry.content;
        }
      }
    } else {
      // Use key files from repository
      filesToReview = repoInfo.keyFiles;
    }
    
    if (Object.keys(filesToReview).length === 0) {
      throw new Error('No files found to review');
    }
    
    // Prepare code for AI review
    const codeContext = Object.entries(filesToReview)
      .map(([path, content]) => `--- ${path} ---\n${content}`)
      .join('\n\n');
    
    // Generate AI review with specified model
    const aiReviewResult = await openaiService.generateCodeReview(
      codeContext,
      repoInfo.language || 'javascript',
      review_focus,
      options.ai_model
    );
    
    const result = {
      repository: {
        name: repoInfo.name,
        description: repoInfo.description,
        language: repoInfo.language,
        owner: repoInfo.owner,
      },
      review: {
        files_reviewed: Object.keys(filesToReview),
        focus_areas: review_focus,
        ai_model_used: aiReviewResult.modelUsed,
        ai_model_requested: options.ai_model || 'auto',
        analysis: aiReviewResult.content,
        severity_threshold: options.severity_threshold || 'medium',
        timestamp: new Date().toISOString(),
        model_warning: aiReviewResult.warning,
      },
      recommendations: {
        priority_fixes: [],
        suggestions: [],
        best_practices: [],
      },
    };
    
    const response = createResponse(result);
    return formatToolResponse(response);
  } catch (error) {
    const response = createResponse(null, error);
    return formatToolResponse(response);
  }
}

/**
 * Produce an AI-generated explanation of a repository (or selected files).
 *
 * 'architecture' explanations use a dedicated service call; all other
 * explanation types ('overview', 'detailed', 'tutorial', 'integration')
 * build a prompt from the fetched code and use chatWithRepository().
 */
async function handleExplainCode(args: any) {
  try {
    const { url, file_paths, explanation_type = 'overview', options = {} } = args;
    
    // Get repository info and code content
    const repoInfo = await githubService.getRepositoryInfo(url);
    let filesToExplain: Record<string, string> = {};
    
    if (file_paths && file_paths.length > 0) {
      // Get specific files
      for (const filePath of file_paths) {
        try {
          const content = await githubService.getFileContent(url, filePath);
          filesToExplain[filePath] = content;
        } catch (error) {
          // Skip files that can't be fetched
        }
      }
    } else {
      // Use key files from repository
      filesToExplain = repoInfo.keyFiles;
    }
    
    if (Object.keys(filesToExplain).length === 0) {
      throw new Error('No files found to explain');
    }
    
    // Generate AI explanation based on type
    let aiExplanation: string;
    let aiExplanationResult: { content: string; modelUsed: string; warning?: string };
    
    switch (explanation_type) {
      case 'architecture':
        aiExplanation = await openaiService.explainArchitecture(url, repoInfo);
        // For architecture, create a mock result for consistency
        // (explainArchitecture does not report which model it used).
        aiExplanationResult = {
          content: aiExplanation,
          modelUsed: options.ai_model || 'anthropic/claude-3.5-sonnet',
          warning: undefined
        };
        break;
      case 'overview':
      case 'detailed':
      case 'tutorial':
      case 'integration':
      default:
        // Create a prompt for the specific explanation type
        const codeContext = Object.entries(filesToExplain)
          .map(([path, content]) => `--- ${path} ---\n${content}`)
          .join('\n\n');
        
        const prompt = `Please provide a ${explanation_type} explanation of this ${repoInfo.language || 'code'} repository:

Repository: ${repoInfo.name}
Description: ${repoInfo.description || 'No description'}
Language: ${repoInfo.language || 'Multiple'}

Code:
${codeContext}

Please focus on:
${options.focus_on_patterns ? '- Design patterns and architecture' : ''}
${options.include_examples ? '- Code examples and usage' : ''}
${options.include_diagrams ? '- Visual diagrams where helpful' : ''}

Target audience: ${options.target_audience || 'intermediate'}`;
        
        aiExplanationResult = await openaiService.chatWithRepository(url, prompt, undefined, options.ai_model);
        aiExplanation = aiExplanationResult.content;
        break;
    }
    
    const result = {
      repository: {
        name: repoInfo.name,
        description: repoInfo.description,
        language: repoInfo.language,
        owner: repoInfo.owner,
      },
      explanation: {
        type: explanation_type,
        files_analyzed: Object.keys(filesToExplain),
        ai_model_used: aiExplanationResult.modelUsed,
        ai_model_requested: options.ai_model || 'auto',
        target_audience: options.target_audience || 'intermediate',
        content: aiExplanation,
        timestamp: new Date().toISOString(),
        model_warning: aiExplanationResult.warning,
      },
      metadata: {
        file_count: Object.keys(filesToExplain).length,
        total_lines: Object.values(filesToExplain).reduce((sum, content) => sum + content.split('\n').length, 0),
      },
    };
    
    const response = createResponse(result);
    return formatToolResponse(response);
  } catch (error) {
    const response = createResponse(null, error);
    return formatToolResponse(response);
  }
}

/**
 * Generates AI-powered refactoring suggestions for a repository.
 * Analyzes either the caller-specified files or the repository's key files,
 * then asks the OpenAI service for a refactoring plan toward the given goals.
 */
async function handleSuggestImprovements(args: any) {
  try {
    const {
      url,
      file_paths,
      refactoring_goals = ['modernize', 'maintainability'],
      target_framework,
      options = {},
    } = args;

    const repoInfo = await githubService.getRepositoryInfo(url);

    // Collect the code to analyze: explicit file list if given, key files otherwise.
    let selectedFiles: Record<string, string> = {};
    if (file_paths && file_paths.length > 0) {
      for (const path of file_paths) {
        try {
          selectedFiles[path] = await githubService.getFileContent(url, path);
        } catch (error) {
          // Best-effort: unreadable files are simply excluded.
        }
      }
    } else {
      selectedFiles = repoInfo.keyFiles;
    }

    if (Object.keys(selectedFiles).length === 0) {
      throw new Error('No files found to analyze for refactoring');
    }

    // Describe the migration target for the AI prompt.
    const targetProject = {
      framework: target_framework || 'Not specified',
      language: repoInfo.language || 'javascript',
      constraints: [],
      timeline: 'Not specified',
    };

    const suggestions = await openaiService.suggestRefactoringPlan(
      url,
      targetProject,
      refactoring_goals,
      options.ai_model
    );

    const analyzedPaths = Object.keys(selectedFiles);
    const lineTotal = Object.values(selectedFiles).reduce(
      (sum, content) => sum + content.split('\n').length,
      0
    );

    const result = {
      repository: {
        name: repoInfo.name,
        description: repoInfo.description,
        language: repoInfo.language,
        owner: repoInfo.owner,
      },
      refactoring: {
        goals: refactoring_goals,
        target_framework: target_framework,
        files_analyzed: analyzedPaths,
        ai_model_used: suggestions.modelUsed,
        ai_model_requested: options.ai_model || 'auto',
        suggestions: suggestions.content,
        priority_level: options.priority_level || 'medium',
        timestamp: new Date().toISOString(),
        model_warning: suggestions.warning,
      },
      metadata: {
        file_count: analyzedPaths.length,
        total_lines: lineTotal,
        estimated_effort: options.estimate_effort ? 'Will be provided by AI' : null,
      },
    };

    return formatToolResponse(createResponse(result));
  } catch (error) {
    return formatToolResponse(createResponse(null, error));
  }
}

// Start the server
/**
 * Connects the MCP server to a stdio transport; the transport keeps the
 * process alive. On a fatal startup error the process now exits non-zero.
 */
async function main() {
  const transport = new StdioServerTransport();
  await server.connect(transport);
}

main().catch((error) => {
  // Previously `catch(console.error)` logged and let the process finish with
  // exit code 0, hiding startup failures from supervisors/MCP clients.
  console.error(error);
  process.exit(1);
});
```

--------------------------------------------------------------------------------
/src/services/github.ts:
--------------------------------------------------------------------------------

```typescript
import { Octokit } from '@octokit/rest';
import { GitHubRepoInfo, FileNode, DependencyInfo, RepositoryAnalysis } from '../types/index.js';

export class GitHubService {
  private octokit: Octokit;
  private isAuthenticated: boolean;
  private cache: Map<string, any> = new Map();
  private cacheTimeout: number = 5 * 60 * 1000; // 5 minutes

  /**
   * Creates the Octokit client, authenticated when GITHUB_TOKEN or
   * GITHUB_API_KEY is set; otherwise falls back to the rate-limited public API.
   */
  constructor() {
    const token = process.env.GITHUB_TOKEN || process.env.GITHUB_API_KEY;

    this.octokit = new Octokit({ auth: token });
    this.isAuthenticated = Boolean(token);

    if (!this.isAuthenticated) {
      console.warn('GitHub token not provided. Using public API with rate limits.');
    }
  }

  /** Builds a deterministic cache key from a method name plus its serialized parameters. */
  private getCacheKey(method: string, params: any): string {
    const serialized = JSON.stringify(params);
    return `${method}:${serialized}`;
  }

  /**
   * Returns a cached value if it is younger than the cache timeout.
   *
   * Fix: expired entries are now evicted on access; previously they stayed in
   * the map forever, so the cache grew unboundedly with stale data.
   */
  private getCachedResult<T>(key: string): T | null {
    const cached = this.cache.get(key);
    if (!cached) {
      return null;
    }
    if (Date.now() - cached.timestamp < this.cacheTimeout) {
      return cached.data;
    }
    // Stale: drop it so the map does not accumulate dead entries.
    this.cache.delete(key);
    return null;
  }

  /** Stores a value in the in-memory cache, stamped with the current time. */
  private setCachedResult<T>(key: string, data: T): void {
    const entry = { data, timestamp: Date.now() };
    this.cache.set(key, entry);
  }

  /**
   * Runs `operation`, retrying on failure with exponential backoff.
   *
   * Special case: a 403 whose rate-limit headers show zero remaining requests
   * is treated as a GitHub rate limit. If the reset is less than five minutes
   * away, the call sleeps until the window resets and retries.
   * NOTE(review): that rate-limit wait still consumes one of the `maxRetries`
   * attempts (the loop index advances on `continue`) — confirm this attempt
   * budget is intended.
   *
   * @param operation async thunk to execute
   * @param maxRetries total attempts before the last error is rethrown
   * @param delay base backoff in ms, doubled each failed attempt (1s, 2s, 4s, ...)
   */
  private async withRetry<T>(
    operation: () => Promise<T>,
    maxRetries: number = 3,
    delay: number = 1000
  ): Promise<T> {
    for (let i = 0; i < maxRetries; i++) {
      try {
        return await operation();
      } catch (error: any) {
        // GitHub signals rate limiting with HTTP 403 plus these headers.
        if (error.status === 403 && error.response?.headers?.['x-ratelimit-remaining'] === '0') {
          const resetTime = parseInt(error.response.headers['x-ratelimit-reset']) * 1000;
          const waitTime = resetTime - Date.now();
          
          if (waitTime > 0 && waitTime < 5 * 60 * 1000) { // Only wait if less than 5 minutes
            console.log(`Rate limit exceeded. Waiting ${Math.ceil(waitTime / 1000)} seconds...`);
            await new Promise(resolve => setTimeout(resolve, waitTime));
            continue;
          }
        }
        
        // Last attempt: surface the original error to the caller.
        if (i === maxRetries - 1) {
          throw error;
        }
        
        // Exponential backoff
        await new Promise(resolve => setTimeout(resolve, delay * Math.pow(2, i)));
      }
    }
    
    // Unreachable in practice (the final failed attempt rethrows above);
    // kept so all code paths visibly return or throw.
    throw new Error('Max retries exceeded');
  }

  /**
   * Runs the full analysis pipeline for a repository: metadata, dependency
   * inventory, architecture, code quality, and refactoring potential.
   */
  async analyzeRepository(url: string): Promise<RepositoryAnalysis> {
    const info = await this.getRepositoryInfo(url);
    const dependencies = await this.analyzeDependencies(url);
    const architecture = await this.analyzeArchitecture(url, info);
    const codeQuality = await this.analyzeCodeQuality(url, info);
    const refactoringPotential = await this.analyzeRefactoringPotential(url, info);

    return { info, dependencies, architecture, codeQuality, refactoringPotential };
  }

  /**
   * Fetches repository metadata, language breakdown, full file tree, and the
   * contents of key files, caching the combined result for five minutes.
   *
   * Degrades gracefully: if the tree API fails (e.g. rate limits), it falls
   * back to fetching just README/package.json so callers still get something.
   * `lineCount` is an estimate extrapolated from the fetched key files.
   *
   * @param url any github.com repository URL
   * @throws Error on 404 (not found / private) or unrecoverable rate limits
   */
  async getRepositoryInfo(url: string): Promise<GitHubRepoInfo> {
    const { owner, repo } = this.parseGitHubUrl(url);
    const cacheKey = this.getCacheKey('getRepositoryInfo', { owner, repo });
    
    // Check cache first
    const cached = this.getCachedResult<GitHubRepoInfo>(cacheKey);
    if (cached) {
      return cached;
    }

    try {
      // Get repository info with retry logic
      const { data: repoData } = await this.withRetry(() => 
        this.octokit.rest.repos.get({ owner, repo })
      );

      // Get languages with retry logic
      const { data: languages } = await this.withRetry(() => 
        this.octokit.rest.repos.listLanguages({ owner, repo })
      );

      // Get file tree with retry logic and fallback for rate limits
      let fileTree: FileNode[] = [];
      let fileCount = 0;
      let keyFiles: Record<string, string> = {};
      
      try {
        const { data: treeData } = await this.withRetry(() => 
          this.octokit.rest.git.getTree({
            owner,
            repo,
            tree_sha: repoData.default_branch,
            recursive: 'true',
          })
        );

        fileTree = this.buildFileTree(treeData.tree);
        // Only 'blob' entries are files; 'tree' entries are directories.
        fileCount = treeData.tree.filter(item => item.type === 'blob').length;
        
        // Fetch key files for comprehensive analysis
        console.log(`Fetching key files for ${repoData.name}...`);
        keyFiles = await this.getKeyRepositoryFiles(url, fileTree);
        console.log(`Fetched ${Object.keys(keyFiles).length} key files`);
      } catch (treeError: any) {
        // If we hit rate limits on tree API, try to get basic structure
        console.warn('Failed to fetch full file tree, falling back to basic analysis');
        
        // Try to get at least README and package.json
        try {
          const basicFiles = ['README.md', 'README.txt', 'README', 'package.json'];
          for (const fileName of basicFiles) {
            try {
              const content = await this.getFileContent(url, fileName);
              keyFiles[fileName] = content;
            } catch (fileError) {
              // Skip files that don't exist
            }
          }
        } catch (fallbackError) {
          console.warn('Failed to fetch basic files, continuing with minimal info');
        }
      }
      
      // Calculate actual line count from fetched files
      const actualLineCount = Object.values(keyFiles).reduce((total, content) => {
        return total + content.split('\n').length;
      }, 0);
      
      // Estimate total line count based on fetched files ratio:
      // (average lines per fetched file) * (total file count, floored at 10);
      // falls back to a ~50 lines/file guess when nothing could be fetched.
      const estimatedLineCount = actualLineCount > 0 
        ? Math.floor((actualLineCount / Math.max(1, Object.keys(keyFiles).length)) * Math.max(fileCount, 10))
        : Math.floor(Math.max(fileCount, 10) * 50);

      const result: GitHubRepoInfo = {
        name: repoData.name,
        description: repoData.description,
        owner: repoData.owner.login,
        stars: repoData.stargazers_count,
        language: repoData.language,
        languages,
        fileCount,
        lineCount: estimatedLineCount,
        fileTree,
        keyFiles,
        license: repoData.license?.name,
        defaultBranch: repoData.default_branch,
        createdAt: repoData.created_at,
        updatedAt: repoData.updated_at,
      };

      // Cache the result
      this.setCachedResult(cacheKey, result);
      return result;
    } catch (error: any) {
      if (error.status === 404) {
        throw new Error('Repository not found or not accessible');
      }
      if (error.status === 403 && error.message.includes('rate limit')) {
        throw new Error(`GitHub API rate limit exceeded. Please provide a GitHub token for higher limits. Error: ${error.message}`);
      }
      throw new Error(`Failed to fetch repository: ${error.message}`);
    }
  }

  /**
   * Fetches the repository's file tree, optionally narrowed to a sub-path.
   *
   * Fix: both API calls now go through withRetry, consistent with
   * getRepositoryInfo/getFileContent — previously this method alone failed
   * immediately on transient rate-limit errors.
   *
   * @param path optional directory path; when given, only that subtree's
   *             children are returned ([] if the path does not exist).
   */
  async getFileTree(url: string, path?: string): Promise<FileNode[]> {
    const { owner, repo } = this.parseGitHubUrl(url);

    try {
      const { data: repoData } = await this.withRetry(() =>
        this.octokit.rest.repos.get({ owner, repo })
      );

      const { data: treeData } = await this.withRetry(() =>
        this.octokit.rest.git.getTree({
          owner,
          repo,
          tree_sha: repoData.default_branch,
          recursive: 'true',
        })
      );

      const fileTree = this.buildFileTree(treeData.tree);

      if (path) {
        return this.filterTreeByPath(fileTree, path);
      }

      return fileTree;
    } catch (error: any) {
      throw new Error(`Failed to fetch file tree: ${error.message}`);
    }
  }

  async getFileContent(url: string, filePath: string): Promise<string> {
    const { owner, repo } = this.parseGitHubUrl(url);
    const cacheKey = this.getCacheKey('getFileContent', { owner, repo, filePath });
    
    // Check cache first
    const cached = this.getCachedResult<string>(cacheKey);
    if (cached) {
      return cached;
    }

    try {
      const { data } = await this.withRetry(() => 
        this.octokit.rest.repos.getContent({
          owner,
          repo,
          path: filePath,
        })
      );

      if ('content' in data) {
        const content = Buffer.from(data.content, 'base64').toString('utf-8');
        this.setCachedResult(cacheKey, content);
        return content;
      }
      
      throw new Error('File content not available');
    } catch (error: any) {
      if (error.status === 404) {
        throw new Error(`File not found: ${filePath}`);
      }
      if (error.status === 403 && error.message.includes('rate limit')) {
        throw new Error(`GitHub API rate limit exceeded. Please provide a GitHub token for higher limits. Error: ${error.message}`);
      }
      throw new Error(`Failed to fetch file content: ${error.message}`);
    }
  }

  /** Convenience wrapper: fetches the file tree, then pulls down the key files. */
  async getKeyFiles(url: string): Promise<Record<string, string>> {
    const tree = await this.getFileTree(url);
    return await this.getKeyRepositoryFiles(url, tree);
  }

  async analyzeDependencies(url: string): Promise<DependencyInfo[]> {
    const dependencies: DependencyInfo[] = [];
    
    try {
      // Check for package.json
      try {
        const packageJson = await this.getFileContent(url, 'package.json');
        const pkg = JSON.parse(packageJson);
        
        // Add regular dependencies
        if (pkg.dependencies) {
          for (const [name, version] of Object.entries(pkg.dependencies)) {
            dependencies.push({
              name,
              version: version as string,
              type: 'dependency',
              source: 'package.json',
            });
          }
        }
        
        // Add dev dependencies
        if (pkg.devDependencies) {
          for (const [name, version] of Object.entries(pkg.devDependencies)) {
            dependencies.push({
              name,
              version: version as string,
              type: 'devDependency',
              source: 'package.json',
            });
          }
        }
        
        // Add peer dependencies
        if (pkg.peerDependencies) {
          for (const [name, version] of Object.entries(pkg.peerDependencies)) {
            dependencies.push({
              name,
              version: version as string,
              type: 'peerDependency',
              source: 'package.json',
            });
          }
        }
      } catch (error) {
        // package.json not found, continue with other dependency files
      }
      
      // Check for requirements.txt
      try {
        const requirementsTxt = await this.getFileContent(url, 'requirements.txt');
        const lines = requirementsTxt.split('\n').filter(line => line.trim() && !line.startsWith('#'));
        
        for (const line of lines) {
          const match = line.match(/^([^=><]+)([=><]=?.*)?$/);
          if (match) {
            dependencies.push({
              name: match[1].trim(),
              version: match[2] || '*',
              type: 'dependency',
              source: 'requirements.txt',
            });
          }
        }
      } catch (error) {
        // requirements.txt not found
      }
      
      // Add more dependency file parsers as needed (Gemfile, Cargo.toml, etc.)
      
    } catch (error: any) {
      console.error('Error analyzing dependencies:', error.message);
    }
    
    return dependencies;
  }

  /**
   * Infers frameworks and architectural patterns from manifest contents and
   * the shape of the directory tree. Heuristic substring matching only.
   */
  private async analyzeArchitecture(url: string, info: GitHubRepoInfo): Promise<any> {
    const keyFiles = info.keyFiles;
    const pkg = keyFiles['package.json'];

    // Framework detection: naive substring checks against package.json.
    const frameworks: string[] = [];
    if (pkg?.includes('react')) frameworks.push('React');
    if (pkg?.includes('vue')) frameworks.push('Vue');
    if (pkg?.includes('@angular')) frameworks.push('Angular');
    if (pkg?.includes('express')) frameworks.push('Express');

    // Pattern detection: based purely on well-known folder names.
    const fileTree = info.fileTree;
    const folders = this.extractFolders(fileTree);
    const patterns: string[] = [];
    if (folders.includes('models') && folders.includes('views') && folders.includes('controllers')) {
      patterns.push('MVC');
    }
    if (folders.includes('components')) {
      patterns.push('Component-based');
    }
    if (folders.includes('services') && folders.includes('models')) {
      patterns.push('Layered');
    }

    return {
      patterns,
      frameworks,
      structure: this.analyzeProjectStructure(fileTree),
      entryPoints: this.findEntryPoints(keyFiles),
      configFiles: this.findConfigFiles(keyFiles),
      testFiles: this.findTestFiles(fileTree),
      documentationFiles: this.findDocumentationFiles(keyFiles),
    };
  }

  /**
   * Produces a lightweight code-quality snapshot (complexity, maintainability,
   * duplication, and a list of simple smells) from the fetched key files.
   */
  private async analyzeCodeQuality(url: string, info: GitHubRepoInfo): Promise<any> {
    const keyFiles = info.keyFiles;
    const codeSmells: string[] = [];

    for (const [filePath, content] of Object.entries(keyFiles)) {
      // Very large files are hard to navigate and review.
      if (content.length > 10000) {
        codeSmells.push(`Large file: ${filePath}`);
      }

      // Leftover work markers.
      if (content.includes('TODO') || content.includes('FIXME')) {
        codeSmells.push(`TODO/FIXME found in: ${filePath}`);
      }

      // More than a handful of over-long lines suggests formatting problems.
      const overlongCount = content.split('\n').filter(line => line.length > 120).length;
      if (overlongCount > 5) {
        codeSmells.push(`Long lines in: ${filePath}`);
      }
    }

    return {
      complexity: this.calculateComplexity(keyFiles),
      maintainability: this.calculateMaintainability(keyFiles),
      duplicateCode: this.detectDuplicateCode(keyFiles),
      codeSmells,
    };
  }

  /**
   * Scans key files for refactoring opportunities: extractable components,
   * reusable utilities, and simple modernization candidates.
   */
  private async analyzeRefactoringPotential(url: string, info: GitHubRepoInfo): Promise<any> {
    const keyFiles = info.keyFiles;
    const extractableComponents: any[] = [];
    const reusableUtilities: any[] = [];
    const modernizationOpportunities: any[] = [];

    for (const [filePath, content] of Object.entries(keyFiles)) {
      // Files whose path mentions "component" are candidate extractions.
      const looksLikeComponent =
        filePath.includes('component') || filePath.includes('Component');
      if (looksLikeComponent) {
        extractableComponents.push({
          name: this.extractComponentName(filePath),
          path: filePath,
          type: 'component',
          dependencies: this.extractDependencies(content),
          complexity: this.calculateFileComplexity(content),
          reusabilityScore: this.calculateReusabilityScore(content),
          description: this.extractDescription(content),
        });
      }

      // Utility/helper modules are candidates for a shared library.
      if (filePath.includes('util') || filePath.includes('helper')) {
        reusableUtilities.push({
          name: this.extractUtilityName(filePath),
          path: filePath,
          functions: this.extractFunctions(content),
          description: this.extractDescription(content),
          dependencies: this.extractDependencies(content),
        });
      }

      // Files using only `var` declarations can be modernized to const/let.
      const usesOnlyVar =
        content.includes('var ') && !content.includes('const ') && !content.includes('let ');
      if (usesOnlyVar) {
        modernizationOpportunities.push({
          type: 'syntax',
          description: 'Use const/let instead of var',
          files: [filePath],
          suggestion: 'Replace var declarations with const/let',
          impact: 'low',
        });
      }
    }

    return {
      extractableComponents,
      reusableUtilities,
      configurationFiles: this.findConfigFiles(keyFiles),
      boilerplateCode: this.findBoilerplateCode(keyFiles),
      modernizationOpportunities,
    };
  }

  /**
   * Extracts { owner, repo } from a github.com URL.
   *
   * Fix: the repo segment was previously returned verbatim, so clone-style
   * URLs such as "https://github.com/a/b.git" yielded a repo of "b.git",
   * which the REST API rejects. The ".git" suffix is now stripped.
   *
   * @throws Error on anything that does not look like a github.com repo URL.
   */
  private parseGitHubUrl(url: string): { owner: string; repo: string } {
    const match = url.match(/github\.com\/([^\/]+)\/([^\/]+)/);
    if (!match) {
      throw new Error('Invalid GitHub URL format');
    }
    return { owner: match[1], repo: match[2].replace(/\.git$/, '') };
  }

  /**
   * Converts the flat git tree listing into a nested FileNode hierarchy.
   *
   * Fixes: the listing is copied before sorting — Array.prototype.sort is
   * in-place, so the original silently reordered the caller's array — and the
   * unused `pathMap` local is removed.
   *
   * NOTE(review): intermediate directory nodes inherit the `sha` of whichever
   * entry first created them (possibly a descendant blob's sha) — confirm
   * whether callers rely on directory shas before changing this.
   */
  private buildFileTree(gitTree: any[]): FileNode[] {
    const tree: FileNode[] = [];

    // Sort a copy by path so parents are created before their children,
    // without mutating the caller's array.
    const sortedTree = [...gitTree].sort((a, b) => a.path.localeCompare(b.path));

    for (const item of sortedTree) {
      const pathParts = item.path.split('/');
      let currentLevel = tree;
      let currentPath = '';

      for (let i = 0; i < pathParts.length; i++) {
        const part = pathParts[i];
        currentPath = currentPath ? `${currentPath}/${part}` : part;

        let existingItem = currentLevel.find(node => node.name === part);

        if (!existingItem) {
          // Only the final segment of a blob entry is a file; every other
          // segment is an (implicit) directory.
          const isFile = i === pathParts.length - 1 && item.type === 'blob';
          existingItem = {
            name: part,
            path: currentPath,
            type: isFile ? 'file' : 'directory',
            children: isFile ? undefined : [],
            size: isFile ? item.size : undefined,
            sha: item.sha,
          };
          currentLevel.push(existingItem);
        }

        if (existingItem.children) {
          currentLevel = existingItem.children;
        }
      }
    }

    return tree;
  }

  /**
   * Walks the tree along `path` and returns the children of the final
   * directory segment; returns [] when any segment is missing or is a file.
   */
  private filterTreeByPath(tree: FileNode[], path: string): FileNode[] {
    let level = tree;

    for (const segment of path.split('/')) {
      const node = level.find(n => n.name === segment);
      if (!node || !node.children) {
        return [];
      }
      level = node.children;
    }

    return level;
  }

  /**
   * Downloads a curated subset of repository files for analysis.
   *
   * Files are ranked by priority — known manifests/docs (3), code files (2),
   * everything else (1) — with ties broken smaller-first. Fetching stops once
   * the running total would exceed ~500KB *and* at least 10 files were
   * fetched, or after fetching a non-priority file when more than 20 files
   * are already present.
   *
   * NOTE(review): the budget check compares the tree-reported size before
   * fetching but accumulates decoded content length afterwards, so the 500KB
   * cap can overshoot slightly — confirm this tolerance is acceptable.
   */
  private async getKeyRepositoryFiles(url: string, fileTree: FileNode[]): Promise<Record<string, string>> {
    const keyFiles: Record<string, string> = {};
    
    // Priority files to include for comprehensive analysis
    const priorityPatterns = [
      /^README\.md$/i,
      /^README\.txt$/i,
      /^CONTRIBUTING\.md$/i,
      /^LICENSE$/i,
      /^package\.json$/i,
      /^pyproject\.toml$/i,
      /^requirements\.txt$/i,
      /^Cargo\.toml$/i,
      /^go\.mod$/i,
      /^pom\.xml$/i,
      /^build\.gradle$/i,
      /^Dockerfile$/i,
      /^docker-compose\.yml$/i,
      /^\.gitignore$/i,
      /^tsconfig\.json$/i,
      /^webpack\.config\./i,
      /^vite\.config\./i,
      /^next\.config\./i,
      /^tailwind\.config\./i,
    ];

    // Get all files, prioritizing smaller ones and key configuration files
    const allFiles: Array<{
      path: string;
      size: number;
      priority: number;
      isSmall: boolean;
    }> = [];
    
    // Flatten the tree into path/size/priority records (depth-first).
    const collectFiles = (nodes: FileNode[], currentPath = '') => {
      for (const node of nodes) {
        if (node.type === 'file') {
          const filePath = currentPath ? `${currentPath}/${node.name}` : node.name;
          const isHighPriority = priorityPatterns.some(pattern => pattern.test(node.name));
          const isSmallFile = (node.size || 0) < 10000; // Files under 10KB
          const isCodeFile = /\.(js|ts|jsx|tsx|py|java|cpp|c|h|go|rs|php|rb|swift|kt|dart)$/i.test(node.name);
          
          allFiles.push({
            path: filePath,
            size: node.size || 0,
            priority: isHighPriority ? 3 : (isCodeFile ? 2 : 1),
            isSmall: isSmallFile
          });
        } else if (node.children) {
          const newPath = currentPath ? `${currentPath}/${node.name}` : node.name;
          collectFiles(node.children, newPath);
        }
      }
    };
    
    collectFiles(fileTree);
    
    // Sort by priority, then by size (smaller first)
    allFiles.sort((a, b) => {
      if (a.priority !== b.priority) return b.priority - a.priority;
      return a.size - b.size;
    });

    // Fetch files up to ~500KB total content to stay within reasonable limits
    let totalSize = 0;
    const maxTotalSize = 500000; // 500KB
    
    for (const file of allFiles) {
      if (totalSize + file.size > maxTotalSize && Object.keys(keyFiles).length > 10) {
        break; // Stop if we've reached size limit and have enough files
      }
      
      try {
        const content = await this.getFileContent(url, file.path);
        keyFiles[file.path] = content;
        totalSize += content.length;
        
        // Always include high priority files regardless of size constraints
        if (file.priority < 3 && Object.keys(keyFiles).length > 20) {
          break; // Limit to ~20 files for non-priority files
        }
      } catch (error) {
        // Skip files that can't be fetched (binary, too large, etc.)
        continue;
      }
    }
    
    return keyFiles;
  }

  // Helper methods for analysis
  /** Collects the names of every directory in the tree, depth-first (may contain duplicates). */
  private extractFolders(tree: FileNode[]): string[] {
    const folders: string[] = [];

    const visit = (nodes: FileNode[]): void => {
      for (const node of nodes) {
        if (node.type !== 'directory') continue;
        folders.push(node.name);
        if (node.children) {
          visit(node.children);
        }
      }
    };

    visit(tree);
    return folders;
  }

  /**
   * Classifies the project layout (monorepo / multi-package / single-package)
   * and maps well-known folder names to their conventional purpose.
   */
  private analyzeProjectStructure(fileTree: FileNode[]): any {
    const folders = this.extractFolders(fileTree);
    const has = (name: string) => folders.includes(name);

    // Project type: monorepo markers win, then lib+dist, else single package.
    let type = 'single-package';
    if (has('packages') || has('apps')) {
      type = 'monorepo';
    } else if (has('lib') && has('dist')) {
      type = 'multi-package';
    }

    // Map conventional folder roles; note 'lib' overrides 'src' when both exist.
    const folderMapping: Record<string, string> = {};
    if (has('src')) folderMapping.src = 'src';
    if (has('lib')) folderMapping.src = 'lib';
    if (has('test') || has('tests')) folderMapping.tests = has('test') ? 'test' : 'tests';
    if (has('docs') || has('documentation')) folderMapping.docs = has('docs') ? 'docs' : 'documentation';
    if (has('config') || has('configs')) folderMapping.config = has('config') ? 'config' : 'configs';
    if (has('build') || has('dist')) folderMapping.build = has('build') ? 'build' : 'dist';
    if (has('public') || has('static')) folderMapping.public = has('public') ? 'public' : 'static';

    return { type, folders: folderMapping };
  }

  /** Lists fetched files whose path looks like an application entry point (index./main./app.). */
  private findEntryPoints(keyFiles: Record<string, string>): string[] {
    const markers = ['index.', 'main.', 'app.'];
    return Object.keys(keyFiles).filter(filePath =>
      markers.some(marker => filePath.includes(marker))
    );
  }

  /** Lists fetched files whose path suggests build or tooling configuration. */
  private findConfigFiles(keyFiles: Record<string, string>): string[] {
    const markers = [
      'config', '.config.', 'webpack', 'vite',
      'tsconfig', 'babel', 'eslint', 'prettier',
    ];
    return Object.keys(keyFiles).filter(filePath =>
      markers.some(marker => filePath.includes(marker))
    );
  }

  /** Walks the tree and collects files whose name or path looks test-related. */
  private findTestFiles(fileTree: FileNode[]): string[] {
    const testFiles: string[] = [];

    const visit = (nodes: FileNode[]): void => {
      for (const node of nodes) {
        const looksLikeTest =
          node.type === 'file' &&
          (node.name.includes('.test.') ||
            node.name.includes('.spec.') ||
            node.path.includes('test') ||
            node.path.includes('spec'));

        if (looksLikeTest) {
          testFiles.push(node.path);
        } else if (node.children) {
          visit(node.children);
        }
      }
    };

    visit(fileTree);
    return testFiles;
  }

  /** Lists fetched files that appear to be documentation (READMEs, markdown, docs folders). */
  private findDocumentationFiles(keyFiles: Record<string, string>): string[] {
    const markers = ['README', 'CHANGELOG', 'CONTRIBUTING', '.md', 'docs/', 'documentation/'];
    return Object.keys(keyFiles).filter(filePath =>
      markers.some(marker => filePath.includes(marker))
    );
  }

  /** Averages per-file complexity over all fetched files; 0 when there are none. */
  private calculateComplexity(keyFiles: Record<string, string>): number {
    const contents = Object.values(keyFiles);
    if (contents.length === 0) {
      return 0;
    }

    const total = contents.reduce(
      (sum, content) => sum + this.calculateFileComplexity(content),
      0
    );
    return total / contents.length;
  }

  /**
   * Approximates cyclomatic complexity by counting branch keywords and
   * short-circuit operators, starting at 1 for the single entry path.
   *
   * Fix: the previous pattern list also counted every `:` and `return`, which
   * are not decision points — every object-literal key, type annotation, and
   * early return inflated the score. The no-op try/catch around matching
   * literal regexes is removed as well.
   *
   * NOTE(review): the bare `?` pattern still matches optional chaining (`?.`)
   * and nullish coalescing (`??`) in addition to ternaries, so scores skew
   * high on modern code — confirm whether that tolerance is acceptable.
   */
  private calculateFileComplexity(content: string): number {
    const branchPatterns: RegExp[] = [
      /\bif\b/g,
      /\belse\b/g,
      /\bfor\b/g,
      /\bwhile\b/g,
      /\bswitch\b/g,
      /\bcase\b/g,
      /\bcatch\b/g,
      /\bthrow\b/g,
      /&&/g,
      /\|\|/g,
      /\?/g,
    ];

    let complexity = 1; // Base complexity: one linear path.

    for (const pattern of branchPatterns) {
      const matches = content.match(pattern);
      if (matches) {
        complexity += matches.length;
      }
    }

    return complexity;
  }

  /**
   * Scores maintainability from 100 downward: long files, overlong lines, and
   * a low comment ratio each deduct points. Clamped at 0.
   */
  private calculateMaintainability(keyFiles: Record<string, string>): number {
    let score = 100;

    for (const [filePath, content] of Object.entries(keyFiles)) {
      const lines = content.split('\n');

      // Files over 500 lines cost a flat 10 points.
      if (lines.length > 500) {
        score -= 10;
      }

      // Each line over 120 characters costs a tenth of a point.
      const overlongCount = lines.filter(line => line.length > 120).length;
      score -= overlongCount * 0.1;

      // Fewer than 10% comment lines costs 5 points.
      const commentCount = lines.filter(line => {
        const trimmed = line.trim();
        return trimmed.startsWith('//') || trimmed.startsWith('/*');
      }).length;
      if (commentCount / lines.length < 0.1) {
        score -= 5;
      }
    }

    return Math.max(0, score);
  }

  /**
   * Counts distinct 3-line blocks (of substantial length) that occur more
   * than once across all fetched files — a rough duplicate-code indicator.
   */
  private detectDuplicateCode(keyFiles: Record<string, string>): number {
    const blockCounts = new Map<string, number>();

    for (const content of Object.values(keyFiles)) {
      const lines = content.split('\n');

      // Slide a 3-line window over the file.
      for (let start = 0; start + 2 < lines.length; start++) {
        const block = lines.slice(start, start + 3).join('\n').trim();
        // Ignore trivial blocks (blank runs, brace-only lines).
        if (block.length > 50) {
          blockCounts.set(block, (blockCounts.get(block) || 0) + 1);
        }
      }
    }

    let duplicates = 0;
    for (const count of blockCounts.values()) {
      if (count > 1) {
        duplicates++;
      }
    }

    return duplicates;
  }

  /** Derives a component name from its path: drop directories and the JS/TS extension. */
  private extractComponentName(filePath: string): string {
    const fileName = filePath.split('/').pop() ?? filePath;
    return fileName.replace(/\.(js|ts|jsx|tsx)$/, '');
  }

  /** Derives a utility module name from its path: drop directories and the JS/TS extension. */
  private extractUtilityName(filePath: string): string {
    const fileName = filePath.split('/').pop() ?? filePath;
    return fileName.replace(/\.(js|ts)$/, '');
  }

  /**
   * Extracts module specifiers from ES `import ... from` and CommonJS
   * `require(...)` statements.
   *
   * Fixes: side-effect imports (`import 'polyfill'`) were previously missed,
   * and a module imported several times appeared several times in the result.
   * Specifiers are now de-duplicated while preserving first-seen order.
   */
  private extractDependencies(content: string): string[] {
    const specifiers = new Set<string>();
    const patterns = [
      /import\s+.*?\s+from\s+['"]([^'"]+)['"]/g, // import x from 'mod'
      /import\s+['"]([^'"]+)['"]/g,              // import 'mod' (side effects)
      /require\(['"]([^'"]+)['"]\)/g,            // require('mod')
    ];

    for (const pattern of patterns) {
      let match;
      while ((match = pattern.exec(content)) !== null) {
        specifiers.add(match[1]);
      }
    }

    return [...specifiers];
  }

  /**
   * Heuristically lists function names: declarations, arrow-function consts,
   * and object-literal `name: (` style entries.
   */
  private extractFunctions(content: string): string[] {
    const names: string[] = [];
    const functionRegex = /function\s+(\w+)|const\s+(\w+)\s*=\s*\(|(\w+)\s*:\s*\(/g;

    for (let match = functionRegex.exec(content); match !== null; match = functionRegex.exec(content)) {
      // Exactly one of the three capture groups is set per match.
      const name = match[1] || match[2] || match[3];
      if (name) {
        names.push(name);
      }
    }

    return names;
  }

  /**
   * Pulls a one-line description from the file: the first JSDoc summary line
   * if present, otherwise the first `//` comment, otherwise a placeholder.
   */
  private extractDescription(content: string): string {
    const jsdoc = content.match(/\/\*\*\s*\n\s*\*\s*([^*]+)/);
    if (jsdoc) {
      return jsdoc[1].trim();
    }

    const lineComment = content.match(/\/\/\s*(.+)/);
    return lineComment ? lineComment[1].trim() : 'No description available';
  }

  /**
   * Heuristic 0–100 reusability score: rewarded for pure-looking functions,
   * TypeScript types, and documentation; penalized for React coupling.
   */
  private calculateReusabilityScore(content: string): number {
    let score = 50; // Neutral starting point.

    // Functions that never touch `this` or the DOM look side-effect free.
    const looksPure =
      content.includes('function') && !content.includes('this.') && !content.includes('document.');
    if (looksPure) {
      score += 20;
    }

    // Explicit types make reuse safer.
    if (content.includes('interface') || content.includes('type ')) {
      score += 15;
    }

    // Any documentation at all helps.
    if (content.includes('/**') || content.includes('//')) {
      score += 10;
    }

    // Coupling to React reduces portability.
    if (content.includes('import') && content.includes('react')) {
      score -= 10;
    }

    return Math.min(100, Math.max(0, score));
  }

  private findBoilerplateCode(keyFiles: Record<string, string>): string[] {
    // Detector table: label + predicate for each known boilerplate pattern,
    // checked in the same order the previous implementation used.
    const detectors: Array<{ label: string; matches: (c: string) => boolean }> = [
      {
        label: 'React component',
        matches: c => c.includes('export default') && c.includes('import React'),
      },
      {
        label: 'Express setup',
        matches: c => c.includes('app.use(') && c.includes('express'),
      },
      {
        label: 'Test',
        matches: c => c.includes('describe(') && c.includes('it('),
      },
    ];

    const findings: string[] = [];
    for (const [filePath, content] of Object.entries(keyFiles)) {
      for (const { label, matches } of detectors) {
        if (matches(content)) {
          findings.push(`${label} boilerplate in ${filePath}`);
        }
      }
    }
    return findings;
  }

  // Additional methods for MCP tool handlers
  async searchInRepository(url: string, query: string, options: any = {}): Promise<any> {
    const keyFiles = await this.getKeyFiles(url);
    const searchResults = [];
    
    for (const [filePath, content] of Object.entries(keyFiles)) {
      const lines = content.split('\n');
      let lineNumber = 0;
      
      for (const line of lines) {
        lineNumber++;
        if (line.toLowerCase().includes(query.toLowerCase())) {
          searchResults.push({
            file: filePath,
            line: lineNumber,
            content: line.trim(),
            context: options.include_context ? lines.slice(Math.max(0, lineNumber - 3), lineNumber + 3) : [],
            type: 'exact',
          });
        }
      }
    }
    
    return {
      query,
      results: searchResults,
      totalMatches: searchResults.length,
      filesSearched: Object.keys(keyFiles).length,
      searchTime: Date.now(),
    };
  }

  /**
   * Builds a lightweight structural summary of the repository's key files:
   * function and import inventories plus aggregate complexity numbers.
   *
   * @param url - Repository URL passed through to getKeyFiles.
   * @param file_paths - Optional allow-list; when given, files not in it are skipped.
   * @param options - Currently unused.
   * @returns Object with functions, classes (always empty here), imports,
   *   exports (always empty here), and complexity totals.
   */
  async analyzeCodeStructure(url: string, file_paths?: string[], options: any = {}): Promise<any> {
    const keyFiles = await this.getKeyFiles(url);
    const codeStructure: any = {
      functions: [],
      classes: [],
      imports: [],
      exports: [],
      complexity: {
        cyclomatic: 0,
        cognitive: 0,
        maintainability: 0,
      },
    };
    
    for (const [filePath, content] of Object.entries(keyFiles)) {
      // Respect the caller's file filter when one was supplied.
      if (file_paths && !file_paths.includes(filePath)) continue;
      
      // Extract functions — positions/complexity are placeholders (0 / 1),
      // since extractFunctions only yields names.
      const functions = this.extractFunctions(content);
      codeStructure.functions.push(...functions.map(func => ({
        name: func,
        signature: `function ${func}()`,
        startLine: 0,
        endLine: 0,
        complexity: 1,
        parameters: [],
        documentation: '',
      })));
      
      // Extract imports; relative specifiers (starting with '.') are internal.
      // NOTE(review): extractDependencies also matches require() calls, but
      // every entry is labeled type 'import' here.
      const imports = this.extractDependencies(content);
      codeStructure.imports.push(...imports.map(imp => ({
        source: imp,
        imports: [],
        type: 'import',
        isExternal: !imp.startsWith('.'),
      })));
      
      // Calculate complexity — summed across the (possibly filtered) files.
      codeStructure.complexity.cyclomatic += this.calculateFileComplexity(content);
    }
    
    // Maintainability is computed over ALL key files, ignoring file_paths.
    codeStructure.complexity.maintainability = this.calculateMaintainability(keyFiles);
    
    return codeStructure;
  }

  /**
   * Aggregates complexity, quality, size, and dependency metrics for a repository.
   *
   * @param url - Repository URL.
   * @param options - Currently unused.
   * @returns Metrics object; `cognitive` mirrors `cyclomatic` and several
   *   fields (issues, classes, internal, circular) are fixed placeholders.
   */
  async calculateMetrics(url: string, options: any = {}): Promise<any> {
    // Fetch file contents and repo metadata concurrently — the calls are independent.
    const [keyFiles, repoInfo] = await Promise.all([
      this.getKeyFiles(url),
      this.getRepositoryInfo(url),
    ]);

    // Hoisted: each helper was previously invoked twice with identical input.
    const complexityScore = this.calculateComplexity(keyFiles);
    const maintainability = this.calculateMaintainability(keyFiles);
    const externalDependencyCount = (await this.analyzeDependencies(url)).length;

    return {
      complexity: {
        cyclomatic: complexityScore,
        // No separate cognitive-complexity metric exists yet; reuse cyclomatic.
        cognitive: complexityScore,
        maintainability,
      },
      quality: {
        score: maintainability,
        issues: [],
      },
      size: {
        lines: repoInfo.lineCount,
        files: repoInfo.fileCount,
        functions: this.extractFunctions(Object.values(keyFiles).join('\n')).length,
        classes: 0,
      },
      dependencies: {
        external: externalDependencyCount,
        internal: 0,
        circular: [],
      },
    };
  }

  async analyzeArchitecturePublic(url: string, options: any = {}): Promise<any> {
    // Thin public wrapper: fetch repo metadata, then delegate to the internal
    // architecture analysis.
    return this.analyzeArchitecture(url, await this.getRepositoryInfo(url));
  }

  /**
   * Analyzes several repository implementations for side-by-side comparison.
   *
   * @param implementations - Entries with `name` and `url` fields.
   * @param comparison_criteria - Criteria labels echoed back in the result.
   * @param options - Currently unused.
   * @returns Per-implementation analyses (in input order), the criteria, and a summary.
   */
  async compareRepositories(implementations: any[], comparison_criteria: string[], options: any = {}): Promise<any> {
    // Analyses are independent, so run them in parallel instead of awaiting
    // each one sequentially; Promise.all preserves input order.
    const comparisons = await Promise.all(
      implementations.map(async impl => ({
        name: impl.name,
        url: impl.url,
        analysis: await this.analyzeRepository(impl.url),
      })),
    );

    return {
      implementations: comparisons,
      criteria: comparison_criteria,
      summary: 'Repository comparison completed',
    };
  }

  async validateCodeQuality(url: string, validation_types: string[], options: any = {}): Promise<any> {
    const repoInfo = await this.getRepositoryInfo(url);
    const codeQuality = await this.analyzeCodeQuality(url, repoInfo);
    
    return {
      validations: validation_types.map(type => ({
        type,
        status: 'passed',
        issues: [],
        score: 80,
      })),
      overall: {
        score: codeQuality.maintainability,
        issues: codeQuality.codeSmells,
        recommendations: ['Add more tests', 'Improve documentation'],
      },
    };
  }

  async checkApiLimits(): Promise<any> {
    // Probe the GitHub rate-limit endpoint; failures are folded into an
    // error-status payload instead of propagating the exception.
    try {
      const {
        data: { resources },
      } = await this.octokit.rest.rateLimit.get();
      return {
        status: 'healthy',
        core: resources.core,
        search: resources.search,
        authenticated: this.isAuthenticated,
      };
    } catch (error: any) {
      return {
        status: 'error',
        error: error.message,
        authenticated: this.isAuthenticated,
      };
    }
  }
}
```
Page 2/3FirstPrevNextLast