This is page 13 of 20. Use http://codebase.md/tosin2013/documcp?page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│   ├── agents
│   │   ├── documcp-ast.md
│   │   ├── documcp-deploy.md
│   │   ├── documcp-memory.md
│   │   ├── documcp-test.md
│   │   └── documcp-tool.md
│   ├── copilot-instructions.md
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── automated-changelog.md
│   │   ├── bug_report.md
│   │   ├── bug_report.yml
│   │   ├── documentation_issue.md
│   │   ├── feature_request.md
│   │   ├── feature_request.yml
│   │   ├── npm-publishing-fix.md
│   │   └── release_improvements.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── release-drafter.yml
│   └── workflows
│       ├── auto-merge.yml
│       ├── ci.yml
│       ├── codeql.yml
│       ├── dependency-review.yml
│       ├── deploy-docs.yml
│       ├── README.md
│       ├── release-drafter.yml
│       └── release.yml
├── .gitignore
├── .husky
│   ├── commit-msg
│   └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│   ├── .docusaurus
│   │   ├── docusaurus-plugin-content-docs
│   │   │   └── default
│   │   │       └── __mdx-loader-dependency.json
│   │   └── docusaurus-plugin-content-pages
│   │       └── default
│   │           └── __plugin.json
│   ├── adrs
│   │   ├── 001-mcp-server-architecture.md
│   │   ├── 002-repository-analysis-engine.md
│   │   ├── 003-static-site-generator-recommendation-engine.md
│   │   ├── 004-diataxis-framework-integration.md
│   │   ├── 005-github-pages-deployment-automation.md
│   │   ├── 006-mcp-tools-api-design.md
│   │   ├── 007-mcp-prompts-and-resources-integration.md
│   │   ├── 008-intelligent-content-population-engine.md
│   │   ├── 009-content-accuracy-validation-framework.md
│   │   ├── 010-mcp-resource-pattern-redesign.md
│   │   └── README.md
│   ├── api
│   │   ├── .nojekyll
│   │   ├── assets
│   │   │   ├── hierarchy.js
│   │   │   ├── highlight.css
│   │   │   ├── icons.js
│   │   │   ├── icons.svg
│   │   │   ├── main.js
│   │   │   ├── navigation.js
│   │   │   ├── search.js
│   │   │   └── style.css
│   │   ├── hierarchy.html
│   │   ├── index.html
│   │   ├── modules.html
│   │   └── variables
│   │       └── TOOLS.html
│   ├── assets
│   │   └── logo.svg
│   ├── development
│   │   └── MCP_INSPECTOR_TESTING.md
│   ├── docusaurus.config.js
│   ├── explanation
│   │   ├── architecture.md
│   │   └── index.md
│   ├── guides
│   │   ├── link-validation.md
│   │   ├── playwright-integration.md
│   │   └── playwright-testing-workflow.md
│   ├── how-to
│   │   ├── analytics-setup.md
│   │   ├── custom-domains.md
│   │   ├── documentation-freshness-tracking.md
│   │   ├── github-pages-deployment.md
│   │   ├── index.md
│   │   ├── local-testing.md
│   │   ├── performance-optimization.md
│   │   ├── prompting-guide.md
│   │   ├── repository-analysis.md
│   │   ├── seo-optimization.md
│   │   ├── site-monitoring.md
│   │   ├── troubleshooting.md
│   │   └── usage-examples.md
│   ├── index.md
│   ├── knowledge-graph.md
│   ├── package-lock.json
│   ├── package.json
│   ├── phase-2-intelligence.md
│   ├── reference
│   │   ├── api-overview.md
│   │   ├── cli.md
│   │   ├── configuration.md
│   │   ├── deploy-pages.md
│   │   ├── index.md
│   │   ├── mcp-tools.md
│   │   └── prompt-templates.md
│   ├── research
│   │   ├── cross-domain-integration
│   │   │   └── README.md
│   │   ├── domain-1-mcp-architecture
│   │   │   ├── index.md
│   │   │   └── mcp-performance-research.md
│   │   ├── domain-2-repository-analysis
│   │   │   └── README.md
│   │   ├── domain-3-ssg-recommendation
│   │   │   ├── index.md
│   │   │   └── ssg-performance-analysis.md
│   │   ├── domain-4-diataxis-integration
│   │   │   └── README.md
│   │   ├── domain-5-github-deployment
│   │   │   ├── github-pages-security-analysis.md
│   │   │   └── index.md
│   │   ├── domain-6-api-design
│   │   │   └── README.md
│   │   ├── README.md
│   │   ├── research-integration-summary-2025-01-14.md
│   │   ├── research-progress-template.md
│   │   └── research-questions-2025-01-14.md
│   ├── robots.txt
│   ├── sidebars.js
│   ├── sitemap.xml
│   ├── src
│   │   └── css
│   │       └── custom.css
│   └── tutorials
│       ├── development-setup.md
│       ├── environment-setup.md
│       ├── first-deployment.md
│       ├── getting-started.md
│       ├── index.md
│       ├── memory-workflows.md
│       └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│   └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│   ├── benchmarks
│   │   └── performance.ts
│   ├── index.ts
│   ├── memory
│   │   ├── contextual-retrieval.ts
│   │   ├── deployment-analytics.ts
│   │   ├── enhanced-manager.ts
│   │   ├── export-import.ts
│   │   ├── freshness-kg-integration.ts
│   │   ├── index.ts
│   │   ├── integration.ts
│   │   ├── kg-code-integration.ts
│   │   ├── kg-health.ts
│   │   ├── kg-integration.ts
│   │   ├── kg-link-validator.ts
│   │   ├── kg-storage.ts
│   │   ├── knowledge-graph.ts
│   │   ├── learning.ts
│   │   ├── manager.ts
│   │   ├── multi-agent-sharing.ts
│   │   ├── pruning.ts
│   │   ├── schemas.ts
│   │   ├── storage.ts
│   │   ├── temporal-analysis.ts
│   │   ├── user-preferences.ts
│   │   └── visualization.ts
│   ├── prompts
│   │   └── technical-writer-prompts.ts
│   ├── scripts
│   │   └── benchmark.ts
│   ├── templates
│   │   └── playwright
│   │       ├── accessibility.spec.template.ts
│   │       ├── Dockerfile.template
│   │       ├── docs-e2e.workflow.template.yml
│   │       ├── link-validation.spec.template.ts
│   │       └── playwright.config.template.ts
│   ├── tools
│   │   ├── analyze-deployments.ts
│   │   ├── analyze-readme.ts
│   │   ├── analyze-repository.ts
│   │   ├── check-documentation-links.ts
│   │   ├── deploy-pages.ts
│   │   ├── detect-gaps.ts
│   │   ├── evaluate-readme-health.ts
│   │   ├── generate-config.ts
│   │   ├── generate-contextual-content.ts
│   │   ├── generate-llm-context.ts
│   │   ├── generate-readme-template.ts
│   │   ├── generate-technical-writer-prompts.ts
│   │   ├── kg-health-check.ts
│   │   ├── manage-preferences.ts
│   │   ├── manage-sitemap.ts
│   │   ├── optimize-readme.ts
│   │   ├── populate-content.ts
│   │   ├── readme-best-practices.ts
│   │   ├── recommend-ssg.ts
│   │   ├── setup-playwright-tests.ts
│   │   ├── setup-structure.ts
│   │   ├── sync-code-to-docs.ts
│   │   ├── test-local-deployment.ts
│   │   ├── track-documentation-freshness.ts
│   │   ├── update-existing-documentation.ts
│   │   ├── validate-content.ts
│   │   ├── validate-documentation-freshness.ts
│   │   ├── validate-readme-checklist.ts
│   │   └── verify-deployment.ts
│   ├── types
│   │   └── api.ts
│   ├── utils
│   │   ├── ast-analyzer.ts
│   │   ├── code-scanner.ts
│   │   ├── content-extractor.ts
│   │   ├── drift-detector.ts
│   │   ├── freshness-tracker.ts
│   │   ├── language-parsers-simple.ts
│   │   ├── permission-checker.ts
│   │   └── sitemap-generator.ts
│   └── workflows
│       └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│   ├── api
│   │   └── mcp-responses.test.ts
│   ├── benchmarks
│   │   └── performance.test.ts
│   ├── edge-cases
│   │   └── error-handling.test.ts
│   ├── functional
│   │   └── tools.test.ts
│   ├── integration
│   │   ├── kg-documentation-workflow.test.ts
│   │   ├── knowledge-graph-workflow.test.ts
│   │   ├── mcp-readme-tools.test.ts
│   │   ├── memory-mcp-tools.test.ts
│   │   ├── readme-technical-writer.test.ts
│   │   └── workflow.test.ts
│   ├── memory
│   │   ├── contextual-retrieval.test.ts
│   │   ├── enhanced-manager.test.ts
│   │   ├── export-import.test.ts
│   │   ├── freshness-kg-integration.test.ts
│   │   ├── kg-code-integration.test.ts
│   │   ├── kg-health.test.ts
│   │   ├── kg-link-validator.test.ts
│   │   ├── kg-storage-validation.test.ts
│   │   ├── kg-storage.test.ts
│   │   ├── knowledge-graph-enhanced.test.ts
│   │   ├── knowledge-graph.test.ts
│   │   ├── learning.test.ts
│   │   ├── manager-advanced.test.ts
│   │   ├── manager.test.ts
│   │   ├── mcp-resource-integration.test.ts
│   │   ├── mcp-tool-persistence.test.ts
│   │   ├── schemas.test.ts
│   │   ├── storage.test.ts
│   │   ├── temporal-analysis.test.ts
│   │   └── user-preferences.test.ts
│   ├── performance
│   │   ├── memory-load-testing.test.ts
│   │   └── memory-stress-testing.test.ts
│   ├── prompts
│   │   ├── guided-workflow-prompts.test.ts
│   │   └── technical-writer-prompts.test.ts
│   ├── server.test.ts
│   ├── setup.ts
│   ├── tools
│   │   ├── all-tools.test.ts
│   │   ├── analyze-coverage.test.ts
│   │   ├── analyze-deployments.test.ts
│   │   ├── analyze-readme.test.ts
│   │   ├── analyze-repository.test.ts
│   │   ├── check-documentation-links.test.ts
│   │   ├── deploy-pages-kg-retrieval.test.ts
│   │   ├── deploy-pages-tracking.test.ts
│   │   ├── deploy-pages.test.ts
│   │   ├── detect-gaps.test.ts
│   │   ├── evaluate-readme-health.test.ts
│   │   ├── generate-contextual-content.test.ts
│   │   ├── generate-llm-context.test.ts
│   │   ├── generate-readme-template.test.ts
│   │   ├── generate-technical-writer-prompts.test.ts
│   │   ├── kg-health-check.test.ts
│   │   ├── manage-sitemap.test.ts
│   │   ├── optimize-readme.test.ts
│   │   ├── readme-best-practices.test.ts
│   │   ├── recommend-ssg-historical.test.ts
│   │   ├── recommend-ssg-preferences.test.ts
│   │   ├── recommend-ssg.test.ts
│   │   ├── simple-coverage.test.ts
│   │   ├── sync-code-to-docs.test.ts
│   │   ├── test-local-deployment.test.ts
│   │   ├── tool-error-handling.test.ts
│   │   ├── track-documentation-freshness.test.ts
│   │   ├── validate-content.test.ts
│   │   ├── validate-documentation-freshness.test.ts
│   │   └── validate-readme-checklist.test.ts
│   ├── types
│   │   └── type-safety.test.ts
│   └── utils
│       ├── ast-analyzer.test.ts
│       ├── content-extractor.test.ts
│       ├── drift-detector.test.ts
│       ├── freshness-tracker.test.ts
│       └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```

# Files

--------------------------------------------------------------------------------
/src/prompts/technical-writer-prompts.ts:
--------------------------------------------------------------------------------

```typescript
import { promises as fs } from "fs";
import { join } from "path";

export interface ProjectContext {
  projectType: string;
  languages: string[];
  frameworks: string[];
  hasTests: boolean;
  hasCI: boolean;
  readmeExists: boolean;
  packageManager?: string;
  documentationGaps: string[];
}

export interface PromptMessage {
  role: "user" | "assistant" | "system";
  content: {
    type: "text";
    text: string;
  };
}

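// Builds a ProjectContext by probing the project directory for ecosystem
// markers (package.json, requirements.txt, go.mod, Cargo.toml), test files,
// CI configuration, and common documentation gaps.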
export async function analyzeProjectContext(
  projectPath: string,
): Promise<ProjectContext> {
  const context: ProjectContext = {
    projectType: "unknown",
    languages: [],
    frameworks: [],
    hasTests: false,
    hasCI: false,
    readmeExists: false,
    documentationGaps: [],
  };

  // Check for README
  context.readmeExists = await fileExists(join(projectPath, "README.md"));

  // Analyze package.json for Node.js projects
  const packageJsonPath = join(projectPath, "package.json");
  if (await fileExists(packageJsonPath)) {
    try {
      const packageJson = JSON.parse(
        await fs.readFile(packageJsonPath, "utf-8"),
      );
      const deps = {
        ...packageJson.dependencies,
        ...packageJson.devDependencies,
      };

      context.projectType = "node_application";
      context.languages.push("JavaScript");

      // Detect frameworks
      if (deps["react"]) context.frameworks.push("React");
      if (deps["vue"]) context.frameworks.push("Vue");
      if (deps["angular"]) context.frameworks.push("Angular");
      if (deps["express"]) context.frameworks.push("Express");
      if (deps["next"]) context.frameworks.push("Next.js");
      if (deps["nuxt"]) context.frameworks.push("Nuxt.js");
      if (deps["svelte"]) context.frameworks.push("Svelte");
      if (deps["typescript"]) context.languages.push("TypeScript");

      // Detect package manager
      if (await fileExists(join(projectPath, "yarn.lock"))) {
        context.packageManager = "yarn";
      } else if (await fileExists(join(projectPath, "pnpm-lock.yaml"))) {
        context.packageManager = "pnpm";
      } else {
        context.packageManager = "npm";
      }
    } catch (error) {
      // If package.json exists but can't be parsed, continue with other detections
      console.warn("Failed to parse package.json:", error);
    }
  }

  // Check for Python projects
  if (
    (await fileExists(join(projectPath, "requirements.txt"))) ||
    (await fileExists(join(projectPath, "pyproject.toml"))) ||
    (await fileExists(join(projectPath, "setup.py")))
  ) {
    context.projectType = "python_application";
    context.languages.push("Python");
  }

  // Check for Go projects
  if (await fileExists(join(projectPath, "go.mod"))) {
    context.projectType = "go_application";
    context.languages.push("Go");
  }

  // Check for Rust projects
  if (await fileExists(join(projectPath, "Cargo.toml"))) {
    context.projectType = "rust_application";
    context.languages.push("Rust");
  }

  // Check for tests
  context.hasTests = await hasTestFiles(projectPath);

  // Check for CI/CD
  context.hasCI = await hasCIConfig(projectPath);

  // Identify documentation gaps
  context.documentationGaps = await identifyDocumentationGaps(
    projectPath,
    context,
  );

  return context;
}

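// Dispatches to the prompt generator matching promptType after analyzing the
// project at projectPath; throws for unknown prompt types.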
export async function generateTechnicalWriterPrompts(
  promptType: string,
  projectPath: string,
  args: Record<string, any> = {},
): Promise<PromptMessage[]> {
  const context = await analyzeProjectContext(projectPath);

  switch (promptType) {
    case "tutorial-writer":
      return generateTutorialWriterPrompt(context, args);
    case "howto-guide-writer":
      return generateHowToGuideWriterPrompt(context, args);
    case "reference-writer":
      return generateReferenceWriterPrompt(context, args);
    case "explanation-writer":
      return generateExplanationWriterPrompt(context, args);
    case "diataxis-organizer":
      return generateDiataxisOrganizerPrompt(context, args);
    case "readme-optimizer":
      return generateReadmeOptimizerPrompt(context, args);
    case "analyze-and-recommend":
      return generateAnalyzeAndRecommendPrompt(context, args);
    case "setup-documentation":
      return generateSetupDocumentationPrompt(context, args);
    case "troubleshoot-deployment":
      return generateTroubleshootDeploymentPrompt(context, args);
    case "maintain-documentation-freshness":
      return generateMaintainDocumentationFreshnessPrompt(context, args);
    default:
      throw new Error(`Unknown prompt type: ${promptType}`);
  }
}

function generateTutorialWriterPrompt(
  context: ProjectContext,
  args: Record<string, any>,
): PromptMessage[] {
  const targetAudience = args.target_audience || "beginners";
  const learningGoal = args.learning_goal || "get started with the project";

  return [
    {
      role: "user",
      content: {
        type: "text",
        text: `Create a comprehensive tutorial for a ${
          context.projectType
        } project following Diataxis framework principles.

**Project Context:**
- Type: ${context.projectType}
- Languages: ${context.languages.join(", ")}
- Frameworks: ${context.frameworks.join(", ")}
- Package Manager: ${context.packageManager || "N/A"}
- Target Audience: ${targetAudience}
- Learning Goal: ${learningGoal}

**Diataxis Tutorial Requirements:**
1. Learning-oriented: Focus on helping users learn by doing
2. Step-by-step progression from simple to complex
3. Practical exercises with clear outcomes
4. Safe-to-fail environment for experimentation
5. Minimal explanation - focus on action

**Tutorial Structure:**
1. Prerequisites and setup
2. Step-by-step guided exercises
3. What you'll build/learn
4. Hands-on activities with immediate feedback
5. Next steps for continued learning

**Integration Hints:**
- Use analyze_repository for project structure insights
- Reference setup_development_environment for environment setup
- Consider validate_tutorial_steps for step verification

Please create a tutorial that teaches through guided practice:`,
      },
    },
  ];
}

function generateHowToGuideWriterPrompt(
  context: ProjectContext,
  args: Record<string, any>,
): PromptMessage[] {
  const problemToSolve = args.problem || "common development task";
  const userExperience = args.user_experience || "intermediate";

  return [
    {
      role: "user",
      content: {
        type: "text",
        text: `Create a practical how-to guide for a ${
          context.projectType
        } project following Diataxis framework principles.

**Project Context:**
- Type: ${context.projectType}
- Languages: ${context.languages.join(", ")}
- Frameworks: ${context.frameworks.join(", ")}
- Problem to Solve: ${problemToSolve}
- User Experience Level: ${userExperience}

**Diataxis How-to Guide Requirements:**
1. Problem-oriented: Address specific real-world problems
2. Goal-focused: Clear objective and success criteria
3. Action-oriented: Direct, actionable steps
4. Assume prior knowledge appropriate to user level
5. Practical and immediately applicable

**How-to Guide Structure:**
1. Problem statement and context
2. Prerequisites and assumptions
3. Step-by-step solution
4. Verification and testing
5. Troubleshooting common issues
6. Related tasks and variations

**Integration Hints:**
- Use analyze_codebase for understanding current implementation
- Reference best_practices for recommended approaches
- Consider validate_solution for testing guidance

Please create a how-to guide that solves real problems:`,
      },
    },
  ];
}

function generateReferenceWriterPrompt(
  context: ProjectContext,
  args: Record<string, any>,
): PromptMessage[] {
  const referenceType = args.reference_type || "API";
  const completeness = args.completeness || "comprehensive";

  return [
    {
      role: "user",
      content: {
        type: "text",
        text: `Create comprehensive reference documentation for a ${
          context.projectType
        } project following Diataxis framework principles.

**Project Context:**
- Type: ${context.projectType}
- Languages: ${context.languages.join(", ")}
- Frameworks: ${context.frameworks.join(", ")}
- Reference Type: ${referenceType}
- Completeness Level: ${completeness}

**Diataxis Reference Requirements:**
1. Information-oriented: Provide complete, accurate information
2. Structured and consistent organization
3. Comprehensive coverage of all features/APIs
4. Neutral tone - describe what is, not how to use
5. Easy to scan and search

**Reference Structure:**
1. Overview and organization
2. Complete feature/API listings
3. Parameters, return values, examples
4. Technical specifications
5. Cross-references and relationships
6. Version compatibility information

**Integration Hints:**
- Use analyze_api_endpoints for API documentation
- Reference code_analysis for implementation details
- Consider validate_completeness for coverage verification

Please create reference documentation that serves as the authoritative source:`,
      },
    },
  ];
}

function generateExplanationWriterPrompt(
  context: ProjectContext,
  args: Record<string, any>,
): PromptMessage[] {
  const conceptToExplain = args.concept || "system architecture";
  const depth = args.depth || "detailed";

  return [
    {
      role: "user",
      content: {
        type: "text",
        text: `Create in-depth explanation documentation for a ${
          context.projectType
        } project following Diataxis framework principles.

**Project Context:**
- Type: ${context.projectType}
- Languages: ${context.languages.join(", ")}
- Frameworks: ${context.frameworks.join(", ")}
- Concept to Explain: ${conceptToExplain}
- Depth Level: ${depth}

**Diataxis Explanation Requirements:**
1. Understanding-oriented: Help users understand concepts
2. Theoretical and conceptual focus
3. Provide context and background
4. Explain why things work the way they do
5. Connect ideas and show relationships

**Explanation Structure:**
1. Introduction and context
2. Core concepts and principles
3. How components relate and interact
4. Design decisions and trade-offs
5. Historical context and evolution
6. Implications and consequences

**Integration Hints:**
- Use analyze_architecture for system understanding
- Reference design_patterns for architectural insights
- Consider validate_understanding for comprehension checks

Please create explanatory content that builds deep understanding:`,
      },
    },
  ];
}

function generateDiataxisOrganizerPrompt(
  context: ProjectContext,
  args: Record<string, any>,
): PromptMessage[] {
  const currentDocs = args.current_docs || "mixed documentation";
  const priority = args.priority || "user needs";

  return [
    {
      role: "user",
      content: {
        type: "text",
        text: `Organize existing documentation for a ${
          context.projectType
        } project using Diataxis framework principles.

**Project Context:**
- Type: ${context.projectType}
- Languages: ${context.languages.join(", ")}
- Current Documentation: ${currentDocs}
- Organization Priority: ${priority}

**Diataxis Organization Requirements:**
1. Categorize content into four types: Tutorials, How-to guides, Reference, Explanation
2. Ensure each piece serves its intended purpose
3. Create clear navigation between content types
4. Identify gaps and overlaps
5. Establish content relationships and cross-references

**Organization Structure:**
1. Content audit and classification
2. Diataxis quadrant mapping
3. Navigation and information architecture
4. Content gap analysis
5. Cross-reference strategy
6. Migration and improvement plan

**Integration Hints:**
- Use analyze_existing_docs for current state assessment
- Reference content_classification for categorization
- Consider validate_organization for structure verification

Please organize documentation according to Diataxis principles:`,
      },
    },
  ];
}

function generateReadmeOptimizerPrompt(
  context: ProjectContext,
  args: Record<string, any>,
): PromptMessage[] {
  const optimizationFocus = args.optimization_focus || "general";

  return [
    {
      role: "user",
      content: {
        type: "text",
        text: `Optimize existing README content for a ${
          context.projectType
        } project using Diataxis-aware principles.

**Project Context:**
- Type: ${context.projectType}
- Languages: ${context.languages.join(", ")}
- README Exists: ${context.readmeExists}
- Documentation Gaps: ${
          context.documentationGaps.join(", ") || "None identified"
        }
- Optimization Focus: ${optimizationFocus}

**Diataxis-Aware README Requirements:**
1. Clear content type identification (tutorial, how-to, reference, explanation)
2. Appropriate depth for each content type
3. Logical flow from learning to doing to understanding
4. Clear navigation to detailed documentation
5. Audience-appropriate entry points

**README Structure (Diataxis-organized):**
1. Quick start (tutorial-style for beginners)
2. Common tasks (how-to style for users)
3. API/feature overview (reference-style for developers)
4. Architecture overview (explanation-style for understanding)
5. Links to detailed Diataxis-organized documentation

**Integration Hints:**
- Use analyze_readme for current content analysis
- Reference diataxis_principles for content organization
- Consider validate_readme_structure for optimization verification

Please optimize the README with Diataxis awareness:`,
      },
    },
  ];
}

// Helper functions
async function fileExists(path: string): Promise<boolean> {
  try {
    await fs.access(path);
    return true;
  } catch {
    return false;
  }
}

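// Builds a guided prompt for the documentation freshness workflow
// (validate / track / insights actions).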
function generateMaintainDocumentationFreshnessPrompt(
  context: ProjectContext,
  args: Record<string, any>,
): PromptMessage[] {
  const docsPath = args.docs_path || "docs";
  const freshnessPreset = args.freshness_preset || "monthly";
  const action = args.action || "track";

  const actionDescriptions = {
    validate:
      "Initialize freshness metadata for documentation files that don't have it yet",
    track:
      "Scan all documentation for staleness and generate a freshness report",
    insights: "Analyze freshness trends over time and get recommendations",
  };

  return [
    {
      role: "user",
      content: {
        type: "text",
        text: `Maintain documentation freshness for a ${
          context.projectType
        } project with automated staleness tracking.

**Project Context:**
- Type: ${context.projectType}
- Languages: ${context.languages.join(", ")}
- Documentation Path: ${docsPath}
- Freshness Preset: ${freshnessPreset}
- Action: ${action} (${
          actionDescriptions[action as keyof typeof actionDescriptions] ||
          "track staleness"
        })

**Documentation Freshness Tracking:**
Documentation freshness tracking helps maintain high-quality, up-to-date documentation by:
1. Adding temporal metadata to markdown frontmatter (last_updated, last_validated)
2. Scanning documentation for staleness based on configurable thresholds
3. Providing insights and trends over time using the knowledge graph
4. Generating recommendations for which files need attention

**Available Actions:**

1. **Validate** (${action === "validate" ? "SELECTED" : "available"}):
   - Initialize freshness metadata for files without it
   - Set last_updated and last_validated timestamps
   - Link validation to git commits for traceability
   - Recommended as first step for new documentation sets

2. **Track** (${action === "track" ? "SELECTED" : "available"}):
   - Scan all documentation files for staleness
   - Categorize as: fresh, warning, stale, or critical
   - Generate comprehensive freshness report
   - Store results in knowledge graph for historical tracking

3. **Insights** (${action === "insights" ? "SELECTED" : "available"}):
   - Analyze freshness trends over time
   - Compare current vs. historical freshness scores
   - Identify chronically stale files
   - Get actionable recommendations

**Freshness Presets:**
- realtime: Minutes (for API docs, status pages)
- active: Hours (for development docs, release notes)
- recent: Days (for tutorials, getting started)
- weekly: 7 days (for how-to guides, examples)
- monthly: 30 days (for reference, architecture) - DEFAULT
- quarterly: 90 days (for explanations, background)

**Integration Tools:**
- validate_documentation_freshness: Initialize and update metadata
- track_documentation_freshness: Scan and report staleness
- update_existing_documentation: Sync docs with code changes
- sync_code_to_docs: Detect drift between code and docs

**Workflow Example:**
1. First time: Run validate_documentation_freshness to initialize metadata
2. Regular checks: Run track_documentation_freshness to monitor staleness
3. Deep analysis: Query knowledge graph for trends and insights
4. Maintenance: Update stale files and re-validate

Please ${
          actionDescriptions[action as keyof typeof actionDescriptions] ||
          "track documentation freshness"
        } and provide guidance on maintaining documentation quality:`,
      },
    },
  ];
}

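// Recursively lists files and checks for common test/spec naming conventions.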
async function hasTestFiles(projectPath: string): Promise<boolean> {
  try {
    const files = await fs.readdir(projectPath, { recursive: true });
    return files.some(
      (file) =>
        typeof file === "string" &&
        (file.includes("test") ||
          file.includes("spec") ||
          file.endsWith(".test.js") ||
          file.endsWith(".test.ts") ||
          file.endsWith(".spec.js") ||
          file.endsWith(".spec.ts")),
    );
  } catch {
    return false;
  }
}

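// Returns true if any well-known CI configuration file or directory exists.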
async function hasCIConfig(projectPath: string): Promise<boolean> {
  const ciFiles = [
    ".github/workflows",
    ".gitlab-ci.yml",
    "circle.yml",
    ".circleci/config.yml",
    ".travis.yml",
  ];

  for (const file of ciFiles) {
    if (await fileExists(join(projectPath, file))) {
      return true;
    }
  }
  return false;
}

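// Flags a missing README and missing common documentation files as gaps.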
async function identifyDocumentationGaps(
  projectPath: string,
  context: ProjectContext,
): Promise<string[]> {
  const gaps: string[] = [];

  if (!context.readmeExists) {
    gaps.push("readme");
  }

  // Check for common documentation files
  const docFiles = [
    "CONTRIBUTING.md",
    "CHANGELOG.md",
    "LICENSE",
    "docs/api.md",
    "docs/tutorial.md",
    "docs/installation.md",
  ];

  for (const docFile of docFiles) {
    if (!(await fileExists(join(projectPath, docFile)))) {
      gaps.push(docFile.toLowerCase().replace(".md", "").replace("docs/", ""));
    }
  }

  return gaps;
}

// Guided workflow prompt generators (ADR-007)

function generateAnalyzeAndRecommendPrompt(
  context: ProjectContext,
  args: Record<string, any>,
): PromptMessage[] {
  const analysisDepth = args.analysis_depth || "standard";
  const preferences =
    args.preferences || "balanced approach with good community support";

  return [
    {
      role: "user",
      content: {
        type: "text",
        text: `Execute a complete repository analysis and SSG recommendation workflow for this project.

**Project Context:**
- Type: ${context.projectType}
- Languages: ${context.languages.join(", ")}
- Frameworks: ${context.frameworks.join(", ")}
- Package Manager: ${context.packageManager || "N/A"}
- Has Tests: ${context.hasTests}
- Has CI: ${context.hasCI}
- Documentation Gaps: ${context.documentationGaps.join(", ")}

**Workflow Parameters:**
- Analysis Depth: ${analysisDepth}
- Preferences: ${preferences}

**Expected Workflow:**
1. **Repository Analysis**: Analyze project structure, dependencies, and complexity
2. **SSG Recommendation**: Recommend the best static site generator based on project characteristics
3. **Implementation Guidance**: Provide step-by-step setup instructions
4. **Best Practices**: Include security, performance, and maintenance recommendations

**Required Output Format:**
- Executive summary with key findings
- Detailed analysis results with metrics
- SSG recommendation with justification
- Implementation roadmap with priorities
- Resource requirements and timeline estimates

Please execute this workflow systematically and provide actionable recommendations.`,
      },
    },
  ];
}

function generateSetupDocumentationPrompt(
  context: ProjectContext,
  args: Record<string, any>,
): PromptMessage[] {
  const ssgType = args.ssg_type || "recommended based on project analysis";
  const includeExamples = args.include_examples !== false;

  return [
    {
      role: "user",
      content: {
        type: "text",
        text: `Create a comprehensive documentation structure with best practices for this project.

**Project Context:**
- Type: ${context.projectType}
- Languages: ${context.languages.join(", ")}
- Frameworks: ${context.frameworks.join(", ")}
- Current Documentation Gaps: ${context.documentationGaps.join(", ")}

**Setup Parameters:**
- SSG Type: ${ssgType}
- Include Examples: ${includeExamples}

**Documentation Structure Requirements:**
1. **Diataxis Framework Implementation**:
   - Tutorials: Learning-oriented content
   - How-to Guides: Problem-solving content
   - Reference: Information-oriented content
   - Explanations: Understanding-oriented content

2. **Configuration Setup**:
   - SSG configuration files
   - GitHub Pages deployment
   - Automated workflows
   - Security best practices

3. **Content Guidelines**:
   - Writing style guide
   - Contribution guidelines
   - Review processes
   - Maintenance procedures

4. **Development Integration**:
   - Build pipeline integration
   - Automated testing for docs
   - Performance monitoring
   - Analytics setup

**Required Deliverables:**
- Complete directory structure
- Configuration files with comments
- Sample content ${includeExamples ? "with examples" : "templates"}
- Deployment automation
- Maintenance runbook

Please create a production-ready documentation system that scales with the project.`,
      },
    },
  ];
}

function generateTroubleshootDeploymentPrompt(
  context: ProjectContext,
  args: Record<string, any>,
): PromptMessage[] {
  const repository = args.repository || "not specified";
  const deploymentUrl = args.deployment_url || "GitHub Pages URL";
  const issueDescription =
    args.issue_description || "deployment not working as expected";

  return [
    {
      role: "user",
      content: {
        type: "text",
        text: `Diagnose and fix GitHub Pages deployment issues for this documentation project.

**Repository Information:**
- Repository: ${repository}
- Expected URL: ${deploymentUrl}
- Issue Description: ${issueDescription}

**Project Context:**
- Type: ${context.projectType}
- Languages: ${context.languages.join(", ")}
- Has CI: ${context.hasCI}

**Troubleshooting Checklist:**

1. **Repository Settings**:
   - GitHub Pages source configuration
   - Branch and folder settings
   - Custom domain setup (if applicable)
   - Repository visibility and permissions

2. **Build Configuration**:
   - GitHub Actions workflow validation
   - Build dependencies and versions
   - Output directory configuration
   - Asset and link path issues

3. **Content Issues**:
   - Markdown syntax validation
   - Link and image path verification
   - YAML frontmatter validation
   - Special character handling

4. **Deployment Workflow**:
   - Action permissions and secrets
   - Deployment job configuration
   - Artifact handling
   - Cache and dependency issues

5. **Performance and Security**:
   - Build time optimization
   - Security policy compliance
   - CDN and caching configuration
   - SSL certificate validation

**Diagnostic Approach:**
1. **Immediate Assessment**: Check current status and error messages
2. **Systematic Testing**: Validate each component step-by-step
3. **Fix Implementation**: Apply targeted solutions with validation
4. **Prevention Setup**: Implement monitoring and automated checks

**Required Output:**
- Root cause analysis
- Step-by-step fix instructions
- Validation procedures
- Prevention recommendations
- Monitoring setup guide

Please provide a comprehensive troubleshooting guide with specific, actionable solutions.`,
      },
    },
  ];
}

```
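
A minimal usage sketch (not part of the source file; the import path and option names mirror the code above):

```typescript
import {
  analyzeProjectContext,
  generateTechnicalWriterPrompts,
} from "./technical-writer-prompts.js";

async function demo(): Promise<void> {
  // Probe the current project for languages, frameworks, and doc gaps.
  const context = await analyzeProjectContext(process.cwd());
  console.log("Languages:", context.languages.join(", ") || "none detected");

  // Build a Diataxis tutorial prompt using the detected context.
  const messages = await generateTechnicalWriterPrompts(
    "tutorial-writer",
    process.cwd(),
    { target_audience: "beginners", learning_goal: "deploy the docs site" },
  );
  console.log(messages[0].content.text);
}

demo().catch(console.error);
```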

--------------------------------------------------------------------------------
/docs/adrs/006-mcp-tools-api-design.md:
--------------------------------------------------------------------------------

```markdown
---
id: 006-mcp-tools-api-design
title: "ADR-006: MCP Tools API Design and Interface Specification"
sidebar_label: "ADR-006: MCP Tools API Design"
sidebar_position: 6
documcp:
  last_updated: "2025-11-20T00:46:21.940Z"
  last_validated: "2025-11-20T00:46:21.940Z"
  auto_updated: false
  update_frequency: monthly
---

# ADR-006: MCP Tools API Design and Interface Specification

## Status

Accepted

## Context

DocuMCP must expose its functionality through a carefully designed set of MCP tools that provide comprehensive coverage of the documentation deployment workflow while maintaining clear separation of concerns, appropriate granularity, and excellent developer experience for MCP-enabled clients.

The MCP Tools API serves as the primary interface between DocuMCP's intelligence and client applications like GitHub Copilot, Claude Desktop, and other MCP-enabled development environments. This API must balance several competing concerns:

**Functional Requirements:**

- Comprehensive repository analysis capabilities
- Intelligent SSG recommendation with detailed justifications
- Automated configuration generation for multiple SSGs
- Diataxis-compliant documentation structure creation
- GitHub Pages deployment workflow generation
- Git integration for seamless deployment

**Usability Requirements:**

- Intuitive tool names and parameter structures
- Comprehensive input validation with clear error messages
- Consistent response formats across all tools
- Rich metadata for client presentation and user guidance
- Progressive disclosure of complexity (simple to advanced use cases)

**Technical Requirements:**

- Full MCP specification compliance
- Robust error handling and recovery
- Efficient parameter validation and sanitization
- Scalable architecture supporting complex multi-step workflows
- Extensible design for future functionality additions

## Decision

We will implement a comprehensive MCP Tools API consisting of six core tools that cover the complete documentation deployment workflow, with additional utility tools for advanced scenarios and troubleshooting.

### Core MCP Tools Architecture:

#### 1. Repository Analysis Tool (`analyzeRepository`)

**Purpose**: Comprehensive repository analysis and project characterization
**Scope**: Deep analysis of project structure, language ecosystems, existing documentation, and complexity assessment

#### 2. SSG Recommendation Tool (`recommendSSG`)

**Purpose**: Intelligent static site generator recommendation with detailed justifications
**Scope**: Multi-criteria decision analysis with confidence scoring and alternative options

#### 3. Configuration Generation Tool (`generateConfiguration`)

**Purpose**: Create customized SSG configuration files and directory structures
**Scope**: Template-based generation with project-specific customizations and validation

#### 4. Diataxis Structure Tool (`createDiataxisStructure`)

**Purpose**: Generate comprehensive Diataxis-compliant documentation frameworks
**Scope**: Information architecture generation with content planning and navigation design

#### 5. Deployment Workflow Tool (`generateWorkflow`)

**Purpose**: Create optimized GitHub Actions workflows for automated deployment
**Scope**: SSG-specific workflow generation with security best practices and performance optimization

#### 6. Git Integration Tool (`generateGitCommands`)

**Purpose**: Provide ready-to-execute Git commands for deployment and maintenance
**Scope**: Context-aware command generation with branch management and deployment verification

### Supporting Tools:

- `validateConfiguration`: Validate generated configurations and identify issues
- `troubleshootDeployment`: Analyze deployment failures and provide remediation guidance
- `optimizePerformance`: Analyze and optimize existing documentation site performance
- `migrateDocumentation`: Assist with migration between different SSGs or frameworks
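
To make the tool surface concrete, here is a minimal dispatch sketch (illustrative only; the actual MCP SDK registration API is not specified in this ADR):

```typescript
type ToolHandler = (params: Record<string, unknown>) => Promise<unknown>;

// Each tool name maps to one handler; supporting tools register the same way.
const tools = new Map<string, ToolHandler>();

function registerTool(name: string, handler: ToolHandler): void {
  tools.set(name, handler);
}

async function dispatch(
  name: string,
  params: Record<string, unknown>,
): Promise<unknown> {
  const handler = tools.get(name);
  if (!handler) throw new Error(`Unknown tool: ${name}`);
  return handler(params);
}

registerTool("analyzeRepository", async (params) => {
  // Delegate to the analysis engine (handler body elided in this sketch).
  return { received: params };
});
```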

## Alternatives Considered

### Monolithic Single Tool Approach

- **Pros**: Simpler API surface, single entry point, easier client integration
- **Cons**: Complex parameter structures, poor separation of concerns, difficult error handling
- **Decision**: Rejected due to poor usability and maintainability

### Micro-Tool Architecture (15+ Small Tools)

- **Pros**: Maximum granularity, precise control, composable workflows
- **Cons**: Complex orchestration, cognitive overhead, fragmented user experience
- **Decision**: Rejected due to complexity and poor user experience

### Stateful Session-Based API

- **Pros**: Could maintain context across tool calls, simplified parameter passing
- **Cons**: Session management complexity, state synchronization issues, harder client integration
- **Decision**: Rejected to maintain MCP stateless principles

### External API Integration (REST/GraphQL)

- **Pros**: Standard web technologies, extensive tooling ecosystem
- **Cons**: Not MCP-compliant, additional infrastructure requirements, authentication complexity
- **Decision**: Rejected due to MCP specification requirements

## Consequences

### Positive

- **Clear Separation of Concerns**: Each tool has well-defined responsibility and scope
- **Progressive Complexity**: Users can start simple and add sophistication as needed
- **Excellent Error Handling**: Tool-specific validation and error reporting
- **Client-Friendly**: Rich metadata and consistent response formats enhance client UX
- **Extensible Architecture**: Easy to add new tools without breaking existing functionality

### Negative

- **API Surface Complexity**: Six core tools plus supporting tools require comprehensive documentation
- **Inter-Tool Coordination**: Some workflows require multiple tool calls with parameter passing
- **Validation Overhead**: Each tool requires comprehensive input validation and error handling

### Risks and Mitigations

- **API Complexity**: Provide comprehensive documentation and usage examples
- **Parameter Evolution**: Use versioned schemas with backward compatibility
- **Client Integration**: Offer reference implementations and integration guides

## Implementation Details

### Tool Parameter Schemas

```typescript
// Core tool parameter interfaces
interface AnalyzeRepositoryParams {
  repositoryPath: string;
  analysisDepth?: "basic" | "comprehensive" | "deep";
  focusAreas?: ("structure" | "languages" | "documentation" | "complexity")[];
  excludePatterns?: string[];
}

interface RecommendSSGParams {
  projectAnalysis: ProjectAnalysis;
  teamCapabilities?: TeamCapabilities;
  performanceRequirements?: PerformanceRequirements;
  customizationNeeds?: CustomizationNeeds;
  existingConstraints?: ProjectConstraints;
}

interface GenerateConfigurationParams {
  selectedSSG: SSGType;
  projectAnalysis: ProjectAnalysis;
  customizations?: SSGCustomizations;
  deploymentTarget?: DeploymentTarget;
  advancedOptions?: AdvancedConfigOptions;
}

interface CreateDiataxisStructureParams {
  selectedSSG: SSGType;
  projectType: ProjectType;
  existingContent?: ExistingContentAnalysis;
  contentComplexity?: "minimal" | "standard" | "comprehensive";
  navigationPreferences?: NavigationPreferences;
}

interface GenerateWorkflowParams {
  ssgType: SSGType;
  deploymentStrategy: "github-actions" | "branch-based" | "hybrid";
  securityRequirements?: SecurityRequirements;
  performanceOptimizations?: PerformanceOptions;
  environmentConfiguration?: EnvironmentConfig;
}

interface GenerateGitCommandsParams {
  deploymentStrategy: DeploymentStrategy;
  repositoryState: RepositoryState;
  branchConfiguration: BranchConfiguration;
  commitPreferences?: CommitPreferences;
}
```

### Response Format Standardization

```typescript
// Standardized response structure for all tools
interface MCPToolResponse<T> {
  success: boolean;
  data?: T;
  error?: ErrorDetails;
  metadata: ResponseMetadata;
  recommendations?: Recommendation[];
  nextSteps?: NextStep[];
}

interface ResponseMetadata {
  toolVersion: string;
  executionTime: number;
  confidenceScore?: number;
  analysisDepth: string;
  timestamp: string;
  correlationId: string;
}

interface ErrorDetails {
  code: string;
  message: string;
  details: string;
  resolution?: string;
  documentation?: string;
}

interface Recommendation {
  type: "optimization" | "alternative" | "enhancement";
  priority: "low" | "medium" | "high";
  description: string;
  implementation?: string;
  resources?: string[];
}

interface NextStep {
  action: string;
  description: string;
  toolRequired?: string;
  parameters?: Record<string, any>;
  estimated_time?: string;
}
```
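
A hypothetical response instance conforming to these interfaces (values are illustrative, not captured output):

```typescript
const exampleResponse: MCPToolResponse<{ projectType: string }> = {
  success: true,
  data: { projectType: "node_application" },
  metadata: {
    toolVersion: "1.0.0",
    executionTime: 412, // milliseconds
    analysisDepth: "comprehensive",
    timestamp: new Date().toISOString(),
    correlationId: "corr-0001", // placeholder
  },
  nextSteps: [
    {
      action: "Get SSG Recommendation",
      description: "Feed the analysis into recommendSSG",
      toolRequired: "recommendSSG",
    },
  ],
};
```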

### analyzeRepository Tool Implementation

```typescript
const analyzeRepositoryTool: MCPTool = {
  name: "analyzeRepository",
  description: "Comprehensive repository analysis for documentation planning",
  inputSchema: {
    type: "object",
    properties: {
      repositoryPath: {
        type: "string",
        description: "Path to the repository to analyze",
      },
      analysisDepth: {
        type: "string",
        enum: ["basic", "comprehensive", "deep"],
        default: "comprehensive",
        description: "Depth of analysis to perform",
      },
      focusAreas: {
        type: "array",
        items: {
          type: "string",
          enum: ["structure", "languages", "documentation", "complexity"],
        },
        description: "Specific areas to focus analysis on",
      },
      excludePatterns: {
        type: "array",
        items: { type: "string" },
        description: "File patterns to exclude from analysis",
      },
    },
    required: ["repositoryPath"],
  },
};

async function handleAnalyzeRepository(
  params: AnalyzeRepositoryParams,
): Promise<MCPToolResponse<RepositoryAnalysis>> {
  try {
    const analysis = await repositoryAnalyzer.analyze(params);

    return {
      success: true,
      data: analysis,
      metadata: {
        toolVersion: "1.0.0",
        executionTime: analysis.executionTime,
        analysisDepth: params.analysisDepth || "comprehensive",
        timestamp: new Date().toISOString(),
        correlationId: generateCorrelationId(),
      },
      recommendations: generateAnalysisRecommendations(analysis),
      nextSteps: [
        {
          action: "Get SSG Recommendation",
          description:
            "Use analysis results to get intelligent SSG recommendations",
          toolRequired: "recommendSSG",
          parameters: { projectAnalysis: analysis },
          estimated_time: "< 1 minute",
        },
      ],
    };
  } catch (error) {
    return {
      success: false,
      error: {
        code: "ANALYSIS_FAILED",
        message: "Repository analysis failed",
        details: error instanceof Error ? error.message : String(error),
        resolution: "Verify repository path and permissions",
        documentation: "https://documcp.dev/troubleshooting#analysis-errors",
      },
      metadata: {
        toolVersion: "1.0.0",
        executionTime: 0,
        analysisDepth: params.analysisDepth || "comprehensive",
        timestamp: new Date().toISOString(),
        correlationId: generateCorrelationId(),
      },
    };
  }
}
```

### recommendSSG Tool Implementation

```typescript
const recommendSSGTool: MCPTool = {
  name: "recommendSSG",
  description:
    "Intelligent static site generator recommendation with detailed justifications",
  inputSchema: {
    type: "object",
    properties: {
      projectAnalysis: {
        type: "object",
        description: "Repository analysis results from analyzeRepository tool",
      },
      teamCapabilities: {
        type: "object",
        properties: {
          technicalSkills: { type: "array", items: { type: "string" } },
          maintenanceCapacity: {
            type: "string",
            enum: ["minimal", "moderate", "extensive"],
          },
          learningAppetite: { type: "string", enum: ["low", "medium", "high"] },
        },
      },
      performanceRequirements: {
        type: "object",
        properties: {
          buildTimeImportance: {
            type: "string",
            enum: ["low", "medium", "high"],
          },
          siteSpeedPriority: {
            type: "string",
            enum: ["standard", "fast", "ultra-fast"],
          },
          scalabilityNeeds: {
            type: "string",
            enum: ["small", "medium", "large", "enterprise"],
          },
        },
      },
    },
    required: ["projectAnalysis"],
  },
};

async function handleRecommendSSG(
  params: RecommendSSGParams,
): Promise<MCPToolResponse<SSGRecommendation>> {
  try {
    const recommendation = await ssgRecommendationEngine.analyze(params);

    return {
      success: true,
      data: recommendation,
      metadata: {
        toolVersion: "1.0.0",
        executionTime: recommendation.analysisTime,
        confidenceScore: recommendation.confidence,
        analysisDepth: "comprehensive",
        timestamp: new Date().toISOString(),
        correlationId: generateCorrelationId(),
      },
      recommendations: [
        {
          type: "optimization",
          priority: "medium",
          description: "Consider performance optimization strategies",
          implementation: "Review build caching and incremental build options",
        },
      ],
      nextSteps: [
        {
          action: "Generate Configuration",
          description: "Create customized configuration for recommended SSG",
          toolRequired: "generateConfiguration",
          parameters: {
            selectedSSG: recommendation.primaryRecommendation.ssg,
            projectAnalysis: params.projectAnalysis,
          },
          estimated_time: "2-3 minutes",
        },
      ],
    };
  } catch (error) {
    console.error("SSG recommendation analysis failed:", error);
    return {
      success: false,
      error: {
        code: "SSG_RECOMMENDATION_FAILED",
        message: "Failed to analyze SSG recommendations",
        details: error instanceof Error ? error.message : "Unknown error",
        resolution:
          "Check project analysis data and retry with valid parameters",
      },
      metadata: {
        toolVersion: "1.0.0",
        executionTime: 0,
        analysisDepth: "comprehensive",
        timestamp: new Date().toISOString(),
        correlationId: generateCorrelationId(),
      },
    };
  }
}
```

### Input Validation System

```typescript
interface ValidationRule {
  field: string;
  validator: (value: any) => ValidationResult;
  required: boolean;
  errorMessage: string;
}

class MCPToolValidator {
  validateParameters<T>(params: T, schema: JSONSchema): ValidationResult {
    const results = this.runSchemaValidation(params, schema);
    const semanticResults = this.runSemanticValidation(params);

    return this.combineValidationResults(results, semanticResults);
  }

  private runSemanticValidation(params: any): ValidationResult {
    const issues: ValidationIssue[] = [];

    // Repository path validation
    if (
      params.repositoryPath &&
      !this.isValidRepositoryPath(params.repositoryPath)
    ) {
      issues.push({
        field: "repositoryPath",
        message: "Repository path does not exist or is not accessible",
        severity: "error",
        resolution: "Verify the path exists and you have read permissions",
      });
    }

    // Cross-parameter validation
    if (params.analysisDepth === "deep" && params.focusAreas?.length > 2) {
      issues.push({
        field: "analysisDepth",
        message: "Deep analysis with multiple focus areas may be slow",
        severity: "warning",
        resolution:
          "Consider using comprehensive analysis or fewer focus areas",
      });
    }

    return { valid: issues.length === 0, issues };
  }
}
```

## Tool Orchestration Patterns

### Sequential Workflow Pattern

```typescript
// Common workflow: Analysis → Recommendation → Configuration → Deployment
class DocumentationWorkflow {
  async executeCompleteWorkflow(
    repositoryPath: string,
  ): Promise<WorkflowResult> {
    try {
      // Step 1: Analyze repository
      const analysisResult = await this.callTool("analyzeRepository", {
        repositoryPath,
      });
      if (!analysisResult.success) {
        throw new Error(`Analysis failed: ${analysisResult.error?.message}`);
      }

      // Step 2: Get SSG recommendation
      const recommendationResult = await this.callTool("recommendSSG", {
        projectAnalysis: analysisResult.data,
      });
      if (!recommendationResult.success) {
        throw new Error(
          `Recommendation failed: ${recommendationResult.error?.message}`,
        );
      }

      // Step 3: Generate configuration
      const configResult = await this.callTool("generateConfiguration", {
        selectedSSG: recommendationResult.data.primaryRecommendation.ssg,
        projectAnalysis: analysisResult.data,
      });
      if (!configResult.success) {
        throw new Error(
          `Configuration generation failed: ${configResult.error?.message}`,
        );
      }

      // Step 4: Create Diataxis structure
      const structureResult = await this.callTool("createDiataxisStructure", {
        selectedSSG: recommendationResult.data.primaryRecommendation.ssg,
        projectType: analysisResult.data.projectType,
      });
      if (!structureResult.success) {
        console.warn(
          `Diataxis structure creation failed: ${structureResult.error?.message}`,
        );
      }

      // Step 5: Generate deployment workflow
      const workflowResult = await this.callTool("generateWorkflow", {
        ssgType: recommendationResult.data.primaryRecommendation.ssg,
        deploymentStrategy: "github-actions",
      });
      if (!workflowResult.success) {
        console.warn(
          `Workflow generation failed: ${workflowResult.error?.message}`,
        );
      }

      return this.combineResults([
        analysisResult,
        recommendationResult,
        configResult,
        structureResult,
        workflowResult,
      ]);
    } catch (error) {
      throw new Error(
        `Complete workflow failed: ${
          error instanceof Error ? error.message : String(error)
        }`,
      );
    }
  }
}
```

## Error Handling and Recovery

### Comprehensive Error Classification

```typescript
enum ErrorCategory {
  VALIDATION = "validation",
  FILESYSTEM = "filesystem",
  ANALYSIS = "analysis",
  GENERATION = "generation",
  CONFIGURATION = "configuration",
  DEPLOYMENT = "deployment",
  NETWORK = "network",
  PERMISSION = "permission",
}

interface ErrorContext {
  tool: string;
  operation: string;
  parameters: Record<string, any>;
  environment: EnvironmentInfo;
}

class MCPErrorHandler {
  handleError(error: Error, context: ErrorContext): MCPToolResponse<null> {
    const classification = this.classifyError(error);
    const resolution = this.generateResolution(classification, context);

    return {
      success: false,
      error: {
        code: this.generateErrorCode(classification),
        message: this.formatUserMessage(error, classification),
        details: error.message,
        resolution: resolution.guidance,
        documentation: resolution.documentationUrl,
      },
      metadata: this.generateErrorMetadata(context),
      nextSteps: resolution.suggestedActions,
    };
  }

  private generateResolution(
    classification: ErrorClassification,
    context: ErrorContext,
  ): ErrorResolution {
    switch (classification.category) {
      case ErrorCategory.FILESYSTEM:
        return {
          guidance: "Verify file paths and permissions",
          documentationUrl:
            "https://documcp.dev/troubleshooting#filesystem-errors",
          suggestedActions: [
            {
              action: "Check file exists",
              description: `Verify ${context.parameters.repositoryPath} exists`,
            },
            {
              action: "Check permissions",
              description: "Ensure read access to repository directory",
            },
          ],
        };
      // ... other error categories
    }
  }
}
```

## Performance Optimization

### Response Caching Strategy

```typescript
interface CacheConfiguration {
  analyzeRepository: {
    ttl: 300;
    keyFields: ["repositoryPath", "analysisDepth"];
  };
  recommendSSG: { ttl: 3600; keyFields: ["projectAnalysis.signature"] };
  generateConfiguration: {
    ttl: 1800;
    keyFields: ["selectedSSG", "projectAnalysis.signature"];
  };
}

class MCPToolCache {
  async getCachedResponse<T>(
    toolName: string,
    parameters: any,
  ): Promise<MCPToolResponse<T> | null> {
    const cacheKey = this.generateCacheKey(toolName, parameters);
    const cached = await this.cache.get(cacheKey);

    if (cached && !this.isExpired(cached)) {
      return {
        ...cached,
        metadata: {
          ...cached.metadata,
          fromCache: true,
          cacheAge: Date.now() - cached.metadata.timestamp,
        },
      };
    }

    return null;
  }
}
```
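
One plausible `generateCacheKey`, derived from the configured `keyFields` (a sketch; the hashing scheme and nested-path resolution are assumptions not specified above):

```typescript
import { createHash } from "node:crypto";

function generateCacheKey(
  toolName: string,
  parameters: Record<string, unknown>,
  keyFields: string[],
): string {
  // Resolve dotted paths like "projectAnalysis.signature" against parameters.
  const resolve = (path: string): unknown =>
    path
      .split(".")
      .reduce<unknown>(
        (obj, key) =>
          obj && typeof obj === "object"
            ? (obj as Record<string, unknown>)[key]
            : undefined,
        parameters,
      );

  // Sort fields so key generation is order-independent and deterministic.
  const identity = [...keyFields]
    .sort()
    .map((field) => `${field}=${JSON.stringify(resolve(field) ?? null)}`)
    .join("&");

  return `${toolName}:${createHash("sha256").update(identity).digest("hex")}`;
}
```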

## Testing Strategy

### Tool Testing Framework

```typescript
describe("MCP Tools API", () => {
  describe("analyzeRepository", () => {
    it("should analyze JavaScript project correctly");
    it("should handle missing repository gracefully");
    it("should respect analysis depth parameters");
    it("should exclude specified patterns");
  });

  describe("recommendSSG", () => {
    it("should recommend Hugo for large documentation sites");
    it("should recommend Jekyll for GitHub Pages simple sites");
    it("should provide confidence scores for all recommendations");
    it("should handle incomplete project analysis");
  });

  describe("Tool Integration", () => {
    it("should support complete workflow from analysis to deployment");
    it("should maintain parameter consistency across tool calls");
    it("should provide appropriate next steps guidance");
  });
});
```

### Integration Testing

```typescript
class MCPToolIntegrationTests {
  async testCompleteWorkflow(): Promise<void> {
    const testRepo = await this.createTestRepository();

    // Test full workflow
    const analysis = await this.callTool("analyzeRepository", {
      repositoryPath: testRepo,
    });
    expect(analysis.success).toBe(true);

    const recommendation = await this.callTool("recommendSSG", {
      projectAnalysis: analysis.data,
    });
    expect(recommendation.success).toBe(true);
    expect(recommendation.data.primaryRecommendation).toBeDefined();

    const config = await this.callTool("generateConfiguration", {
      selectedSSG: recommendation.data.primaryRecommendation.ssg,
      projectAnalysis: analysis.data,
    });
    expect(config.success).toBe(true);

    // Validate generated configuration
    await this.validateGeneratedFiles(config.data.files);
  }
}
```

## Documentation and Examples

### Tool Usage Examples

```typescript
// Example: Complete documentation setup workflow
const examples = {
  basicSetup: {
    description: "Basic documentation setup for a JavaScript project",
    steps: [
      {
        tool: "analyzeRepository",
        parameters: { repositoryPath: "./my-project" },
        expectedResult: "Project analysis with language ecosystem detection",
      },
      {
        tool: "recommendSSG",
        parameters: { projectAnalysis: "${analysis_result}" },
        expectedResult: "SSG recommendation with justification",
      },
    ],
  },
  advancedSetup: {
    description: "Advanced setup with custom requirements",
    steps: [
      // ... detailed workflow steps
    ],
  },
};
```

## Future Enhancements

### Planned Tool Additions

- `analyzeExistingDocs`: Deep analysis of existing documentation quality and structure
- `generateMigrationPlan`: Create migration plans between different documentation systems
- `optimizeContent`: AI-powered content optimization and gap analysis
- `validateAccessibility`: Comprehensive accessibility testing and recommendations

### API Evolution Strategy

- Versioned tool schemas with backward compatibility (see the sketch below)
- Deprecation notices and migration guidance
- Feature flags for experimental functionality
- Community feedback integration for API improvements
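
A minimal sketch of what a versioned schema envelope with deprecation metadata could look like (the field names and migration URL are assumptions, not the shipped API):

```typescript
interface VersionedToolSchema {
  toolName: string;
  version: string; // semver, e.g. "1.2.0"
  deprecated?: {
    since: string; // version that announced the deprecation
    removal: string; // version that will remove this schema
    migrationGuide: string; // URL with migration guidance
  };
  inputSchema: object; // JSON Schema for this version's parameters
}

const analyzeRepositoryV1: VersionedToolSchema = {
  toolName: "analyzeRepository",
  version: "1.0.0",
  deprecated: {
    since: "2.0.0",
    removal: "3.0.0",
    migrationGuide: "https://documcp.dev/migrations/analyze-repository-v2",
  },
  inputSchema: {
    type: "object",
    properties: { repositoryPath: { type: "string" } },
    required: ["repositoryPath"],
  },
};
```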

## References

- [Model Context Protocol Specification](https://spec.modelcontextprotocol.io/)
- [JSON Schema Validation](https://json-schema.org/)
- [API Design Best Practices](https://swagger.io/resources/articles/best-practices-in-api-design/)

```

--------------------------------------------------------------------------------
/src/utils/language-parsers-simple.ts:
--------------------------------------------------------------------------------

```typescript
import { CodeElement, APIEndpoint } from "./code-scanner.js";
import { spawn } from "child_process";

export interface LanguageParser {
  extensions: string[];
  name: string;
  parseFile(content: string, filePath: string): Promise<LanguageParseResult>;
  supportsApiEndpoints?: boolean;
  supportsFrameworkDetection?: boolean;
}

export interface LanguageParseResult {
  functions: CodeElement[];
  classes: CodeElement[];
  interfaces: CodeElement[];
  types: CodeElement[];
  enums: CodeElement[];
  exports: CodeElement[];
  imports: CodeElement[];
  apiEndpoints: APIEndpoint[];
  constants: CodeElement[];
  variables: CodeElement[];
}

export class MultiLanguageCodeScanner {
  private parsers = new Map<string, LanguageParser>();

  constructor() {
    this.initializeParsers();
  }

  private initializeParsers() {
    // Register parsers based on your tech stack
    this.registerParser(new PythonParser());
    this.registerParser(new GoParser());
    this.registerParser(new YamlParser());
    this.registerParser(new BashParser());
  }

  private registerParser(parser: LanguageParser) {
    for (const extension of parser.extensions) {
      this.parsers.set(extension, parser);
    }
  }

  async parseFile(
    content: string,
    filePath: string,
  ): Promise<LanguageParseResult> {
    const extension = this.getFileExtension(filePath);
    const parser = this.parsers.get(extension);

    if (parser) {
      return await parser.parseFile(content, filePath);
    }

    // Return empty result for unsupported files
    return this.getEmptyResult();
  }

  private getFileExtension(filePath: string): string {
    return filePath.split(".").pop()?.toLowerCase() || "";
  }

  private getEmptyResult(): LanguageParseResult {
    return {
      functions: [],
      classes: [],
      interfaces: [],
      types: [],
      enums: [],
      exports: [],
      imports: [],
      apiEndpoints: [],
      constants: [],
      variables: [],
    };
  }

  getSupportedExtensions(): string[] {
    return Array.from(this.parsers.keys());
  }

  getParserInfo(): { extension: string; parser: string }[] {
    return Array.from(this.parsers.entries()).map(([ext, parser]) => ({
      extension: ext,
      parser: parser.name,
    }));
  }
}
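
// Example usage (illustrative; the fs import and file path are assumptions):
//   const scanner = new MultiLanguageCodeScanner();
//   const source = await fs.readFile("app/main.py", "utf-8");
//   const parsed = await scanner.parseFile(source, "app/main.py");
//   console.log(parsed.functions.map((f) => f.name));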

// Python Parser Implementation using subprocess + regex fallback
export class PythonParser implements LanguageParser {
  extensions = ["py", "pyi", "pyx", "pxd"];
  name = "Python";
  supportsApiEndpoints = true;
  supportsFrameworkDetection = true;

  async parseFile(
    content: string,
    filePath: string,
  ): Promise<LanguageParseResult> {
    const result: LanguageParseResult = {
      functions: [],
      classes: [],
      interfaces: [],
      types: [],
      enums: [],
      exports: [],
      imports: [],
      apiEndpoints: [],
      constants: [],
      variables: [],
    };

    try {
      // Try subprocess-based AST parsing first
      const astResult = await this.parseWithPythonAST(content, filePath);
      if (astResult) {
        this.mergePythonASTResults(astResult, result, filePath);
      } else {
        // Fall back to regex-based parsing
        this.parseWithRegex(content, result, filePath);
      }

      // Look for Flask/FastAPI/Django endpoints
      this.findPythonApiEndpoints(content, result, filePath);
    } catch (error) {
      console.warn(`Failed to parse Python file ${filePath}:`, error);
      // Fall back to regex-based parsing
      this.parseWithRegex(content, result, filePath);
    }

    return result;
  }

  private async parseWithPythonAST(
    content: string,
    _filePath: string,
  ): Promise<any> {
    return new Promise((resolve) => {
      // Create a Python script to parse the AST
      const pythonScript = `
import ast
import sys
import json

try:
    # Read content from stdin
    content = sys.stdin.read()

    tree = ast.parse(content)

    result = {
        'functions': [],
        'classes': [],
        'imports': [],
        'constants': [],
        'variables': []
    }

    for node in ast.walk(tree):
        if isinstance(node, ast.FunctionDef):
            result['functions'].append({
                'name': node.name,
                'line': node.lineno,
                'has_docstring': ast.get_docstring(node) is not None,
                'docstring': ast.get_docstring(node),
                'is_async': False,
                'exported': not node.name.startswith('_')
            })
        elif isinstance(node, ast.AsyncFunctionDef):
            result['functions'].append({
                'name': node.name,
                'line': node.lineno,
                'has_docstring': ast.get_docstring(node) is not None,
                'docstring': ast.get_docstring(node),
                'is_async': True,
                'exported': not node.name.startswith('_')
            })
        elif isinstance(node, ast.ClassDef):
            result['classes'].append({
                'name': node.name,
                'line': node.lineno,
                'has_docstring': ast.get_docstring(node) is not None,
                'docstring': ast.get_docstring(node),
                'exported': not node.name.startswith('_')
            })
        elif isinstance(node, (ast.Import, ast.ImportFrom)):
            if isinstance(node, ast.Import):
                for alias in node.names:
                    result['imports'].append({
                        'name': alias.name,
                        'line': node.lineno
                    })
            else:  # ImportFrom
                result['imports'].append({
                    'name': node.module or 'relative',
                    'line': node.lineno
                })
        elif isinstance(node, ast.Assign):
            for target in node.targets:
                if isinstance(target, ast.Name):
                    is_constant = target.id.isupper()
                    result['constants' if is_constant else 'variables'].append({
                        'name': target.id,
                        'line': node.lineno,
                        'exported': not target.id.startswith('_')
                    })

    print(json.dumps(result))
except Exception as e:
    print(json.dumps({'error': str(e)}), file=sys.stderr)
`;

      // Try to execute Python AST parsing; name the handle `child` so the
      // global Node `process` object is not shadowed
      const child = spawn("python3", ["-c", pythonScript], {
        stdio: ["pipe", "pipe", "pipe"],
      });

      // Send content via stdin
      child.stdin.write(content);
      child.stdin.end();

      let output = "";
      let errorOutput = "";

      child.stdout.on("data", (data) => {
        output += data.toString();
      });

      child.stderr.on("data", (data) => {
        errorOutput += data.toString();
      });

      // Timeout after 5 seconds; cleared once the process exits normally
      const timer = setTimeout(() => {
        child.kill();
        resolve(null);
      }, 5000);

      child.on("close", (code) => {
        clearTimeout(timer);
        if (code === 0 && output.trim()) {
          try {
            const result = JSON.parse(output.trim());
            if (!result.error) {
              resolve(result);
              return;
            }
          } catch (e) {
            // JSON parsing failed
            console.warn("Failed to parse Python AST output:", e);
          }
        }
        if (errorOutput) {
          console.warn("Python AST parsing errors:", errorOutput);
        }
        resolve(null); // Fall back to regex parsing
      });

      child.on("error", () => {
        clearTimeout(timer);
        resolve(null); // Python not available or failed
      });
    });
  }

  private mergePythonASTResults(
    astResult: any,
    result: LanguageParseResult,
    filePath: string,
  ): void {
    astResult.functions?.forEach((func: any) => {
      result.functions.push({
        name: func.name,
        type: "function",
        filePath,
        line: func.line,
        column: 0,
        exported: func.exported,
        isAsync: func.is_async,
        hasJSDoc: func.has_docstring,
        jsDocDescription: func.docstring || undefined,
      });
    });

    astResult.classes?.forEach((cls: any) => {
      result.classes.push({
        name: cls.name,
        type: "class",
        filePath,
        line: cls.line,
        column: 0,
        exported: cls.exported,
        hasJSDoc: cls.has_docstring,
        jsDocDescription: cls.docstring || undefined,
      });
    });

    astResult.imports?.forEach((imp: any) => {
      result.imports.push({
        name: imp.name,
        type: "import",
        filePath,
        line: imp.line,
        column: 0,
        exported: false,
      });
    });

    astResult.constants?.forEach((constant: any) => {
      result.constants.push({
        name: constant.name,
        type: "variable",
        filePath,
        line: constant.line,
        column: 0,
        exported: constant.exported,
        hasJSDoc: false,
      });
    });

    astResult.variables?.forEach((variable: any) => {
      result.variables.push({
        name: variable.name,
        type: "variable",
        filePath,
        line: variable.line,
        column: 0,
        exported: variable.exported,
        hasJSDoc: false,
      });
    });
  }

  private parseWithRegex(
    content: string,
    result: LanguageParseResult,
    filePath: string,
  ): void {
    const lines = content.split("\n");

    lines.forEach((line, index) => {
      const lineNum = index + 1;

      // Function definitions
      const funcMatch = line.match(
        /^\s*(async\s+)?def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\(/,
      );
      if (funcMatch) {
        const isAsync = !!funcMatch[1];
        const funcName = funcMatch[2];
        const hasDocstring = this.hasDocstringAfterLine(lines, index);

        result.functions.push({
          name: funcName,
          type: "function",
          filePath,
          line: lineNum,
          column: 0,
          exported: !funcName.startsWith("_"),
          isAsync,
          hasJSDoc: hasDocstring,
        });
      }

      // Class definitions
      const classMatch = line.match(/^\s*class\s+([a-zA-Z_][a-zA-Z0-9_]*)/);
      if (classMatch) {
        const className = classMatch[1];
        const hasDocstring = this.hasDocstringAfterLine(lines, index);

        result.classes.push({
          name: className,
          type: "class",
          filePath,
          line: lineNum,
          column: 0,
          exported: !className.startsWith("_"),
          hasJSDoc: hasDocstring,
        });
      }

      // Import statements
      const importMatch = line.match(
        /^\s*(?:from\s+([^\s]+)\s+)?import\s+(.+)/,
      );
      if (importMatch) {
        const module = importMatch[1] || importMatch[2].split(",")[0].trim();
        result.imports.push({
          name: module,
          type: "import",
          filePath,
          line: lineNum,
          column: 0,
          exported: false,
        });
      }

      // Module-level constants (this regex fallback only detects UPPER_CASE names)
      const assignMatch = line.match(/^\s*([A-Z_][A-Z0-9_]*)\s*=/);
      if (assignMatch) {
        result.constants.push({
          name: assignMatch[1],
          type: "variable",
          filePath,
          line: lineNum,
          column: 0,
          exported: true,
          hasJSDoc: false,
        });
      }
    });
  }

  private hasDocstringAfterLine(lines: string[], lineIndex: number): boolean {
    // Check if next few lines contain a docstring
    for (
      let i = lineIndex + 1;
      i < Math.min(lineIndex + 3, lines.length);
      i++
    ) {
      const line = lines[i].trim();
      if (line.startsWith('"""') || line.startsWith("'''")) {
        return true;
      }
    }
    return false;
  }

  private findPythonApiEndpoints(
    content: string,
    result: LanguageParseResult,
    filePath: string,
  ) {
    // Flask patterns
    const flaskPatterns = [
      /@app\.(route|get|post|put|delete|patch)\s*\(\s*['"]([^'"]+)['"]/g,
      /@bp\.(route|get|post|put|delete|patch)\s*\(\s*['"]([^'"]+)['"]/g,
    ];

    // FastAPI patterns (@app.<verb> is already matched by the Flask patterns
    // above; repeating it here would double-count endpoints)
    const fastApiPatterns = [
      /router\.(get|post|put|delete|patch)\s*\(\s*['"]([^'"]+)['"]/g,
    ];

    // Django patterns
    const djangoPatterns = [
      /path\s*\(\s*['"]([^'"]+)['"]/g,
      /url\s*\(\s*r?['"]([^'"]+)['"]/g,
    ];

    const allPatterns = [
      ...flaskPatterns,
      ...fastApiPatterns,
      ...djangoPatterns,
    ];
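
    // Lines these patterns are intended to catch (illustrative examples):
    //   @app.route("/users", methods=["GET"]) -> Flask, method ALL
    //   @router.get("/items/{id}")            -> FastAPI, method GET
    //   path("articles/<int:pk>/", view)      -> Django, method ALL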

    allPatterns.forEach((pattern) => {
      let match;
      while ((match = pattern.exec(content)) !== null) {
        // Flask/FastAPI patterns capture (verb, path); Django patterns only
        // capture the path, so default to ALL when no verb group is present
        const hasMethodGroup = match[2] !== undefined;
        const method: APIEndpoint["method"] = !hasMethodGroup
          ? "ALL"
          : match[1] === "route"
            ? "ALL"
            : (match[1].toUpperCase() as APIEndpoint["method"]);
        const path = hasMethodGroup ? match[2] : match[1];

        // Find line number
        const beforeMatch = content.substring(0, match.index!);
        const line = beforeMatch.split("\n").length;

        result.apiEndpoints.push({
          method,
          path,
          filePath,
          line,
          hasDocumentation: this.hasEndpointDocumentation(
            content,
            match.index!,
          ),
        });
      }
    });
  }

  private hasEndpointDocumentation(
    content: string,
    matchIndex: number,
  ): boolean {
    const beforeMatch = content.substring(0, matchIndex);
    const lines = beforeMatch.split("\n");

    // Check last few lines for docstrings or comments
    for (let i = Math.max(0, lines.length - 5); i < lines.length; i++) {
      const line = lines[i].trim();
      if (
        line.startsWith('"""') ||
        line.startsWith("'''") ||
        line.startsWith("#")
      ) {
        return true;
      }
    }
    return false;
  }
}

// Go Parser Implementation (regex-based)
export class GoParser implements LanguageParser {
  extensions = ["go"];
  name = "Go";
  supportsApiEndpoints = true;

  async parseFile(
    content: string,
    filePath: string,
  ): Promise<LanguageParseResult> {
    const result: LanguageParseResult = {
      functions: [],
      classes: [],
      interfaces: [],
      types: [],
      enums: [],
      exports: [],
      imports: [],
      apiEndpoints: [],
      constants: [],
      variables: [],
    };

    const lines = content.split("\n");

    lines.forEach((line, index) => {
      const lineNum = index + 1;

      // Function declarations
      const funcMatch = line.match(
        /^\s*func\s+(?:\([^)]*\)\s+)?([a-zA-Z_][a-zA-Z0-9_]*)\s*\(/,
      );
      if (funcMatch) {
        const funcName = funcMatch[1];
        result.functions.push({
          name: funcName,
          type: "function",
          filePath,
          line: lineNum,
          column: 0,
          exported: this.isGoExported(funcName),
          hasJSDoc: this.hasGoDocComment(lines, index),
        });
      }

      // Type declarations (struct, interface, etc.)
      const typeMatch = line.match(
        /^\s*type\s+([a-zA-Z_][a-zA-Z0-9_]*)\s+(struct|interface)/,
      );
      if (typeMatch) {
        const typeName = typeMatch[1];
        const typeKind = typeMatch[2];

        if (typeKind === "struct") {
          result.classes.push({
            name: typeName,
            type: "class",
            filePath,
            line: lineNum,
            column: 0,
            exported: this.isGoExported(typeName),
            hasJSDoc: this.hasGoDocComment(lines, index),
          });
        } else if (typeKind === "interface") {
          result.interfaces.push({
            name: typeName,
            type: "interface",
            filePath,
            line: lineNum,
            column: 0,
            exported: this.isGoExported(typeName),
            hasJSDoc: this.hasGoDocComment(lines, index),
          });
        }
      }

      // Import declarations
      const importMatch = line.match(/^\s*(?:import\s*)?"([^"]+)"/);
      if (importMatch) {
        result.imports.push({
          name: importMatch[1],
          type: "import",
          filePath,
          line: lineNum,
          column: 0,
          exported: false,
        });
      }

      // Constants and variables
      const constMatch = line.match(
        /^\s*(const|var)\s+([a-zA-Z_][a-zA-Z0-9_]*)/,
      );
      if (constMatch) {
        const declType = constMatch[1];
        const varName = constMatch[2];

        const element: CodeElement = {
          name: varName,
          type: "variable",
          filePath,
          line: lineNum,
          column: 0,
          exported: this.isGoExported(varName),
          hasJSDoc: this.hasGoDocComment(lines, index),
        };

        if (declType === "const") {
          result.constants.push(element);
        } else {
          result.variables.push(element);
        }
      }
    });

    // Find Go API endpoints
    this.findGoApiEndpoints(content, result, filePath);

    return result;
  }

  private isGoExported(name: string): boolean {
    // In Go, exported names start with uppercase letter
    return name.length > 0 && name[0] === name[0].toUpperCase();
  }

  private hasGoDocComment(lines: string[], lineIndex: number): boolean {
    // Check if previous line has a doc comment
    if (lineIndex > 0) {
      const prevLine = lines[lineIndex - 1].trim();
      return prevLine.startsWith("//");
    }
    return false;
  }

  private findGoApiEndpoints(
    content: string,
    result: LanguageParseResult,
    filePath: string,
  ) {
    // Common Go web framework patterns
    const patterns = [
      // Gin framework
      /\.(GET|POST|PUT|DELETE|PATCH)\s*\(\s*"([^"]+)"/g,
      // Echo framework
      /\.(Get|Post|Put|Delete|Patch)\s*\(\s*"([^"]+)"/g,
      // Gorilla mux and net/http both register via HandleFunc; one pattern
      // covers both (a separate http.HandleFunc pattern would double-count)
      /\.HandleFunc\s*\(\s*"([^"]+)"/g,
    ];

    patterns.forEach((pattern) => {
      let match;
      while ((match = pattern.exec(content)) !== null) {
        let method: APIEndpoint["method"] = "ALL";
        let path: string;

        if (match[1] && match[2]) {
          method = match[1].toUpperCase() as APIEndpoint["method"];
          path = match[2];
        } else {
          path = match[1] || match[2];
        }

        const beforeMatch = content.substring(0, match.index!);
        const line = beforeMatch.split("\n").length;

        result.apiEndpoints.push({
          method,
          path,
          filePath,
          line,
          hasDocumentation: this.hasEndpointDocumentation(
            content,
            match.index!,
          ),
        });
      }
    });
  }

  private hasEndpointDocumentation(
    content: string,
    matchIndex: number,
  ): boolean {
    const beforeMatch = content.substring(0, matchIndex);
    const lines = beforeMatch.split("\n");

    for (let i = Math.max(0, lines.length - 5); i < lines.length; i++) {
      const line = lines[i].trim();
      if (line.startsWith("//") || line.startsWith("/*")) {
        return true;
      }
    }
    return false;
  }
}

// YAML Parser for Kubernetes, Terraform, etc.
export class YamlParser implements LanguageParser {
  extensions = ["yml", "yaml"];
  name = "YAML";
  supportsFrameworkDetection = true;

  async parseFile(
    content: string,
    filePath: string,
  ): Promise<LanguageParseResult> {
    const result: LanguageParseResult = {
      functions: [],
      classes: [],
      interfaces: [],
      types: [],
      enums: [],
      exports: [],
      imports: [],
      apiEndpoints: [],
      constants: [],
      variables: [],
    };

    // YAML parsing focuses on identifying Kubernetes resources, Terraform configs, etc.
    this.identifyKubernetesResources(content, result, filePath);
    this.identifyDockerComposeServices(content, result, filePath);
    this.identifyGitHubActions(content, result, filePath);

    return result;
  }

  private identifyKubernetesResources(
    content: string,
    result: LanguageParseResult,
    filePath: string,
  ) {
    const lines = content.split("\n");
    let apiVersion = "";
    let kind = "";

    lines.forEach((line, index) => {
      const lineNum = index + 1;

      const apiMatch = line.match(/^\s*apiVersion:\s*(.+)/);
      if (apiMatch) {
        apiVersion = apiMatch[1].trim();
      }

      const kindMatch = line.match(/^\s*kind:\s*(.+)/);
      if (kindMatch) {
        kind = kindMatch[1].trim();

        result.types.push({
          name: `${kind} (${apiVersion})`,
          type: "type",
          filePath,
          line: lineNum,
          column: 0,
          exported: true,
          hasJSDoc: false,
        });
      }
    });
  }

  private identifyDockerComposeServices(
    content: string,
    result: LanguageParseResult,
    filePath: string,
  ) {
    let inServicesSection = false;

    const lines = content.split("\n");

    lines.forEach((line, index) => {
      if (line.trim() === "services:") {
        inServicesSection = true;
        return;
      }

      if (inServicesSection && line.match(/^[a-zA-Z]/)) {
        inServicesSection = false; // Left services section
      }

      if (inServicesSection) {
        const serviceMatch = line.match(/^\s+([a-zA-Z0-9_-]+):\s*$/);
        if (serviceMatch) {
          result.types.push({
            name: `service: ${serviceMatch[1]}`,
            type: "type",
            filePath,
            line: index + 1,
            column: 0,
            exported: true,
            hasJSDoc: false,
          });
        }
      }
    });
  }

  private identifyGitHubActions(
    content: string,
    result: LanguageParseResult,
    filePath: string,
  ) {
    if (!filePath.includes(".github/workflows/")) return;

    const lines = content.split("\n");
    let inJobsSection = false;

    lines.forEach((line, index) => {
      if (line.trim() === "jobs:") {
        inJobsSection = true;
        return;
      }

      if (inJobsSection && line.match(/^[a-zA-Z]/)) {
        inJobsSection = false;
      }

      if (inJobsSection) {
        const jobMatch = line.match(/^\s+([a-zA-Z0-9_-]+):\s*$/);
        if (jobMatch) {
          result.functions.push({
            name: `job: ${jobMatch[1]}`,
            type: "function",
            filePath,
            line: index + 1,
            column: 0,
            exported: true,
            hasJSDoc: false,
          });
        }
      }
    });
  }
}

// Bash Parser for DevOps scripts
export class BashParser implements LanguageParser {
  extensions = ["sh", "bash", "zsh"];
  name = "Bash";

  async parseFile(
    content: string,
    filePath: string,
  ): Promise<LanguageParseResult> {
    const result: LanguageParseResult = {
      functions: [],
      classes: [],
      interfaces: [],
      types: [],
      enums: [],
      exports: [],
      imports: [],
      apiEndpoints: [],
      constants: [],
      variables: [],
    };

    const lines = content.split("\n");

    lines.forEach((line, index) => {
      const lineNum = index + 1;

      // Function definitions
      const funcMatch = line.match(
        /^\s*(?:function\s+)?([a-zA-Z_][a-zA-Z0-9_]*)\s*\(\)/,
      );
      if (funcMatch) {
        const functionName = funcMatch[1];

        result.functions.push({
          name: functionName,
          type: "function",
          filePath,
          line: lineNum,
          column: 0,
          exported: true, // Bash functions are generally available in scope
          hasJSDoc: this.hasBashDocComment(lines, index),
        });
      }

      // Variable assignments (valid bash assignments have no spaces around "=")
      const varMatch = line.match(/^\s*([a-zA-Z_][a-zA-Z0-9_]*)=/);
      if (varMatch) {
        const varName = varMatch[1];
        const isConstant = varName === varName.toUpperCase();

        const element: CodeElement = {
          name: varName,
          type: "variable",
          filePath,
          line: lineNum,
          column: 0,
          exported: true,
          hasJSDoc: this.hasBashDocComment(lines, index),
        };

        if (isConstant) {
          result.constants.push(element);
        } else {
          result.variables.push(element);
        }
      }
    });

    return result;
  }

  private hasBashDocComment(lines: string[], lineIndex: number): boolean {
    // Check if previous line has a comment
    if (lineIndex > 0) {
      const prevLine = lines[lineIndex - 1].trim();
      return prevLine.startsWith("#");
    }
    return false;
  }
}

```

--------------------------------------------------------------------------------
/tests/memory/kg-storage-validation.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Tests for uncovered branches in KGStorage
 * Covers: Error handling (lines 197, 276), backup restoration with timestamp (lines 453-455),
 * validation errors (lines 496, 510), and other edge cases
 */

import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import { join } from "path";
import { KGStorage } from "../../src/memory/kg-storage.js";
import { GraphNode, GraphEdge } from "../../src/memory/knowledge-graph.js";
import { tmpdir } from "os";

describe("KGStorage - Validation and Error Handling", () => {
  let storage: KGStorage;
  let testDir: string;

  beforeEach(async () => {
    testDir = join(tmpdir(), `kg-storage-validation-test-${Date.now()}`);
    await fs.mkdir(testDir, { recursive: true });

    storage = new KGStorage({
      storageDir: testDir,
      backupOnWrite: true,
      validateOnRead: true,
    });

    await storage.initialize();
  });

  afterEach(async () => {
    try {
      await fs.rm(testDir, { recursive: true, force: true });
    } catch (error) {
      // Ignore cleanup errors
    }
  });

  describe("Load Error Handling", () => {
    it("should handle non-JSON lines in loadEntities gracefully (line 188)", async () => {
      const entityFile = join(testDir, "knowledge-graph-entities.jsonl");

      // Write marker + valid entity + invalid JSON + another valid entity
      await fs.writeFile(
        entityFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_ENTITIES v1.0.0\n" +
          '{"id":"e1","type":"project","label":"Project 1","properties":{},"weight":1.0,"lastUpdated":"2024-01-01"}\n' +
          "invalid json line {this is not valid}\n" +
          '{"id":"e2","type":"project","label":"Project 2","properties":{},"weight":1.0,"lastUpdated":"2024-01-01"}\n',
        "utf-8",
      );

      // Should load valid entities and skip invalid line
      const entities = await storage.loadEntities();

      expect(entities.length).toBe(2);
      expect(entities[0].id).toBe("e1");
      expect(entities[1].id).toBe("e2");
    });

    it("should handle non-JSON lines in loadRelationships gracefully (line 267)", async () => {
      const relationshipFile = join(
        testDir,
        "knowledge-graph-relationships.jsonl",
      );

      // Write marker + valid relationship + invalid JSON + another valid relationship
      await fs.writeFile(
        relationshipFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_RELATIONSHIPS v1.0.0\n" +
          '{"id":"r1","source":"s1","target":"t1","type":"uses","label":"Uses","properties":{},"weight":1.0,"lastUpdated":"2024-01-01"}\n' +
          "corrupted json {missing quotes and brackets\n" +
          '{"id":"r2","source":"s2","target":"t2","type":"uses","label":"Uses","properties":{},"weight":1.0,"lastUpdated":"2024-01-01"}\n',
        "utf-8",
      );

      // Should load valid relationships and skip invalid line
      const relationships = await storage.loadRelationships();

      expect(relationships.length).toBe(2);
      expect(relationships[0].id).toBe("r1");
      expect(relationships[1].id).toBe("r2");
    });

    it("should throw error when loadEntities encounters non-ENOENT error (line 197-201)", async () => {
      const entityFile = join(testDir, "knowledge-graph-entities.jsonl");

      // Create file with proper marker
      await fs.writeFile(
        entityFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_ENTITIES v1.0.0\n",
        "utf-8",
      );

      // Make file unreadable by changing permissions (Unix-like systems)
      if (process.platform !== "win32") {
        await fs.chmod(entityFile, 0o000);

        await expect(storage.loadEntities()).rejects.toThrow();

        // Restore permissions for cleanup
        await fs.chmod(entityFile, 0o644);
      }
    });

    it("should throw error when loadRelationships encounters non-ENOENT error (line 276-280)", async () => {
      const relationshipFile = join(
        testDir,
        "knowledge-graph-relationships.jsonl",
      );

      // Create file with proper marker
      await fs.writeFile(
        relationshipFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_RELATIONSHIPS v1.0.0\n",
        "utf-8",
      );

      // Make file unreadable (Unix-like systems)
      if (process.platform !== "win32") {
        await fs.chmod(relationshipFile, 0o000);

        await expect(storage.loadRelationships()).rejects.toThrow();

        // Restore permissions for cleanup
        await fs.chmod(relationshipFile, 0o644);
      }
    });
  });

  describe("Validation Errors", () => {
    it("should validate entity structure and throw on invalid entity (line 496)", async () => {
      const invalidEntity = {
        // Missing required 'type' and 'label' fields
        id: "invalid-entity",
        properties: {},
        weight: 1.0,
        lastUpdated: "2024-01-01",
      } as unknown as GraphNode;

      // Create a storage with validation enabled
      const validatingStorage = new KGStorage({
        storageDir: testDir,
        validateOnRead: true,
      });
      await validatingStorage.initialize();

      // Write invalid entity to file
      const entityFile = join(testDir, "knowledge-graph-entities.jsonl");
      await fs.writeFile(
        entityFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_ENTITIES v1.0.0\n" +
          '{"id":"invalid-entity","properties":{},"weight":1.0,"lastUpdated":"2024-01-01"}\n',
        "utf-8",
      );

      // Loading should skip the invalid entity (caught and logged)
      const entities = await validatingStorage.loadEntities();
      expect(entities.length).toBe(0); // Invalid entity skipped
    });

    it("should validate relationship structure and throw on invalid relationship (line 510)", async () => {
      // Create storage with validation enabled
      const validatingStorage = new KGStorage({
        storageDir: testDir,
        validateOnRead: true,
      });
      await validatingStorage.initialize();

      // Write invalid relationship (missing 'type' field)
      const relationshipFile = join(
        testDir,
        "knowledge-graph-relationships.jsonl",
      );
      await fs.writeFile(
        relationshipFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_RELATIONSHIPS v1.0.0\n" +
          '{"id":"r1","source":"s1","target":"t1","label":"Invalid","properties":{},"weight":1.0}\n',
        "utf-8",
      );

      // Loading should skip the invalid relationship
      const relationships = await validatingStorage.loadRelationships();
      expect(relationships.length).toBe(0); // Invalid relationship skipped
    });

    it("should validate entity has required fields: id, type, label (line 495-497)", async () => {
      const validatingStorage = new KGStorage({
        storageDir: testDir,
        validateOnRead: true,
      });
      await validatingStorage.initialize();

      const entityFile = join(testDir, "knowledge-graph-entities.jsonl");

      // Test missing 'id'
      await fs.writeFile(
        entityFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_ENTITIES v1.0.0\n" +
          '{"type":"project","label":"No ID","properties":{},"weight":1.0,"lastUpdated":"2024-01-01"}\n',
        "utf-8",
      );

      let entities = await validatingStorage.loadEntities();
      expect(entities.length).toBe(0);

      // Test missing 'type'
      await fs.writeFile(
        entityFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_ENTITIES v1.0.0\n" +
          '{"id":"e1","label":"No Type","properties":{},"weight":1.0,"lastUpdated":"2024-01-01"}\n',
        "utf-8",
      );

      entities = await validatingStorage.loadEntities();
      expect(entities.length).toBe(0);

      // Test missing 'label'
      await fs.writeFile(
        entityFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_ENTITIES v1.0.0\n" +
          '{"id":"e1","type":"project","properties":{},"weight":1.0,"lastUpdated":"2024-01-01"}\n',
        "utf-8",
      );

      entities = await validatingStorage.loadEntities();
      expect(entities.length).toBe(0);
    });

    it("should validate relationship has required fields: id, source, target, type (line 504-512)", async () => {
      const validatingStorage = new KGStorage({
        storageDir: testDir,
        validateOnRead: true,
      });
      await validatingStorage.initialize();

      const relationshipFile = join(
        testDir,
        "knowledge-graph-relationships.jsonl",
      );

      // Test missing 'id'
      await fs.writeFile(
        relationshipFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_RELATIONSHIPS v1.0.0\n" +
          '{"source":"s1","target":"t1","type":"uses","label":"Uses","properties":{},"weight":1.0}\n',
        "utf-8",
      );

      let relationships = await validatingStorage.loadRelationships();
      expect(relationships.length).toBe(0);

      // Test missing 'source'
      await fs.writeFile(
        relationshipFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_RELATIONSHIPS v1.0.0\n" +
          '{"id":"r1","target":"t1","type":"uses","label":"Uses","properties":{},"weight":1.0}\n',
        "utf-8",
      );

      relationships = await validatingStorage.loadRelationships();
      expect(relationships.length).toBe(0);

      // Test missing 'target'
      await fs.writeFile(
        relationshipFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_RELATIONSHIPS v1.0.0\n" +
          '{"id":"r1","source":"s1","type":"uses","label":"Uses","properties":{},"weight":1.0}\n',
        "utf-8",
      );

      relationships = await validatingStorage.loadRelationships();
      expect(relationships.length).toBe(0);

      // Test missing 'type'
      await fs.writeFile(
        relationshipFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_RELATIONSHIPS v1.0.0\n" +
          '{"id":"r1","source":"s1","target":"t1","label":"Uses","properties":{},"weight":1.0}\n',
        "utf-8",
      );

      relationships = await validatingStorage.loadRelationships();
      expect(relationships.length).toBe(0);
    });

    it("should not validate when validateOnRead is false", async () => {
      const nonValidatingStorage = new KGStorage({
        storageDir: testDir,
        validateOnRead: false,
      });
      await nonValidatingStorage.initialize();

      const entityFile = join(testDir, "knowledge-graph-entities.jsonl");

      // Write entity missing required fields
      await fs.writeFile(
        entityFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_ENTITIES v1.0.0\n" +
          '{"id":"e1","properties":{}}\n',
        "utf-8",
      );

      // Should load without validation (parse as-is)
      const entities = await nonValidatingStorage.loadEntities();
      expect(entities.length).toBe(1);
      expect(entities[0].id).toBe("e1");
    });
  });

  describe("Backup Restoration with Timestamp", () => {
    // TODO: Fix timing issue with backup file creation
    it.skip("should restore from backup with specific timestamp (lines 451-455)", async () => {
      const entities: GraphNode[] = [
        {
          id: "project:backup1",
          type: "project",
          label: "Backup Test 1",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-01",
        },
      ];

      // Save first version
      await storage.saveEntities(entities);

      // Get list of backups to find the timestamp
      const backupDir = join(testDir, "backups");
      const backups = await fs.readdir(backupDir);
      const entityBackups = backups.filter((f) => f.startsWith("entities-"));

      expect(entityBackups.length).toBeGreaterThan(0);

      // Extract timestamp from backup filename (format: entities-YYYY-MM-DDTHH-MM-SS-MMMZ.jsonl)
      const backupFilename = entityBackups[0];
      const timestampMatch = backupFilename.match(/entities-(.*?)\.jsonl/);
      expect(timestampMatch).not.toBeNull();

      const timestamp = timestampMatch![1];

      // Modify entities
      const modifiedEntities: GraphNode[] = [
        {
          id: "project:backup2",
          type: "project",
          label: "Modified",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-02",
        },
      ];
      await storage.saveEntities(modifiedEntities);

      // Verify current state
      let current = await storage.loadEntities();
      expect(current.length).toBe(1);
      expect(current[0].id).toBe("project:backup2");

      // Restore from backup using specific timestamp
      await storage.restoreFromBackup("entities", timestamp);

      // Verify restored state
      current = await storage.loadEntities();
      expect(current.length).toBe(1);
      expect(current[0].id).toBe("project:backup1");
    });

    it("should throw error when backup with timestamp not found (line 454-456)", async () => {
      const entities: GraphNode[] = [
        {
          id: "project:test",
          type: "project",
          label: "Test",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-01",
        },
      ];

      await storage.saveEntities(entities);

      // Try to restore with non-existent timestamp
      await expect(
        storage.restoreFromBackup("entities", "2099-12-31T23-59-59-999Z"),
      ).rejects.toThrow("Backup with timestamp");
    });

    it("should restore most recent backup when no timestamp specified (line 458-467)", async () => {
      const entities1: GraphNode[] = [
        {
          id: "project:v1",
          type: "project",
          label: "Version 1",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-01",
        },
      ];

      await storage.saveEntities(entities1);

      // Wait a bit to ensure different timestamps
      await new Promise((resolve) => setTimeout(resolve, 100));

      const entities2: GraphNode[] = [
        {
          id: "project:v2",
          type: "project",
          label: "Version 2",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-02",
        },
      ];

      await storage.saveEntities(entities2);

      await new Promise((resolve) => setTimeout(resolve, 100));

      const entities3: GraphNode[] = [
        {
          id: "project:v3",
          type: "project",
          label: "Version 3 (current)",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-03",
        },
      ];

      await storage.saveEntities(entities3);

      // Current state should be v3
      let current = await storage.loadEntities();
      expect(current[0].id).toBe("project:v3");

      // Restore without timestamp (should get most recent backup = v2)
      await storage.restoreFromBackup("entities");

      current = await storage.loadEntities();
      expect(current[0].id).toBe("project:v2");
    });

    // TODO: Fix timing issue with backup file creation
    it.skip("should restore relationships with timestamp", async () => {
      const relationships1: GraphEdge[] = [
        {
          id: "rel:v1",
          source: "s1",
          target: "t1",
          type: "uses",
          properties: {},
          weight: 1.0,
          confidence: 1.0,
          lastUpdated: "2024-01-01",
        },
      ];

      await storage.saveRelationships(relationships1);

      // Get backup timestamp
      const backupDir = join(testDir, "backups");
      const backups = await fs.readdir(backupDir);
      const relBackups = backups.filter((f) => f.startsWith("relationships-"));

      expect(relBackups.length).toBeGreaterThan(0);

      const timestampMatch = relBackups[0].match(/relationships-(.*?)\.jsonl/);
      const timestamp = timestampMatch![1];

      // Modify relationships
      const relationships2: GraphEdge[] = [
        {
          id: "rel:v2",
          source: "s2",
          target: "t2",
          type: "uses",
          properties: {},
          weight: 1.0,
          confidence: 1.0,
          lastUpdated: "2024-01-02",
        },
      ];

      await storage.saveRelationships(relationships2);

      // Restore from backup using timestamp
      await storage.restoreFromBackup("relationships", timestamp);

      const restored = await storage.loadRelationships();
      expect(restored[0].id).toBe("rel:v1");
    });

    it("should throw error when no backups exist (line 445-447)", async () => {
      // Try to restore when no backups exist
      await expect(storage.restoreFromBackup("entities")).rejects.toThrow(
        "No backups found",
      );
    });

    it("should log restoration in debug mode (line 478-481)", async () => {
      const entities: GraphNode[] = [
        {
          id: "project:debug",
          type: "project",
          label: "Debug Test",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-01",
        },
      ];

      await storage.saveEntities(entities);

      // Set DEBUG env var
      const originalDebug = process.env.DEBUG;
      process.env.DEBUG = "true";

      // Modify
      const modifiedEntities: GraphNode[] = [
        {
          id: "project:modified",
          type: "project",
          label: "Modified",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-02",
        },
      ];
      await storage.saveEntities(modifiedEntities);

      // Restore (should log in debug mode)
      await storage.restoreFromBackup("entities");

      // Restore original DEBUG setting
      if (originalDebug !== undefined) {
        process.env.DEBUG = originalDebug;
      } else {
        delete process.env.DEBUG;
      }

      // Verify restoration worked
      const restored = await storage.loadEntities();
      expect(restored[0].id).toBe("project:debug");
    });
  });

  describe("Error Handling Edge Cases", () => {
    it("should handle backup file access errors gracefully (line 337-340)", async () => {
      // This tests the warning path when backup fails due to file access issues
      const storage2 = new KGStorage({
        storageDir: testDir,
        backupOnWrite: true,
      });

      await storage2.initialize();

      // Save initial entities to create a file
      const initialEntities: GraphNode[] = [
        {
          id: "project:initial",
          type: "project",
          label: "Initial",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-01",
        },
      ];
      await storage2.saveEntities(initialEntities);

      // Make entity file unreadable (Unix-like systems only) to trigger backup error
      const entityFile = join(testDir, "knowledge-graph-entities.jsonl");
      if (process.platform !== "win32") {
        try {
          await fs.chmod(entityFile, 0o000);
        } catch (e) {
          // Skip test if chmod not supported
          return;
        }
      }

      // Saving should still attempt even if backup fails with non-ENOENT error
      const newEntities: GraphNode[] = [
        {
          id: "project:no-backup",
          type: "project",
          label: "No Backup",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-02",
        },
      ];

      // Will fail during backup read, but should warn and continue
      // This tests line 337-339: if (error.code !== "ENOENT")
      try {
        await storage2.saveEntities(newEntities);
      } catch (error) {
        // Might throw due to unreadable file
      }

      // Restore permissions for cleanup
      if (process.platform !== "win32") {
        try {
          await fs.chmod(entityFile, 0o644);
        } catch (e) {
          // Ignore
        }
      }
    });

    it("should handle cleanup of backups when file is deleted during iteration (line 369-371)", async () => {
      // Create multiple backups
      const entities: GraphNode[] = [
        {
          id: "project:cleanup",
          type: "project",
          label: "Cleanup Test",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-01",
        },
      ];

      // Create many backups (more than keepCount of 10)
      for (let i = 0; i < 15; i++) {
        await storage.saveEntities([
          {
            ...entities[0],
            id: `project:cleanup-${i}`,
            label: `Cleanup Test ${i}`,
          },
        ]);
        await new Promise((resolve) => setTimeout(resolve, 10));
      }

      // Old backups should be cleaned up automatically
      const backupDir = join(testDir, "backups");
      const backups = await fs.readdir(backupDir);
      const entityBackups = backups.filter((f) => f.startsWith("entities-"));

      // Should keep last 10 backups
      expect(entityBackups.length).toBeLessThanOrEqual(10);
    });

    it("should handle missing backup directory gracefully (line 388-391)", async () => {
      // Create storage without creating backups first
      const testDir2 = join(tmpdir(), `kg-no-backup-${Date.now()}`);
      await fs.mkdir(testDir2, { recursive: true });

      const storage2 = new KGStorage({
        storageDir: testDir2,
        backupOnWrite: false, // Disable backups
      });

      await storage2.initialize();

      const entities: GraphNode[] = [
        {
          id: "project:no-backup-dir",
          type: "project",
          label: "No Backup Dir",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-01",
        },
      ];

      // Should work fine without backup directory
      await storage2.saveEntities(entities);

      const loaded = await storage2.loadEntities();
      expect(loaded.length).toBe(1);

      await fs.rm(testDir2, { recursive: true, force: true });
    });
  });

  describe("Verify Integrity Coverage", () => {
    it("should detect orphaned relationships - missing source (line 535-538)", async () => {
      const entities: GraphNode[] = [
        {
          id: "project:exists",
          type: "project",
          label: "Exists",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-01",
        },
      ];

      const relationships: GraphEdge[] = [
        {
          id: "rel:orphan",
          source: "project:missing",
          target: "project:exists",
          type: "uses",
          properties: {},
          weight: 1.0,
          confidence: 1.0,
          lastUpdated: "2024-01-01",
        },
      ];

      await storage.saveEntities(entities);
      await storage.saveRelationships(relationships);

      const result = await storage.verifyIntegrity();

      expect(result.warnings.length).toBeGreaterThan(0);
      expect(
        result.warnings.some((w) => w.includes("missing source entity")),
      ).toBe(true);
    });

    it("should detect orphaned relationships - missing target (line 540-544)", async () => {
      const entities: GraphNode[] = [
        {
          id: "project:exists",
          type: "project",
          label: "Exists",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-01",
        },
      ];

      const relationships: GraphEdge[] = [
        {
          id: "rel:orphan",
          source: "project:exists",
          target: "project:missing",
          type: "uses",
          properties: {},
          weight: 1.0,
          confidence: 1.0,
          lastUpdated: "2024-01-01",
        },
      ];

      await storage.saveEntities(entities);
      await storage.saveRelationships(relationships);

      const result = await storage.verifyIntegrity();

      expect(result.warnings.length).toBeGreaterThan(0);
      expect(
        result.warnings.some((w) => w.includes("missing target entity")),
      ).toBe(true);
    });

    // TODO: Fix - validation prevents corrupted data from being loaded
    it.skip("should catch errors during integrity check (lines 564-570)", async () => {
      // Save valid data
      const entities: GraphNode[] = [
        {
          id: "project:test",
          type: "project",
          label: "Test",
          properties: {},
          weight: 1.0,
          lastUpdated: "2024-01-01",
        },
      ];

      await storage.saveEntities(entities);

      // Corrupt the entity file to cause a parse error
      const entityFile = join(testDir, "knowledge-graph-entities.jsonl");
      await fs.writeFile(
        entityFile,
        "# DOCUMCP_KNOWLEDGE_GRAPH_ENTITIES v1.0.0\nthis is not valid json\n",
        "utf-8",
      );

      // Integrity check should catch the error
      const result = await storage.verifyIntegrity();

      expect(result.valid).toBe(false);
      expect(result.errors.length).toBeGreaterThan(0);
      expect(result.errors[0]).toContain("Integrity check failed");
    });
  });
});

```

--------------------------------------------------------------------------------
/src/memory/pruning.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Memory Pruning & Optimization System for DocuMCP
 * Intelligent memory cleanup, storage optimization, and performance tuning
 */

import { EventEmitter } from "events";
import { MemoryEntry, JSONLStorage } from "./storage.js";
import { MemoryManager } from "./manager.js";
import { IncrementalLearningSystem } from "./learning.js";
import { KnowledgeGraph } from "./knowledge-graph.js";

export interface PruningPolicy {
  maxAge: number; // Maximum age in days
  maxSize: number; // Maximum storage size in MB
  maxEntries: number; // Maximum number of entries
  preservePatterns: string[]; // Pattern types to preserve
  compressionThreshold: number; // Compress entries older than X days
  redundancyThreshold: number; // Remove similar entries with similarity > X
}

export interface OptimizationMetrics {
  totalEntries: number;
  storageSize: number;
  indexSize: number;
  compressionRatio: number;
  duplicatesRemoved: number;
  entriesPruned: number;
  performanceGain: number;
  lastOptimization: Date;
}

export interface PruningResult {
  entriesRemoved: number;
  spaceSaved: number;
  patternsPreserved: number;
  compressionApplied: number;
  optimizationApplied: boolean;
  metrics: OptimizationMetrics;
}

export interface CompressionStrategy {
  type: "gzip" | "lz4" | "semantic";
  threshold: number;
  ratio: number;
}

export interface RedundancyPattern {
  similarity: number;
  count: number;
  representative: string;
  duplicates: string[];
  canMerge: boolean;
}
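
// Example usage (illustrative):
//   const result = await pruningSystem.prune({ maxAge: 90, maxSize: 250 });
//   console.log(`${result.entriesRemoved} removed, ${result.spaceSaved}MB saved`);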

export class MemoryPruningSystem extends EventEmitter {
  private storage: JSONLStorage;
  private manager: MemoryManager;
  private learningSystem: IncrementalLearningSystem;
  private knowledgeGraph: KnowledgeGraph;
  private defaultPolicy: PruningPolicy;
  private compressionCache: Map<string, any>;
  private similarityCache: Map<string, Map<string, number>>;

  constructor(
    storage: JSONLStorage,
    manager: MemoryManager,
    learningSystem: IncrementalLearningSystem,
    knowledgeGraph: KnowledgeGraph,
  ) {
    super();
    this.storage = storage;
    this.manager = manager;
    this.learningSystem = learningSystem;
    this.knowledgeGraph = knowledgeGraph;
    this.compressionCache = new Map();
    this.similarityCache = new Map();

    this.defaultPolicy = {
      maxAge: 180, // 6 months
      maxSize: 500, // 500MB
      maxEntries: 50000,
      preservePatterns: [
        "successful_deployment",
        "user_preference",
        "critical_error",
      ],
      compressionThreshold: 30, // Compress after 30 days
      redundancyThreshold: 0.85, // 85% similarity threshold
    };

    this.setupPeriodicCleanup();
  }

  /**
   * Execute comprehensive memory pruning
   */
  async prune(policy?: Partial<PruningPolicy>): Promise<PruningResult> {
    const activePolicy = { ...this.defaultPolicy, ...policy };
    const startTime = Date.now();

    this.emit("pruning_started", { policy: activePolicy });

    try {
      // Get current metrics
      const initialMetrics = await this.getOptimizationMetrics();

      // Phase 1: Remove aged entries
      const agedResult = await this.removeAgedEntries(activePolicy);

      // Phase 2: Apply size-based pruning
      const sizeResult = await this.applySizePruning(activePolicy);

      // Phase 3: Remove redundant entries
      const redundancyResult = await this.removeRedundantEntries(activePolicy);

      // Phase 4: Apply compression
      const compressionResult = await this.applyCompression(activePolicy);

      // Phase 5: Optimize storage structure
      const optimizationResult = await this.optimizeStorage();

      // Get final metrics
      const finalMetrics = await this.getOptimizationMetrics();

      const result: PruningResult = {
        entriesRemoved:
          agedResult.removed + sizeResult.removed + redundancyResult.removed,
        spaceSaved: initialMetrics.storageSize - finalMetrics.storageSize,
        patternsPreserved: agedResult.preserved + sizeResult.preserved,
        compressionApplied: compressionResult.compressed,
        optimizationApplied: optimizationResult.applied,
        metrics: finalMetrics,
      };

      // Update learning system with pruning results
      await this.updateLearningFromPruning(result);

      this.emit("pruning_completed", {
        result,
        duration: Date.now() - startTime,
      });

      return result;
    } catch (error) {
      this.emit("pruning_error", {
        error: error instanceof Error ? error.message : String(error),
      });
      throw error;
    }
  }

  /**
   * Remove entries older than policy threshold
   */
  private async removeAgedEntries(
    policy: PruningPolicy,
  ): Promise<{ removed: number; preserved: number }> {
    const cutoffDate = new Date(
      Date.now() - policy.maxAge * 24 * 60 * 60 * 1000,
    );
    const allEntries = await this.storage.getAll();

    let removed = 0;
    let preserved = 0;

    for (const entry of allEntries) {
      const entryDate = new Date(entry.timestamp);

      if (entryDate < cutoffDate) {
        // Check if entry should be preserved
        if (this.shouldPreserveEntry(entry, policy)) {
          preserved++;
          continue;
        }

        // Remove from storage
        await this.storage.delete(entry.id);

        // Remove from knowledge graph
        await this.knowledgeGraph.removeNode(entry.id);

        removed++;
      }
    }

    return { removed, preserved };
  }

  /**
   * Apply size-based pruning to stay within limits
   */
  private async applySizePruning(
    policy: PruningPolicy,
  ): Promise<{ removed: number; preserved: number }> {
    const metrics = await this.getOptimizationMetrics();

    if (
      metrics.storageSize <= policy.maxSize &&
      metrics.totalEntries <= policy.maxEntries
    ) {
      return { removed: 0, preserved: 0 };
    }

    // Get entries sorted by importance score
    const allEntries = await this.storage.getAll();
    const scoredEntries = await Promise.all(
      allEntries.map(async (entry) => ({
        entry,
        score: await this.calculateImportanceScore(entry),
      })),
    );

    // Sort by score (ascending - remove least important first)
    scoredEntries.sort((a, b) => a.score - b.score);

    let removed = 0;
    let preserved = 0;
    let currentSize = metrics.storageSize;
    let currentEntries = metrics.totalEntries;

    for (const { entry, score } of scoredEntries) {
      if (
        currentSize <= policy.maxSize &&
        currentEntries <= policy.maxEntries
      ) {
        break;
      }

      if (this.shouldPreserveEntry(entry, policy) || score > 0.8) {
        preserved++;
        continue;
      }

      // Remove entry
      await this.storage.delete(entry.id);
      await this.knowledgeGraph.removeNode(entry.id);

      // Estimate size reduction (rough approximation)
      const entrySize = JSON.stringify(entry).length / (1024 * 1024);
      currentSize -= entrySize;
      currentEntries--;
      removed++;
    }

    return { removed, preserved };
  }

  /**
   * Remove redundant and duplicate entries
   */
  private async removeRedundantEntries(
    policy: PruningPolicy,
  ): Promise<{ removed: number; merged: number }> {
    const redundantPatterns = await this.findRedundantPatterns(
      policy.redundancyThreshold,
    );

    let removed = 0;
    let merged = 0;

    for (const pattern of redundantPatterns) {
      if (pattern.canMerge && pattern.duplicates.length > 1) {
        // Keep the representative, remove duplicates
        for (let i = 1; i < pattern.duplicates.length; i++) {
          await this.storage.delete(pattern.duplicates[i]);
          removed++;
        }

        // Optionally merge information into representative
        if (pattern.count > 2) {
          await this.mergeRedundantEntries(
            pattern.representative,
            pattern.duplicates.slice(1),
          );
          merged++;
        }
      }
    }

    return { removed, merged };
  }

  /**
   * Apply compression to old entries
   */
  private async applyCompression(
    policy: PruningPolicy,
  ): Promise<{ compressed: number; spaceSaved: number }> {
    const cutoffDate = new Date(
      Date.now() - policy.compressionThreshold * 24 * 60 * 60 * 1000,
    );
    const allEntries = await this.storage.getAll();

    let compressed = 0;
    let spaceSaved = 0;

    for (const entry of allEntries) {
      const entryDate = new Date(entry.timestamp);

      if (entryDate < cutoffDate && !this.isCompressed(entry)) {
        const originalSize = JSON.stringify(entry).length;
        const compressedEntry = await this.compressEntry(entry);
        const compressedSize = JSON.stringify(compressedEntry).length;

        await this.storage.update(entry.id, compressedEntry);

        compressed++;
        spaceSaved += originalSize - compressedSize;
      }
    }

    return { compressed, spaceSaved };
  }

  /**
   * Optimize storage structure and indices
   */
  private async optimizeStorage(): Promise<{
    applied: boolean;
    improvements: string[];
  }> {
    const improvements: string[] = [];

    try {
      // Rebuild indices
      await this.storage.rebuildIndex();
      improvements.push("rebuilt_indices");

      // Defragment storage files
      await this.defragmentStorage();
      improvements.push("defragmented_storage");

      // Optimize cache sizes
      this.optimizeCaches();
      improvements.push("optimized_caches");

      return { applied: true, improvements };
    } catch {
      return { applied: false, improvements };
    }
  }

  /**
   * Calculate importance score for an entry
   */
  private async calculateImportanceScore(entry: MemoryEntry): Promise<number> {
    let score = 0;

    // Recency score (0-0.3)
    const age = Date.now() - new Date(entry.timestamp).getTime();
    const maxAge = 180 * 24 * 60 * 60 * 1000; // 180 days
    score += Math.max(0, 1 - age / maxAge) * 0.3;

    // Type importance (0-0.2)
    const typeScores: Record<string, number> = {
      successful_deployment: 0.2,
      user_preference: 0.18,
      configuration: 0.15,
      analysis: 0.12,
      recommendation: 0.12,
      interaction: 0.08,
      error: 0.05,
    };
    score += typeScores[entry.type] || 0.05;

    // Learning value (0-0.2)
    const patterns = await this.learningSystem.getPatterns();
    const relevantPatterns = patterns.filter(
      (p) =>
        p.metadata.technologies?.includes(entry.data.language) ||
        p.metadata.technologies?.includes(entry.data.framework),
    );
    score += Math.min(0.2, relevantPatterns.length * 0.05);

    // Knowledge graph centrality (0-0.15)
    try {
      const connections = await this.knowledgeGraph.getConnections(entry.id);
      score += Math.min(0.15, connections.length * 0.02);
    } catch {
      // Node might not exist in graph
    }

    // Success indicator (0-0.15)
    if (entry.data.outcome === "success" || entry.data.success === true) {
      score += 0.15;
    }

    return Math.min(1, score);
  }
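
  // Worked example (illustrative values only): a 30-day-old
  // "successful_deployment" entry with 5 knowledge-graph connections scores
  //   recency: (1 - 30/180) * 0.3 = 0.25
  //   type:    0.20
  //   graph:   min(0.15, 5 * 0.02) = 0.10
  //   success: 0.15
  // for a total of 0.70 before any learning-pattern bonus.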

  /**
   * Check if entry should be preserved based on policy
   */
  private shouldPreserveEntry(
    entry: MemoryEntry,
    policy: PruningPolicy,
  ): boolean {
    // Check preserve patterns
    for (const pattern of policy.preservePatterns) {
      if (
        entry.type.includes(pattern) ||
        JSON.stringify(entry.data).includes(pattern)
      ) {
        return true;
      }
    }

    // Preserve high-value entries
    if (
      entry.data.outcome === "success" ||
      entry.data.success === true ||
      entry.data.critical === true
    ) {
      return true;
    }

    return false;
  }

  /**
   * Find patterns of redundant entries
   */
  private async findRedundantPatterns(
    threshold: number,
  ): Promise<RedundancyPattern[]> {
    const allEntries = await this.storage.getAll();
    const patterns: RedundancyPattern[] = [];
    const processed = new Set<string>();

    for (const entry of allEntries) {
      if (processed.has(entry.id)) continue;

      const similar = await this.findSimilarEntries(
        entry,
        allEntries,
        threshold,
      );

      if (similar.length > 1) {
        patterns.push({
          similarity: threshold, // lower bound; pairwise scores are >= threshold
          count: similar.length,
          representative: similar[0].id,
          duplicates: similar.map((e) => e.id),
          canMerge: this.canMergeEntries(similar),
        });

        similar.forEach((s) => processed.add(s.id));
      }
    }

    return patterns;
  }

  /**
   * Find entries similar to given entry
   */
  private async findSimilarEntries(
    target: MemoryEntry,
    entries: MemoryEntry[],
    threshold: number,
  ): Promise<MemoryEntry[]> {
    const similar: MemoryEntry[] = [target];

    for (const entry of entries) {
      if (entry.id === target.id) continue;

      const similarity = await this.calculateSimilarity(target, entry);
      if (similarity >= threshold) {
        similar.push(entry);
      }
    }

    return similar;
  }

  /**
   * Calculate similarity between two entries
   */
  private async calculateSimilarity(
    entry1: MemoryEntry,
    entry2: MemoryEntry,
  ): Promise<number> {
    // Check cache first
    if (
      this.similarityCache.has(entry1.id) &&
      this.similarityCache.get(entry1.id)?.has(entry2.id)
    ) {
      return this.similarityCache.get(entry1.id)!.get(entry2.id)!;
    }

    let similarity = 0;

    // Type similarity (0-0.3)
    if (entry1.type === entry2.type) {
      similarity += 0.3;
    }

    // Temporal similarity (0-0.2)
    const timeDiff = Math.abs(
      new Date(entry1.timestamp).getTime() -
        new Date(entry2.timestamp).getTime(),
    );
    const maxTimeDiff = 7 * 24 * 60 * 60 * 1000; // 7 days
    similarity += Math.max(0, 1 - timeDiff / maxTimeDiff) * 0.2;

    // Data similarity (0-0.5)
    const dataSimilarity = this.calculateDataSimilarity(
      entry1.data,
      entry2.data,
    );
    similarity += dataSimilarity * 0.5;

    // Cache result
    if (!this.similarityCache.has(entry1.id)) {
      this.similarityCache.set(entry1.id, new Map());
    }
    this.similarityCache.get(entry1.id)!.set(entry2.id, similarity);

    return similarity;
  }

  /**
   * Calculate similarity between data objects
   */
  private calculateDataSimilarity(data1: any, data2: any): number {
    const keys1 = new Set(Object.keys(data1));
    const keys2 = new Set(Object.keys(data2));
    const allKeys = new Set([...keys1, ...keys2]);

    let matches = 0;
    let total = 0;

    for (const key of allKeys) {
      total++;
      if (keys1.has(key) && keys2.has(key)) {
        if (data1[key] === data2[key]) {
          matches++;
        } else if (
          typeof data1[key] === "string" &&
          typeof data2[key] === "string"
        ) {
          // String similarity for text fields
          const stringSim = this.calculateStringSimilarity(
            data1[key],
            data2[key],
          );
          matches += stringSim;
        }
      }
    }

    return total > 0 ? matches / total : 0;
  }

  /**
   * Calculate string similarity (simple Jaccard similarity)
   */
  private calculateStringSimilarity(str1: string, str2: string): number {
    const words1 = new Set(str1.toLowerCase().split(/\s+/));
    const words2 = new Set(str2.toLowerCase().split(/\s+/));

    const intersection = new Set([...words1].filter((w) => words2.has(w)));
    const union = new Set([...words1, ...words2]);

    return union.size > 0 ? intersection.size / union.size : 0;
  }
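
  // Worked example: "deploy docs site" vs "deploy docs app" shares
  // {deploy, docs} (2 words) out of a union of {deploy, docs, site, app}
  // (4 words), giving a Jaccard similarity of 2 / 4 = 0.5.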

  /**
   * Check if entries can be safely merged
   */
  private canMergeEntries(entries: MemoryEntry[]): boolean {
    if (entries.length < 2) return false;

    // All entries must have the same type
    const firstType = entries[0].type;
    if (!entries.every((e) => e.type === firstType)) {
      return false;
    }

    // Check for conflicting data
    const firstData = entries[0].data;
    for (const entry of entries.slice(1)) {
      if (this.hasConflictingData(firstData, entry.data)) {
        return false;
      }
    }

    return true;
  }

  /**
   * Check for conflicting data between entries
   */
  private hasConflictingData(data1: any, data2: any): boolean {
    for (const key of Object.keys(data1)) {
      if (key in data2 && data1[key] !== data2[key]) {
        // Special handling for arrays and objects
        if (Array.isArray(data1[key]) && Array.isArray(data2[key])) {
          continue; // Arrays can be merged
        }
        if (typeof data1[key] === "object" && typeof data2[key] === "object") {
          continue; // Objects can be merged
        }
        return true; // Conflicting primitive values
      }
    }
    return false;
  }

  /**
   * Merge redundant entries into representative
   */
  private async mergeRedundantEntries(
    representativeId: string,
    duplicateIds: string[],
  ): Promise<void> {
    const representative = await this.storage.get(representativeId);
    if (!representative) return;

    const duplicates = await Promise.all(
      duplicateIds.map((id) => this.storage.get(id)),
    );

    // Merge data from duplicates
    const mergedData = { ...representative.data };

    for (const duplicate of duplicates) {
      if (!duplicate) continue;

      // Merge arrays
      for (const [key, value] of Object.entries(duplicate.data)) {
        if (Array.isArray(value) && Array.isArray(mergedData[key])) {
          mergedData[key] = [...new Set([...mergedData[key], ...value])];
        } else if (
          typeof value === "object" &&
          typeof mergedData[key] === "object"
        ) {
          mergedData[key] = { ...mergedData[key], ...value };
        } else if (!(key in mergedData)) {
          mergedData[key] = value;
        }
      }
    }

    // Update representative with merged data
    await this.storage.update(representativeId, {
      ...representative,
      data: mergedData,
      metadata: {
        ...representative.metadata,
        merged: true,
        mergedCount: duplicateIds.length,
        mergedAt: new Date().toISOString(),
      },
    });
  }

  /**
   * Check if entry is already compressed
   */
  private isCompressed(entry: MemoryEntry): boolean {
    return Boolean(entry.metadata?.compressed);
  }

  /**
   * Compress entry data
   */
  private async compressEntry(entry: MemoryEntry): Promise<MemoryEntry> {
    // Simple compression - in production, use an actual compression library
    const compressedData = this.simpleCompress(entry.data);

    return {
      ...entry,
      data: compressedData,
      metadata: {
        ...entry.metadata,
        compressed: true,
        compressionType: "simple",
        compressedAt: new Date().toISOString(),
        originalSize: JSON.stringify(entry.data).length,
      },
    };
  }

  /**
   * Simple compression simulation
   */
  private simpleCompress(data: any): any {
    // This is a placeholder - in production, use proper compression
    const stringified = JSON.stringify(data);
    const compressed = stringified.replace(/\s+/g, " ").trim();

    return {
      _compressed: true,
      _data: compressed,
      _type: "simple",
    };
  }
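
  // A production implementation might use node:zlib instead, e.g. (sketch):
  //   import { gzipSync } from "zlib";
  //   const payload = gzipSync(Buffer.from(JSON.stringify(data))).toString("base64");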

  /**
   * Defragment storage files
   */
  private async defragmentStorage(): Promise<void> {
    // Rebuild storage with optimal layout
    const allEntries = await this.storage.getAll();

    // Sort entries for optimal access patterns
    allEntries.sort(
      (a, b) =>
        new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime(),
    );

    // This would typically rewrite storage files
    // For now, just trigger a rebuild
    await this.storage.rebuildIndex();
  }

  /**
   * Optimize cache sizes based on usage patterns
   */
  private optimizeCaches(): void {
    // Probabilistically evict similarity-cache entries to bound growth.
    // Cached pairs carry no timestamps, so each entry has a 10% chance of
    // eviction per optimization pass rather than a true age-based expiry.
    for (const [key1, innerMap] of this.similarityCache.entries()) {
      for (const [key2] of innerMap.entries()) {
        if (Math.random() < 0.1) {
          innerMap.delete(key2);
        }
      }
      if (innerMap.size === 0) {
        this.similarityCache.delete(key1);
      }
    }

    // Limit cache sizes
    if (this.compressionCache.size > 10000) {
      const entries = Array.from(this.compressionCache.entries());
      this.compressionCache.clear();
      // Keep only the most recent 5000 entries
      entries.slice(-5000).forEach(([key, value]) => {
        this.compressionCache.set(key, value);
      });
    }
  }

  /**
   * Get comprehensive optimization metrics
   */
  async getOptimizationMetrics(): Promise<OptimizationMetrics> {
    const allEntries = await this.storage.getAll();
    const totalEntries = allEntries.length;

    // Calculate storage size (approximate)
    const storageSize =
      allEntries.reduce((total, entry) => {
        return total + JSON.stringify(entry).length;
      }, 0) /
      (1024 * 1024); // Convert to MB

    // Calculate index size (approximate)
    const indexSize = (totalEntries * 100) / (1024 * 1024); // Rough estimate: ~100 bytes of index per entry

    // Calculate compression ratio
    const compressedEntries = allEntries.filter((e) => this.isCompressed(e));
    const compressionRatio =
      totalEntries > 0 ? compressedEntries.length / totalEntries : 0;

    return {
      totalEntries,
      storageSize,
      indexSize,
      compressionRatio,
      duplicatesRemoved: 0, // Would be tracked during runtime
      entriesPruned: 0, // Would be tracked during runtime
      performanceGain: 0, // Would be calculated based on before/after metrics
      lastOptimization: new Date(), // Placeholder; would be persisted across runs
    };
  }

  /**
   * Update learning system based on pruning results
   */
  private async updateLearningFromPruning(
    result: PruningResult,
  ): Promise<void> {
    // Create a learning entry about pruning effectiveness
    const pruningLearning = {
      action: "memory_pruning",
      outcome: result.spaceSaved > 0 ? "success" : "neutral",
      metrics: {
        entriesRemoved: result.entriesRemoved,
        spaceSaved: result.spaceSaved,
        patternsPreserved: result.patternsPreserved,
      },
      timestamp: new Date().toISOString(),
    };

    // This would integrate with the learning system
    // For now, just emit an event
    this.emit("learning_update", pruningLearning);
  }

  /**
   * Set up periodic cleanup
   */
  private setupPeriodicCleanup(): void {
    // Run optimization every 24 hours
    setInterval(
      async () => {
        try {
          await this.prune();
          this.emit("periodic_cleanup_completed");
        } catch (error) {
          this.emit("periodic_cleanup_error", {
            error: error instanceof Error ? error.message : String(error),
          });
        }
      },
      24 * 60 * 60 * 1000,
    );
  }

  /**
   * Get pruning recommendations
   */
  async getPruningRecommendations(): Promise<{
    shouldPrune: boolean;
    reasons: string[];
    estimatedSavings: number;
    recommendedPolicy: Partial<PruningPolicy>;
  }> {
    const metrics = await this.getOptimizationMetrics();
    const reasons: string[] = [];
    let shouldPrune = false;
    let estimatedSavings = 0;

    // Check storage size
    if (metrics.storageSize > this.defaultPolicy.maxSize * 0.8) {
      shouldPrune = true;
      reasons.push(
        `Storage size (${metrics.storageSize.toFixed(2)}MB) approaching limit`,
      );
      estimatedSavings += metrics.storageSize * 0.2;
    }

    // Check entry count
    if (metrics.totalEntries > this.defaultPolicy.maxEntries * 0.8) {
      shouldPrune = true;
      reasons.push(`Entry count (${metrics.totalEntries}) approaching limit`);
    }

    // Check compression ratio
    if (metrics.compressionRatio < 0.3) {
      reasons.push("Low compression ratio indicates optimization opportunity");
      estimatedSavings += metrics.storageSize * 0.15;
    }

    // Time-based recommendation
    const daysSinceLastOptimization =
      (Date.now() - metrics.lastOptimization.getTime()) / (24 * 60 * 60 * 1000);
    if (daysSinceLastOptimization > 7) {
      shouldPrune = true;
      reasons.push("Regular maintenance window (weekly optimization)");
    }

    return {
      shouldPrune,
      reasons,
      estimatedSavings,
      recommendedPolicy: {
        maxAge: Math.max(30, this.defaultPolicy.maxAge - 30), // More aggressive if needed
        compressionThreshold: Math.max(
          7,
          this.defaultPolicy.compressionThreshold - 7,
        ),
      },
    };
  }
}

```
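
A minimal usage sketch for the pruning system above (hedged: the four collaborator instances are assumed to be constructed elsewhere in the memory module, and the event payload follows the `emit` calls in the class):

```typescript
// import { MemoryPruningSystem } from "../memory/pruning.js"; // hypothetical path

// Hypothetical wiring; storage, manager, learningSystem, and knowledgeGraph
// come from the rest of the memory module.
const pruning = new MemoryPruningSystem(
  storage,
  manager,
  learningSystem,
  knowledgeGraph,
);

pruning.on("pruning_completed", ({ result, duration }) => {
  console.log(`Removed ${result.entriesRemoved} entries in ${duration}ms`);
});

// Check recommendations first, then prune with the suggested policy.
const rec = await pruning.getPruningRecommendations();
if (rec.shouldPrune) {
  await pruning.prune(rec.recommendedPolicy);
}
```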

--------------------------------------------------------------------------------
/src/tools/deploy-pages.ts:
--------------------------------------------------------------------------------

```typescript
import { promises as fs } from "fs";
import path from "path";
import { z } from "zod";
import { MCPToolResponse, formatMCPResponse } from "../types/api.js";
import {
  createOrUpdateProject,
  trackDeployment,
  getDeploymentRecommendations,
  getKnowledgeGraph,
} from "../memory/kg-integration.js";
import { getUserPreferenceManager } from "../memory/user-preferences.js";

const inputSchema = z.object({
  repository: z.string(),
  ssg: z
    .enum(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"])
    .optional()
    .describe(
      "Static site generator to use. If not provided, will be retrieved from knowledge graph using analysisId",
    ),
  branch: z.string().optional().default("gh-pages"),
  customDomain: z.string().optional(),
  projectPath: z
    .string()
    .optional()
    .describe("Local path to the project for tracking"),
  projectName: z.string().optional().describe("Project name for tracking"),
  analysisId: z
    .string()
    .optional()
    .describe("ID from repository analysis for linking and SSG retrieval"),
  userId: z
    .string()
    .optional()
    .default("default")
    .describe("User ID for preference tracking"),
});

interface BuildConfig {
  workingDirectory: string | null;
  buildCommand: string;
  outputPath: string;
  nodeVersion?: string;
  packageManager?: "npm" | "yarn" | "pnpm";
}

/**
 * Retrieve SSG from knowledge graph using analysisId
 */
async function getSSGFromKnowledgeGraph(
  analysisId: string,
): Promise<string | null> {
  try {
    const kg = await getKnowledgeGraph();

    // Find project node by analysis ID
    const projectNode = await kg.findNode({
      type: "project",
      properties: { id: analysisId },
    });

    if (!projectNode) {
      return null;
    }

    // Get deployment recommendations for this project
    const recommendations = await getDeploymentRecommendations(analysisId);

    if (recommendations.length > 0) {
      // Return the highest confidence SSG
      const topRecommendation = recommendations.sort(
        (a, b) => b.confidence - a.confidence,
      )[0];
      return topRecommendation.ssg;
    }

    // Fallback: check if there are any previous successful deployments
    const edges = await kg.findEdges({
      source: projectNode.id,
    });

    const deploymentEdges = edges.filter((e) =>
      e.type.startsWith("project_deployed_with"),
    );

    if (deploymentEdges.length > 0) {
      // Get the most recent successful deployment
      const successfulDeployments = deploymentEdges.filter(
        (e) => e.properties?.success === true,
      );

      if (successfulDeployments.length > 0) {
        const mostRecent = successfulDeployments.sort(
          (a, b) =>
            new Date(b.properties?.timestamp || 0).getTime() -
            new Date(a.properties?.timestamp || 0).getTime(),
        )[0];

        const configNode = (await kg.getAllNodes()).find(
          (n) => n.id === mostRecent.target,
        );

        return configNode?.properties?.ssg || null;
      }
    }

    return null;
  } catch (error) {
    console.warn("Failed to retrieve SSG from knowledge graph:", error);
    return null;
  }
}

/**
 * Detect documentation folder in repository
 */
async function detectDocsFolder(repoPath: string): Promise<string | null> {
  const commonFolders = [
    "docs",
    "documentation",
    "website",
    "doc",
    "site",
    "pages",
  ];

  for (const folder of commonFolders) {
    const folderPath = path.join(repoPath, folder);
    try {
      const stat = await fs.stat(folderPath);
      if (stat.isDirectory()) {
        // Check if it has package.json or other SSG-specific files
        const hasPackageJson = await fs
          .access(path.join(folderPath, "package.json"))
          .then(() => true)
          .catch(() => false);
        const hasMkDocsYml = await fs
          .access(path.join(folderPath, "mkdocs.yml"))
          .then(() => true)
          .catch(() => false);
        const hasConfigToml = await fs
          .access(path.join(folderPath, "config.toml"))
          .then(() => true)
          .catch(() => false);

        if (hasPackageJson || hasMkDocsYml || hasConfigToml) {
          return folder;
        }
      }
    } catch {
      continue;
    }
  }

  return null;
}

/**
 * Detect build configuration from package.json
 */
async function detectBuildConfig(
  repoPath: string,
  ssg: string,
  docsFolder: string | null,
): Promise<BuildConfig> {
  const workingDir = docsFolder || ".";
  const packageJsonPath = path.join(repoPath, workingDir, "package.json");

  const defaults: Record<string, BuildConfig> = {
    docusaurus: {
      workingDirectory: docsFolder,
      buildCommand: "npm run build",
      outputPath: "./build",
    },
    eleventy: {
      workingDirectory: docsFolder,
      buildCommand: "npm run build",
      outputPath: "./_site",
    },
    hugo: {
      workingDirectory: docsFolder,
      buildCommand: "hugo --minify",
      outputPath: "./public",
    },
    jekyll: {
      workingDirectory: docsFolder,
      buildCommand: "bundle exec jekyll build",
      outputPath: "./_site",
    },
    mkdocs: {
      workingDirectory: docsFolder,
      buildCommand: "mkdocs build",
      outputPath: "./site",
    },
  };

  const config = defaults[ssg] || defaults.docusaurus;

  try {
    const packageJson = JSON.parse(await fs.readFile(packageJsonPath, "utf-8"));

    // Detect build command from scripts
    const scripts = packageJson.scripts || {};
    if (scripts.build) {
      config.buildCommand = "npm run build";
    } else if (scripts["docs:build"]) {
      config.buildCommand = "npm run docs:build";
    } else if (scripts.start && scripts.start.includes("docusaurus")) {
      config.buildCommand = "npm run build";
    }

    // Detect package manager
    const hasYarnLock = await fs
      .access(path.join(repoPath, workingDir, "yarn.lock"))
      .then(() => true)
      .catch(() => false);
    const hasPnpmLock = await fs
      .access(path.join(repoPath, workingDir, "pnpm-lock.yaml"))
      .then(() => true)
      .catch(() => false);

    if (hasYarnLock) {
      config.packageManager = "yarn";
      config.buildCommand = config.buildCommand.replace("npm", "yarn");
    } else if (hasPnpmLock) {
      config.packageManager = "pnpm";
      config.buildCommand = config.buildCommand.replace("npm", "pnpm");
    } else {
      config.packageManager = "npm";
    }

    // Detect Node version from engines field
    if (packageJson.engines?.node) {
      config.nodeVersion = packageJson.engines.node;
    }
  } catch (error) {
    // If package.json doesn't exist or can't be read, use defaults
    console.warn("Using default build configuration:", error);
  }

  return config;
}
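
// For illustration (hypothetical layout): a repository with docs/package.json
// and docs/yarn.lock, deployed with Docusaurus, resolves to
//   { workingDirectory: "docs", buildCommand: "yarn run build",
//     outputPath: "./build", packageManager: "yarn" }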

export async function deployPages(
  args: unknown,
  context?: any,
): Promise<{ content: any[] }> {
  const startTime = Date.now();
  const {
    repository,
    ssg: providedSSG,
    branch,
    customDomain,
    projectPath,
    projectName,
    analysisId,
    userId,
  } = inputSchema.parse(args);

  // Declare ssg outside try block so it's accessible in catch
  let ssg:
    | "jekyll"
    | "hugo"
    | "docusaurus"
    | "mkdocs"
    | "eleventy"
    | undefined = providedSSG;

  // Report initial progress
  if (context?.meta?.progressToken) {
    await context.meta.reportProgress?.({
      progress: 0,
      total: 100,
    });
  }

  await context?.info?.("🚀 Starting GitHub Pages deployment configuration...");

  try {
    // Determine repository path (local or remote)
    const repoPath = repository.startsWith("http") ? "." : repository;
    await context?.info?.(`📂 Target repository: ${repository}`);

    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({
        progress: 10,
        total: 100,
      });
    }

    // Retrieve SSG from knowledge graph if not provided
    ssg = providedSSG;
    if (!ssg && analysisId) {
      await context?.info?.(
        `🔍 Retrieving SSG recommendation from analysis ${analysisId}...`,
      );
      const retrievedSSG = await getSSGFromKnowledgeGraph(analysisId);
      if (retrievedSSG) {
        ssg = retrievedSSG as
          | "jekyll"
          | "hugo"
          | "docusaurus"
          | "mkdocs"
          | "eleventy";
        await context?.info?.(`✅ Found recommended SSG: ${ssg}`);
      }
    } else if (ssg) {
      await context?.info?.(`ℹ️ Using specified SSG: ${ssg}`);
    }

    if (!ssg) {
      const errorResponse: MCPToolResponse = {
        success: false,
        error: {
          code: "SSG_NOT_SPECIFIED",
          message:
            "SSG parameter is required. Either provide it directly or ensure analysisId points to a project with SSG recommendations.",
          resolution:
            "Run analyze_repository and recommend_ssg first, or specify the SSG parameter explicitly.",
        },
        metadata: {
          toolVersion: "1.0.0",
          executionTime: Date.now() - startTime,
          timestamp: new Date().toISOString(),
        },
      };
      return formatMCPResponse(errorResponse);
    }

    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({
        progress: 25,
        total: 100,
      });
    }

    // Detect documentation folder
    await context?.info?.("📑 Detecting documentation folder...");
    const docsFolder = await detectDocsFolder(repoPath);
    await context?.info?.(
      `📁 Documentation folder: ${docsFolder || "root directory"}`,
    );

    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({
        progress: 40,
        total: 100,
      });
    }

    // Detect build configuration
    await context?.info?.(`⚙️ Detecting build configuration for ${ssg}...`);
    const buildConfig = await detectBuildConfig(repoPath, ssg, docsFolder);

    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({
        progress: 55,
        total: 100,
      });
    }

    // Create .github/workflows directory
    await context?.info?.("📂 Creating GitHub Actions workflow directory...");
    const workflowsDir = path.join(repoPath, ".github", "workflows");
    await fs.mkdir(workflowsDir, { recursive: true });

    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({
        progress: 70,
        total: 100,
      });
    }

    // Generate workflow based on SSG and build config
    await context?.info?.(`✍️ Generating ${ssg} deployment workflow...`);
    const workflow = generateWorkflow(ssg, branch, customDomain, buildConfig);
    const workflowPath = path.join(workflowsDir, "deploy-docs.yml");
    await fs.writeFile(workflowPath, workflow);
    await context?.info?.(
      `✅ Workflow created: .github/workflows/deploy-docs.yml`,
    );

    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({
        progress: 85,
        total: 100,
      });
    }

    // Create CNAME file if custom domain is specified
    let cnameCreated = false;
    if (customDomain) {
      await context?.info?.(
        `🌐 Creating CNAME file for custom domain: ${customDomain}...`,
      );
      const cnamePath = path.join(repoPath, "CNAME");
      await fs.writeFile(cnamePath, customDomain);
      cnameCreated = true;
      await context?.info?.("✅ CNAME file created");
    }

    const deploymentResult = {
      repository,
      ssg,
      branch,
      customDomain,
      workflowPath: "deploy-docs.yml",
      cnameCreated,
      repoPath,
      detectedConfig: {
        docsFolder: docsFolder || "root",
        buildCommand: buildConfig.buildCommand,
        outputPath: buildConfig.outputPath,
        packageManager: buildConfig.packageManager || "npm",
        workingDirectory: buildConfig.workingDirectory,
      },
    };

    // Phase 2.3: Track deployment setup in knowledge graph
    await context?.info?.("💾 Tracking deployment in Knowledge Graph...");
    try {
      // Create or update project in knowledge graph
      if (projectPath || projectName) {
        const timestamp = new Date().toISOString();
        const project = await createOrUpdateProject({
          id:
            analysisId ||
            `deploy_${repository.replace(/[^a-zA-Z0-9]/g, "_")}_${Date.now()}`,
          timestamp,
          path: projectPath || repository,
          projectName: projectName || repository,
          structure: {
            totalFiles: 0, // Unknown at this point
            languages: {},
            hasTests: false,
            hasCI: true, // We just added CI
            hasDocs: true, // Setting up docs deployment
          },
        });

        // Track successful deployment setup
        await trackDeployment(project.id, ssg, true, {
          buildTime: Date.now() - startTime,
        });

        // Update user preferences with SSG usage
        const userPreferenceManager = await getUserPreferenceManager(userId);
        await userPreferenceManager.trackSSGUsage({
          ssg,
          success: true, // Setup successful
          timestamp,
          projectType: projectPath || repository,
        });
      }
    } catch (trackingError) {
      // Don't fail the whole deployment if tracking fails
      console.warn(
        "Failed to track deployment in knowledge graph:",
        trackingError,
      );
    }

    if (context?.meta?.progressToken) {
      await context.meta.reportProgress?.({
        progress: 100,
        total: 100,
      });
    }

    const executionTime = Date.now() - startTime;
    await context?.info?.(
      `✅ Deployment configuration complete! ${ssg} workflow created in ${Math.round(
        executionTime / 1000,
      )}s`,
    );

    const response: MCPToolResponse<typeof deploymentResult> = {
      success: true,
      data: deploymentResult,
      metadata: {
        toolVersion: "2.0.0",
        executionTime,
        timestamp: new Date().toISOString(),
      },
      recommendations: [
        {
          type: "info",
          title: "Deployment Workflow Created",
          description: `GitHub Actions workflow configured for ${ssg} deployment to ${branch} branch`,
        },
        ...(!providedSSG && analysisId
          ? [
              {
                type: "info" as const,
                title: "SSG Auto-Detected",
                description: `Retrieved ${ssg} from knowledge graph using analysisId`,
              },
            ]
          : []),
        ...(docsFolder
          ? [
              {
                type: "info" as const,
                title: "Documentation Folder Detected",
                description: `Found documentation in '${docsFolder}/' folder. Workflow configured with working-directory.`,
              },
            ]
          : []),
        ...(buildConfig.packageManager !== "npm"
          ? [
              {
                type: "info" as const,
                title: "Package Manager Detected",
                description: `Using ${buildConfig.packageManager} based on lockfile detection`,
              },
            ]
          : []),
        ...(customDomain
          ? [
              {
                type: "info" as const,
                title: "Custom Domain Configured",
                description: `CNAME file created for ${customDomain}`,
              },
            ]
          : []),
      ],
      nextSteps: [
        {
          action: "Verify Deployment Setup",
          toolRequired: "verify_deployment",
          description: "Check that all deployment requirements are met",
          priority: "high",
        },
        {
          action: "Commit and Push",
          toolRequired: "git",
          description: "Commit workflow files and push to trigger deployment",
          priority: "high",
        },
      ],
    };

    return formatMCPResponse(response);
  } catch (error) {
    // Phase 2.3: Track failed deployment setup
    try {
      if ((projectPath || projectName) && ssg) {
        const timestamp = new Date().toISOString();
        const project = await createOrUpdateProject({
          id:
            analysisId ||
            `deploy_${repository.replace(/[^a-zA-Z0-9]/g, "_")}_${Date.now()}`,
          timestamp,
          path: projectPath || repository,
          projectName: projectName || repository,
          structure: {
            totalFiles: 0,
            languages: {},
            hasTests: false,
            hasCI: false,
            hasDocs: false,
          },
        });

        // Track failed deployment (only if ssg is known)
        await trackDeployment(project.id, ssg, false, {
          errorMessage: String(error),
        });

        // Update user preferences with failed SSG usage
        const userPreferenceManager = await getUserPreferenceManager(userId);
        await userPreferenceManager.trackSSGUsage({
          ssg,
          success: false,
          timestamp,
          projectType: projectPath || repository,
        });
      }
    } catch (trackingError) {
      console.warn("Failed to track deployment failure:", trackingError);
    }

    const errorResponse: MCPToolResponse = {
      success: false,
      error: {
        code: "DEPLOYMENT_SETUP_FAILED",
        message: `Failed to set up deployment: ${error}`,
        resolution:
          "Ensure repository path is accessible and GitHub Actions are enabled",
      },
      metadata: {
        toolVersion: "1.0.0",
        executionTime: Date.now() - startTime,
        timestamp: new Date().toISOString(),
      },
    };
    return formatMCPResponse(errorResponse);
  }
}

function generateWorkflow(
  ssg: string,
  branch: string,
  _customDomain: string | undefined,
  buildConfig: BuildConfig,
): string {
  const workingDirPrefix = buildConfig.workingDirectory
    ? `        working-directory: ${buildConfig.workingDirectory}\n`
    : "";

  const nodeVersion = buildConfig.nodeVersion || "20";
  const packageManager = buildConfig.packageManager || "npm";

  // Helper to get install command
  const getInstallCmd = () => {
    if (packageManager === "yarn") return "yarn install --frozen-lockfile";
    if (packageManager === "pnpm") return "pnpm install --frozen-lockfile";
    return "npm ci";
  };

  // Lockfile used to key dependency caching; must match the package manager
  const lockFile =
    packageManager === "yarn"
      ? "yarn.lock"
      : packageManager === "pnpm"
        ? "pnpm-lock.yaml"
        : "package-lock.json";

  // Helper to add working directory to steps
  // const _addWorkingDir = (step: string) => {
  //   if (!buildConfig.workingDirectory) return step;
  //   return step.replace(
  //     /^(\s+)run:/gm,
  //     `$1working-directory: ${buildConfig.workingDirectory}\n$1run:`,
  //   );
  // };

  const workflows: Record<string, string> = {
    docusaurus: `name: Deploy Docusaurus to GitHub Pages

on:
  push:
    branches: [main]
  workflow_dispatch:

permissions:
  contents: read
  pages: write
  id-token: write

concurrency:
  group: "pages"
  cancel-in-progress: false

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '${nodeVersion}'
          cache: '${packageManager}'${
            buildConfig.workingDirectory
              ? `\n          cache-dependency-path: ${buildConfig.workingDirectory}/${lockFile}`
              : ""
          }

      - name: Install dependencies
${workingDirPrefix}        run: ${getInstallCmd()}

      - name: Build website
${workingDirPrefix}        run: ${buildConfig.buildCommand}

      - name: Upload artifact
        uses: actions/upload-pages-artifact@v2
        with:
          path: ${
            buildConfig.workingDirectory
              ? `${buildConfig.workingDirectory}/${buildConfig.outputPath}`
              : buildConfig.outputPath
          }

  deploy:
    environment:
      name: github-pages
      url: \${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v3`,

    mkdocs: `name: Deploy MkDocs to GitHub Pages

on:
  push:
    branches: [main]
  workflow_dispatch:

permissions:
  contents: write

jobs:
  deploy:
    runs-on: ubuntu-latest${
      buildConfig.workingDirectory
        ? `\n    defaults:\n      run:\n        working-directory: ${buildConfig.workingDirectory}`
        : ""
    }
    steps:
      - uses: actions/checkout@v4

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.x'

      - name: Install dependencies
        run: |
          pip install -r requirements.txt

      - name: Build and Deploy
        run: mkdocs gh-deploy --force --branch ${branch}`,

    hugo: `name: Deploy Hugo to GitHub Pages

on:
  push:
    branches: [main]
  workflow_dispatch:

permissions:
  contents: read
  pages: write
  id-token: write

concurrency:
  group: "pages"
  cancel-in-progress: false

jobs:
  build:
    runs-on: ubuntu-latest${
      buildConfig.workingDirectory
        ? `\n    defaults:\n      run:\n        working-directory: ${buildConfig.workingDirectory}`
        : ""
    }
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: recursive

      - name: Setup Hugo
        uses: peaceiris/actions-hugo@v2
        with:
          hugo-version: 'latest'
          extended: true

      - name: Build
        run: ${buildConfig.buildCommand}

      - name: Upload artifact
        uses: actions/upload-pages-artifact@v2
        with:
          path: ${
            buildConfig.workingDirectory
              ? `${buildConfig.workingDirectory}/${buildConfig.outputPath}`
              : buildConfig.outputPath
          }

  deploy:
    environment:
      name: github-pages
      url: \${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v3`,

    jekyll: `name: Deploy Jekyll to GitHub Pages

on:
  push:
    branches: [main]
  workflow_dispatch:

permissions:
  contents: read
  pages: write
  id-token: write

concurrency:
  group: "pages"
  cancel-in-progress: false

jobs:
  build:
    runs-on: ubuntu-latest${
      buildConfig.workingDirectory
        ? `\n    defaults:\n      run:\n        working-directory: ${buildConfig.workingDirectory}`
        : ""
    }
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Ruby
        uses: ruby/setup-ruby@v1
        with:
          ruby-version: '3.1'
          bundler-cache: true${
            buildConfig.workingDirectory
              ? `\n          working-directory: ${buildConfig.workingDirectory}`
              : ""
          }

      - name: Build with Jekyll
        run: ${buildConfig.buildCommand}
        env:
          JEKYLL_ENV: production

      - name: Upload artifact
        uses: actions/upload-pages-artifact@v2${
          buildConfig.workingDirectory
            ? `\n        with:\n          path: ${buildConfig.workingDirectory}/${buildConfig.outputPath}`
            : ""
        }

  deploy:
    environment:
      name: github-pages
      url: \${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v3`,

    eleventy: `name: Deploy Eleventy to GitHub Pages

on:
  push:
    branches: [main]
  workflow_dispatch:

permissions:
  contents: read
  pages: write
  id-token: write

concurrency:
  group: "pages"
  cancel-in-progress: false

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '${nodeVersion}'
          cache: '${packageManager}'${
            buildConfig.workingDirectory
              ? `\n          cache-dependency-path: ${buildConfig.workingDirectory}/${lockFile}`
              : ""
          }

      - name: Install dependencies
${workingDirPrefix}        run: ${getInstallCmd()}

      - name: Build site
${workingDirPrefix}        run: ${buildConfig.buildCommand}

      - name: Upload artifact
        uses: actions/upload-pages-artifact@v2
        with:
          path: ${
            buildConfig.workingDirectory
              ? `${buildConfig.workingDirectory}/${buildConfig.outputPath}`
              : buildConfig.outputPath
          }

  deploy:
    environment:
      name: github-pages
      url: \${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v3`,
  };

  return workflows[ssg] || workflows.jekyll;
}

```
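
A minimal invocation sketch for the tool above (hedged: the repository path and project name are placeholders; the argument shape follows the zod `inputSchema`):

```typescript
import { deployPages } from "./src/tools/deploy-pages.js";

// Explicit SSG; alternatively omit `ssg` and pass an `analysisId` so the
// tool resolves the SSG from the knowledge graph.
const result = await deployPages({
  repository: "/path/to/my-project", // placeholder local path
  ssg: "docusaurus",
  branch: "gh-pages",
  projectName: "my-project",
});
console.log(result.content);
```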

--------------------------------------------------------------------------------
/tests/tools/validate-readme-checklist.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import * as path from "path";
import * as tmp from "tmp";
import {
  validateReadmeChecklist,
  ReadmeChecklistValidator,
  ValidateReadmeChecklistSchema,
} from "../../src/tools/validate-readme-checklist";

describe("README Checklist Validator", () => {
  let tempDir: string;
  let validator: ReadmeChecklistValidator;

  beforeEach(() => {
    tempDir = tmp.dirSync({ unsafeCleanup: true }).name;
    validator = new ReadmeChecklistValidator();
  });

  afterEach(async () => {
    try {
      await fs.rm(tempDir, { recursive: true, force: true });
    } catch {
      // Ignore cleanup errors
    }
  });

  async function createTestReadme(
    content: string,
    filename = "README.md",
  ): Promise<string> {
    const readmePath = path.join(tempDir, filename);
    await fs.writeFile(readmePath, content, "utf-8");
    return readmePath;
  }

  async function createProjectFile(
    filename: string,
    content = "",
  ): Promise<void> {
    await fs.writeFile(path.join(tempDir, filename), content, "utf-8");
  }

  describe("Input Validation", () => {
    it("should validate required fields", () => {
      expect(() => ValidateReadmeChecklistSchema.parse({})).toThrow();
      expect(() =>
        ValidateReadmeChecklistSchema.parse({
          readmePath: "",
        }),
      ).toThrow();
    });

    it("should accept valid input with defaults", () => {
      const input = ValidateReadmeChecklistSchema.parse({
        readmePath: "/path/to/README.md",
      });

      expect(input.strict).toBe(false);
      expect(input.outputFormat).toBe("console");
    });

    it("should validate output format options", () => {
      const validFormats = ["json", "markdown", "console"];

      for (const format of validFormats) {
        expect(() =>
          ValidateReadmeChecklistSchema.parse({
            readmePath: "/test/README.md",
            outputFormat: format,
          }),
        ).not.toThrow();
      }

      expect(() =>
        ValidateReadmeChecklistSchema.parse({
          readmePath: "/test/README.md",
          outputFormat: "invalid",
        }),
      ).toThrow();
    });
  });

  describe("Essential Sections Validation", () => {
    it("should detect project title", async () => {
      const goodReadme = await createTestReadme(
        "# My Project\n\nDescription here",
        "good-README.md",
      );
      const badReadme = await createTestReadme(
        "## Not a main title\n\nNo main heading",
        "bad-README.md",
      );

      const goodInput = ValidateReadmeChecklistSchema.parse({
        readmePath: goodReadme,
      });
      const badInput = ValidateReadmeChecklistSchema.parse({
        readmePath: badReadme,
      });
      const result = await validateReadmeChecklist(goodInput);
      const result2 = await validateReadmeChecklist(badInput);

      const titleCheck = result.categories["Essential Sections"].results.find(
        (r) => r.item.id === "title",
      );
      const badTitleCheck = result2.categories[
        "Essential Sections"
      ].results.find((r) => r.item.id === "title");

      expect(titleCheck?.passed).toBe(true);
      expect(badTitleCheck?.passed).toBe(false);
    });

    it("should detect project description", async () => {
      const withSubtitle = await createTestReadme(
        "# Project\n\n> A great project description",
        "subtitle-README.md",
      );
      const withParagraph = await createTestReadme(
        "# Project\n\nThis is a description paragraph",
        "paragraph-README.md",
      );
      const withoutDesc = await createTestReadme(
        "# Project\n\n## Installation",
        "no-desc-README.md",
      );

      const subtitleResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: withSubtitle }),
      );
      const paragraphResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: withParagraph }),
      );
      const noDescResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: withoutDesc }),
      );

      const getDescCheck = (result: any) =>
        result.categories["Essential Sections"].results.find(
          (r: any) => r.item.id === "description",
        );

      expect(getDescCheck(subtitleResult)?.passed).toBe(true);
      expect(getDescCheck(paragraphResult)?.passed).toBe(true);
      expect(getDescCheck(noDescResult)?.passed).toBe(false);
    });

    it("should detect TL;DR section", async () => {
      const withTldr = await createTestReadme(
        "# Project\n\n## TL;DR\n\nQuick summary",
        "tldr-README.md",
      );
      const withQuickStart = await createTestReadme(
        "# Project\n\n## Quick Start\n\nQuick summary",
        "quickstart-README.md",
      );
      const withoutTldr = await createTestReadme(
        "# Project\n\n## Installation",
        "no-tldr-README.md",
      );

      const tldrInput = ValidateReadmeChecklistSchema.parse({
        readmePath: withTldr,
      });
      const quickStartInput = ValidateReadmeChecklistSchema.parse({
        readmePath: withQuickStart,
      });
      const noTldrInput = ValidateReadmeChecklistSchema.parse({
        readmePath: withoutTldr,
      });
      const result = await validateReadmeChecklist(tldrInput);
      const result2 = await validateReadmeChecklist(quickStartInput);
      const result3 = await validateReadmeChecklist(noTldrInput);

      const getTldrCheck = (result: any) =>
        result.categories["Essential Sections"].results.find(
          (r: any) => r.item.id === "tldr",
        );

      expect(getTldrCheck(result)?.passed).toBe(true);
      expect(getTldrCheck(result2)?.passed).toBe(true);
      expect(getTldrCheck(result3)?.passed).toBe(false);
    });

    it("should detect installation instructions with code blocks", async () => {
      const goodInstall = await createTestReadme(
        `
# Project
## Installation
\`\`\`bash
npm install project
\`\`\`
      `,
        "good-install-README.md",
      );

      const noCodeBlocks = await createTestReadme(
        `
# Project
## Installation
Just install it somehow
      `,
        "no-code-README.md",
      );

      const noInstallSection = await createTestReadme(
        "# Project\n\nSome content",
        "no-install-README.md",
      );

      const goodResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: goodInstall }),
      );
      const noCodeResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: noCodeBlocks }),
      );
      const noSectionResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: noInstallSection }),
      );

      const getInstallCheck = (result: any) =>
        result.categories["Essential Sections"].results.find(
          (r: any) => r.item.id === "installation",
        );

      expect(getInstallCheck(goodResult)?.passed).toBe(true);
      expect(getInstallCheck(noCodeResult)?.passed).toBe(true); // This should pass because it has Installation section
      expect(getInstallCheck(noSectionResult)?.passed).toBe(false);
    });

    it("should detect usage examples", async () => {
      const goodUsage = await createTestReadme(
        `
# Project
## Usage
\`\`\`javascript
const lib = require('lib');
lib.doSomething();
\`\`\`
      `,
        "good-usage-README.md",
      );

      const noUsage = await createTestReadme(
        "# Project\n\nNo usage section",
        "no-usage-README.md",
      );

      const goodResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: goodUsage }),
      );
      const noUsageResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: noUsage }),
      );

      const getUsageCheck = (result: any) =>
        result.categories["Essential Sections"].results.find(
          (r: any) => r.item.id === "usage",
        );

      expect(getUsageCheck(goodResult)?.passed).toBe(true);
      expect(getUsageCheck(noUsageResult)?.passed).toBe(false);
    });

    it("should detect license information", async () => {
      const readmeWithLicense = await createTestReadme(
        "# Project\n\n## License\n\nMIT",
        "license-README.md",
      );
      const readmeWithoutLicense = await createTestReadme(
        "# Project\n\nNo license info",
        "no-license-README.md",
      );

      // Test without LICENSE file first
      const withLicenseResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({
          readmePath: readmeWithLicense,
          projectPath: tempDir,
        }),
      );
      const withoutLicenseResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({
          readmePath: readmeWithoutLicense,
          projectPath: tempDir,
        }),
      );

      // Test with LICENSE file
      await createProjectFile("LICENSE", "MIT License...");
      const readmeWithLicenseFile = await createTestReadme(
        "# Project\n\nSome content",
        "license-file-README.md",
      );
      const withLicenseFileResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({
          readmePath: readmeWithLicenseFile,
          projectPath: tempDir,
        }),
      );

      const getLicenseCheck = (result: any) =>
        result.categories["Essential Sections"].results.find(
          (r: any) => r.item.id === "license",
        );

      expect(getLicenseCheck(withLicenseResult)?.passed).toBe(true);
      expect(getLicenseCheck(withoutLicenseResult)?.passed).toBe(false);
      expect(getLicenseCheck(withLicenseFileResult)?.passed).toBe(true);
    });
  });

  describe("Community Health Validation", () => {
    it("should detect contributing guidelines", async () => {
      const readmeWithContributing = await createTestReadme(
        "# Project\n\n## Contributing\n\nSee CONTRIBUTING.md",
      );
      await createProjectFile("CONTRIBUTING.md", "Contributing guidelines...");

      const result = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({
          readmePath: readmeWithContributing,
          projectPath: tempDir,
        }),
      );

      const contributingCheck = result.categories[
        "Community Health"
      ].results.find((r) => r.item.id === "contributing");
      expect(contributingCheck?.passed).toBe(true);
    });

    it("should detect code of conduct", async () => {
      await createProjectFile("CODE_OF_CONDUCT.md", "Code of conduct...");
      const readme = await createTestReadme("# Project\n\nSome content");

      const result = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({
          readmePath: readme,
          projectPath: tempDir,
        }),
      );

      const cocCheck = result.categories["Community Health"].results.find(
        (r) => r.item.id === "code-of-conduct",
      );
      expect(cocCheck?.passed).toBe(true);
    });

    it("should detect security policy", async () => {
      await createProjectFile("SECURITY.md", "Security policy...");
      const readme = await createTestReadme("# Project\n\nSome content");

      const result = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({
          readmePath: readme,
          projectPath: tempDir,
        }),
      );

      const securityCheck = result.categories["Community Health"].results.find(
        (r) => r.item.id === "security",
      );
      expect(securityCheck?.passed).toBe(true);
    });
  });

  describe("Visual Elements Validation", () => {
    it("should detect status badges", async () => {
      const withBadges = await createTestReadme(
        `
# Project
[![Build Status](https://travis-ci.org/user/repo.svg?branch=main)](https://travis-ci.org/user/repo)
[![npm version](https://badge.fury.io/js/package.svg)](https://badge.fury.io/js/package)
      `,
        "with-badges-README.md",
      );

      const withoutBadges = await createTestReadme(
        "# Project\n\nNo badges here",
        "no-badges-README.md",
      );

      const withBadgesResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: withBadges }),
      );
      const withoutBadgesResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: withoutBadges }),
      );

      const getBadgeCheck = (result: any) =>
        result.categories["Visual Elements"].results.find(
          (r: any) => r.item.id === "badges",
        );

      expect(getBadgeCheck(withBadgesResult)?.passed).toBe(true);
      expect(getBadgeCheck(withoutBadgesResult)?.passed).toBe(false);
    });

    it("should detect screenshots and images", async () => {
      const withScreenshots = await createTestReadme(
        `
# Project
![Screenshot](screenshot.png)
![Demo](demo.gif)
      `,
        "with-screenshots-README.md",
      );

      const withoutScreenshots = await createTestReadme(
        "# Project\n\nNo images",
        "no-screenshots-README.md",
      );

      const withScreenshotsResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: withScreenshots }),
      );
      const withoutScreenshotsResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: withoutScreenshots }),
      );

      const getScreenshotCheck = (result: any) =>
        result.categories["Visual Elements"].results.find(
          (r: any) => r.item.id === "screenshots",
        );

      expect(getScreenshotCheck(withScreenshotsResult)?.passed).toBe(true);
      expect(getScreenshotCheck(withoutScreenshotsResult)?.passed).toBe(false);
    });

    it("should validate markdown formatting", async () => {
      const goodFormatting = await createTestReadme(
        `
# Main Title
## Section 1
### Subsection
## Section 2
      `,
        "good-formatting-README.md",
      );

      const poorFormatting = await createTestReadme(
        `
# Title
#Another Title
##Poor Spacing
      `,
        "poor-formatting-README.md",
      );

      const goodResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: goodFormatting }),
      );
      const poorResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: poorFormatting }),
      );

      const getFormattingCheck = (result: any) =>
        result.categories["Visual Elements"].results.find(
          (r: any) => r.item.id === "formatting",
        );

      expect(getFormattingCheck(goodResult)?.passed).toBe(true);
      expect(getFormattingCheck(poorResult)?.passed).toBe(false);
    });
  });

  describe("Content Quality Validation", () => {
    it("should detect working code examples", async () => {
      const withCodeExamples = await createTestReadme(
        `
# Project
\`\`\`javascript
const lib = require('lib');
lib.doSomething();
\`\`\`

\`\`\`bash
npm install lib
\`\`\`
      `,
        "with-code-README.md",
      );

      const withoutCodeExamples = await createTestReadme(
        "# Project\n\nNo code examples",
        "no-code-examples-README.md",
      );

      const withCodeResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: withCodeExamples }),
      );
      const withoutCodeResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({
          readmePath: withoutCodeExamples,
        }),
      );

      const getCodeCheck = (result: any) =>
        result.categories["Content Quality"].results.find(
          (r: any) => r.item.id === "working-examples",
        );

      expect(getCodeCheck(withCodeResult)?.passed).toBe(true);
      expect(getCodeCheck(withoutCodeResult)?.passed).toBe(false);
    });

    it("should validate appropriate length", async () => {
      const shortReadme = await createTestReadme(
        "# Project\n\nShort content",
        "short-README.md",
      );
      const longContent =
        "# Project\n\n" + "Long line of content.\n".repeat(350);
      const longReadme = await createTestReadme(longContent, "long-README.md");

      const shortResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: shortReadme }),
      );
      const longResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: longReadme }),
      );

      const getLengthCheck = (result: any) =>
        result.categories["Content Quality"].results.find(
          (r: any) => r.item.id === "appropriate-length",
        );

      expect(getLengthCheck(shortResult)?.passed).toBe(true);
      expect(getLengthCheck(longResult)?.passed).toBe(false);
    });

    it("should validate scannable structure", async () => {
      const goodStructure = await createTestReadme(
        `
# Main Title
## Section 1
### Subsection 1.1
- Item 1
- Item 2
### Subsection 1.2
## Section 2
### Subsection 2.1
- Another item
- Yet another item
      `,
        "good-structure-README.md",
      );

      const poorStructure = await createTestReadme(
        `
# Title
#### Skipped levels
## Back to level 2
      `,
        "poor-structure-README.md",
      );

      const goodResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: goodStructure }),
      );
      const poorResult = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: poorStructure }),
      );

      const getStructureCheck = (result: any) =>
        result.categories["Content Quality"].results.find(
          (r: any) => r.item.id === "scannable-structure",
        );

      expect(getStructureCheck(goodResult)?.passed).toBe(true);
      expect(getStructureCheck(poorResult)?.passed).toBe(false);
    });
  });

  describe("Report Generation", () => {
    it("should generate comprehensive report with all categories", async () => {
      const readme = await createTestReadme(`
# Test Project
> A test project description

## TL;DR
Quick summary of the project.

## Quick Start
\`\`\`bash
npm install test-project
\`\`\`

## Usage
\`\`\`javascript
const test = require('test-project');
test.run();
\`\`\`

## License
MIT
      `);

      const result = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: readme }),
      );

      expect(result.overallScore).toBeGreaterThan(0);
      expect(result.totalItems).toBeGreaterThan(0);
      expect(result.passedItems).toBeGreaterThan(0);
      expect(result.categories).toHaveProperty("Essential Sections");
      expect(result.categories).toHaveProperty("Community Health");
      expect(result.categories).toHaveProperty("Visual Elements");
      expect(result.categories).toHaveProperty("Content Quality");
      expect(result.wordCount).toBeGreaterThan(0);
      expect(result.estimatedReadTime).toBeGreaterThan(0);
    });

    it("should calculate scores correctly", async () => {
      const perfectReadme = await createTestReadme(`
# Perfect Project
> An amazing project that does everything right

[![Build Status](https://travis-ci.org/user/repo.svg)](https://travis-ci.org/user/repo)

## TL;DR
This project is perfect and demonstrates all best practices.

## Quick Start
\`\`\`bash
npm install perfect-project
\`\`\`

## Usage
\`\`\`javascript
const perfect = require('perfect-project');
perfect.doSomething();
\`\`\`

## Contributing
See CONTRIBUTING.md for guidelines.

## License
MIT © Author
      `);

      await createProjectFile("CONTRIBUTING.md", "Guidelines...");
      await createProjectFile("LICENSE", "MIT License...");

      const result = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({
          readmePath: perfectReadme,
          projectPath: tempDir,
        }),
      );

      expect(result.overallScore).toBeGreaterThan(70);
      expect(result.categories["Essential Sections"].score).toBeGreaterThan(80);
    });

    it("should provide helpful recommendations", async () => {
      const poorReadme = await createTestReadme(
        "# Poor Project\n\nMinimal content",
      );

      const result = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: poorReadme }),
      );

      expect(result.recommendations.length).toBeGreaterThan(0);
      expect(result.overallScore).toBeLessThan(50);
    });
  });

  describe("Output Formatting", () => {
    it("should format console output correctly", async () => {
      const readme = await createTestReadme("# Test\n\nContent");
      const result = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({
          readmePath: readme,
          outputFormat: "console",
        }),
      );

      const formatted = validator.formatReport(result, "console");

      expect(formatted).toContain("📋 README Checklist Report");
      expect(formatted).toContain("Overall Score:");
      expect(formatted).toContain("Essential Sections");
      expect(formatted).toContain("✅");
      expect(formatted).toContain("❌");
    });

    it("should format markdown output correctly", async () => {
      const readme = await createTestReadme("# Test\n\nContent");
      const result = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({
          readmePath: readme,
          outputFormat: "markdown",
        }),
      );

      const formatted = validator.formatReport(result, "markdown");

      expect(formatted).toContain("# README Checklist Report");
      expect(formatted).toContain("## Overall Score:");
      expect(formatted).toContain("### Essential Sections");
      expect(formatted).toContain("- ✅");
      expect(formatted).toContain("- ❌");
    });

    it("should format JSON output correctly", async () => {
      const readme = await createTestReadme("# Test\n\nContent");
      const result = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({
          readmePath: readme,
          outputFormat: "json",
        }),
      );

      const formatted = validator.formatReport(result, "json");
      const parsed = JSON.parse(formatted);

      expect(parsed).toHaveProperty("overallScore");
      expect(parsed).toHaveProperty("categories");
      expect(parsed).toHaveProperty("recommendations");
    });
  });

  describe("Error Handling", () => {
    it("should handle non-existent README file", async () => {
      const nonExistentPath = path.join(tempDir, "nonexistent.md");

      await expect(
        validateReadmeChecklist(
          ValidateReadmeChecklistSchema.parse({ readmePath: nonExistentPath }),
        ),
      ).rejects.toThrow();
    });

    it("should handle invalid project path gracefully", async () => {
      const readme = await createTestReadme("# Test\n\nContent");

      const result = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({
          readmePath: readme,
          projectPath: "/invalid/path",
        }),
      );

      // Should still work, just without project file context
      expect(result.overallScore).toBeGreaterThan(0);
    });

    it("should handle empty README file", async () => {
      const emptyReadme = await createTestReadme("", "empty-README.md");

      const result = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: emptyReadme }),
      );

      // Empty README should pass length test (0 words <= 300) and external links test (no links to fail)
      // but fail most other tests, resulting in a low overall score
      expect(result.overallScore).toBeLessThan(20); // Very low score due to missing content
      expect(result.passedItems).toBe(2); // Only length and external-links should pass
      expect(result.failedItems).toBe(15); // Most checks should fail
    });
  });

  describe("Suggestions Generation", () => {
    it("should provide specific suggestions for failed checks", async () => {
      const incompleteReadme = await createTestReadme(
        "# Project\n\nMinimal content",
      );

      const result = await validateReadmeChecklist(
        ValidateReadmeChecklistSchema.parse({ readmePath: incompleteReadme }),
      );

      const failedChecks = Object.values(result.categories)
        .flatMap((cat) => cat.results)
        .filter((r) => !r.passed && r.suggestions);

      expect(failedChecks.length).toBeGreaterThan(0);

      for (const check of failedChecks) {
        expect(check.suggestions).toBeDefined();
        expect(check.suggestions!.length).toBeGreaterThan(0);
      }
    });
  });
});

```

--------------------------------------------------------------------------------
/src/memory/contextual-retrieval.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Contextual Memory Retrieval System for DocuMCP
 * Implements Issue #49: Contextual Memory Retrieval
 *
 * Provides intelligent, context-aware memory retrieval using semantic similarity,
 * temporal relevance, and user intent analysis for enhanced recommendation accuracy.
 */

import { MemoryManager } from "./manager.js";
import { MemoryEntry } from "./storage.js";
import { KnowledgeGraph } from "./knowledge-graph.js";

export interface RetrievalContext {
  currentProject?: {
    path: string;
    language: string;
    framework?: string;
    domain?: string;
    size?: "small" | "medium" | "large";
  };
  userIntent?: {
    action: "analyze" | "recommend" | "deploy" | "troubleshoot" | "learn";
    urgency: "low" | "medium" | "high";
    experience: "novice" | "intermediate" | "expert";
  };
  sessionContext?: {
    recentActions: string[];
    focusAreas: string[];
    timeConstraints?: number; // minutes
  };
  temporalContext?: {
    timeRange?: { start: string; end: string };
    recency: "recent" | "all" | "historical";
    seasonality?: boolean;
  };
}
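
// Illustrative example (not from the codebase): a populated context for a
// medium-sized TypeScript project whose maintainer urgently needs a
// deployment recommendation:
//
//   const context: RetrievalContext = {
//     currentProject: { path: "/repo/my-app", language: "typescript", size: "medium" },
//     userIntent: { action: "deploy", urgency: "high", experience: "intermediate" },
//     temporalContext: { recency: "recent" },
//   };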

export interface SemanticEmbedding {
  vector: number[];
  metadata: {
    source: string;
    confidence: number;
    generatedAt: string;
  };
}

export interface ContextualMatch {
  memory: MemoryEntry;
  relevanceScore: number;
  contextualFactors: {
    semantic: number;
    temporal: number;
    structural: number;
    intentional: number;
  };
  reasoning: string[];
  confidence: number;
}

export interface RetrievalResult {
  matches: ContextualMatch[];
  metadata: {
    queryContext: RetrievalContext;
    totalCandidates: number;
    processingTime: number;
    fallbackUsed: boolean;
  };
  insights: {
    patterns: string[];
    recommendations: string[];
    gaps: string[];
  };
}

export class ContextualMemoryRetrieval {
  private memoryManager: MemoryManager;
  private knowledgeGraph: KnowledgeGraph;
  private embeddingCache: Map<string, SemanticEmbedding>;
  private readonly maxCacheSize = 1000;
  private readonly similarityThreshold = 0.6;
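  // Note: maxCacheSize and similarityThreshold are currently unused by the
  // text-matching implementation (see calculateSemanticSimilarity); they are
  // likely reserved for a future embedding-based similarity path.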

  constructor(memoryManager: MemoryManager, knowledgeGraph: KnowledgeGraph) {
    this.memoryManager = memoryManager;
    this.knowledgeGraph = knowledgeGraph;
    this.embeddingCache = new Map();
  }

  /**
   * Retrieve contextually relevant memories
   */
  async retrieve(
    query: string,
    context: RetrievalContext,
    options?: {
      maxResults?: number;
      minRelevance?: number;
      includeReasoning?: boolean;
    },
  ): Promise<RetrievalResult> {
    const startTime = Date.now();
    const maxResults = options?.maxResults || 10;
    const minRelevance = options?.minRelevance || 0.3;

    // Get candidate memories based on basic filtering
    const candidates = await this.getCandidateMemories(query, context);

    // Score and rank candidates
    const scoredMatches = await this.scoreAndRankCandidates(
      candidates,
      query,
      context,
    );

    // Filter by relevance threshold
    const relevantMatches = scoredMatches
      .filter((match) => match.relevanceScore >= minRelevance)
      .slice(0, maxResults);

    // Generate insights from matches
    const insights = await this.generateInsights(relevantMatches, context);

    const processingTime = Date.now() - startTime;

    return {
      matches: relevantMatches,
      metadata: {
        queryContext: context,
        totalCandidates: candidates.length,
        processingTime,
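        // True when candidates existed but none met the relevance threshold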
        fallbackUsed: relevantMatches.length === 0 && candidates.length > 0,
      },
      insights,
    };
  }

  /**
   * Get candidate memories using multiple retrieval strategies
   */
  private async getCandidateMemories(
    query: string,
    context: RetrievalContext,
  ): Promise<MemoryEntry[]> {
    const candidates = new Map<string, MemoryEntry>();

    // Strategy 1: Text-based search
    const textMatches = await this.memoryManager.search(query, {
      sortBy: "timestamp",
    });
    textMatches.forEach((memory) => candidates.set(memory.id, memory));

    // Strategy 2: Context-based filtering
    if (context.currentProject) {
      const contextMatches = await this.getContextBasedCandidates(
        context.currentProject,
      );
      contextMatches.forEach((memory) => candidates.set(memory.id, memory));
    }

    // Strategy 3: Intent-based retrieval
    if (context.userIntent) {
      const intentMatches = await this.getIntentBasedCandidates(
        context.userIntent,
      );
      intentMatches.forEach((memory) => candidates.set(memory.id, memory));
    }

    // Strategy 4: Temporal filtering
    if (context.temporalContext) {
      const temporalMatches = await this.getTemporalCandidates(
        context.temporalContext,
      );
      temporalMatches.forEach((memory) => candidates.set(memory.id, memory));
    }

    // Strategy 5: Knowledge graph traversal
    const graphMatches = await this.getGraphBasedCandidates(query, context);
    graphMatches.forEach((memory) => candidates.set(memory.id, memory));

    return Array.from(candidates.values());
  }

  /**
   * Get candidates based on current project context
   */
  private async getContextBasedCandidates(
    project: NonNullable<RetrievalContext["currentProject"]>,
  ): Promise<MemoryEntry[]> {
    const searchCriteria = [];

    // Language-based search
    searchCriteria.push(
      this.memoryManager
        .search("", { sortBy: "timestamp" })
        .then((memories) =>
          memories.filter(
            (m) =>
              m.data.language?.primary === project.language ||
              m.metadata.tags?.includes(project.language),
          ),
        ),
    );

    // Framework-based search
    if (project.framework) {
      searchCriteria.push(
        this.memoryManager
          .search("", { sortBy: "timestamp" })
          .then((memories) =>
            memories.filter(
              (m) =>
                m.data.framework?.name === project.framework ||
                (project.framework &&
                  m.metadata.tags?.includes(project.framework)),
            ),
          ),
      );
    }

    // Project size similarity
    if (project.size) {
      searchCriteria.push(
        this.memoryManager
          .search("", { sortBy: "timestamp" })
          .then((memories) =>
            memories.filter(
              (m) =>
                this.categorizeProjectSize(m.data.stats?.files || 0) ===
                project.size,
            ),
          ),
      );
    }

    const results = await Promise.all(searchCriteria);
    const allMatches = results.flat();

    // Deduplicate
    const unique = new Map<string, MemoryEntry>();
    allMatches.forEach((memory) => unique.set(memory.id, memory));

    return Array.from(unique.values());
  }

  /**
   * Get candidates based on user intent
   */
  private async getIntentBasedCandidates(
    intent: NonNullable<RetrievalContext["userIntent"]>,
  ): Promise<MemoryEntry[]> {
    const intentTypeMap = {
      analyze: ["analysis", "evaluation", "assessment"],
      recommend: ["recommendation", "suggestion", "advice"],
      deploy: ["deployment", "publish", "release"],
      troubleshoot: ["error", "issue", "problem", "debug"],
      learn: ["tutorial", "guide", "example", "pattern"],
    };

    const searchTerms = intentTypeMap[intent.action] || [intent.action];
    const searches = searchTerms.map((term) =>
      this.memoryManager.search(term, { sortBy: "timestamp" }),
    );

    const results = await Promise.all(searches);
    const allMatches = results.flat();

    // Filter by experience level
    return allMatches.filter((memory) => {
      if (intent.experience === "novice") {
        return (
          !memory.metadata.tags?.includes("advanced") &&
          !memory.metadata.tags?.includes("expert")
        );
      } else if (intent.experience === "expert") {
        return (
          memory.metadata.tags?.includes("advanced") ||
          memory.metadata.tags?.includes("expert") ||
          memory.data.complexity === "complex"
        );
      }
      return true; // intermediate gets all
    });
  }

  /**
   * Get candidates based on temporal context
   */
  private async getTemporalCandidates(
    temporal: NonNullable<RetrievalContext["temporalContext"]>,
  ): Promise<MemoryEntry[]> {
    const searchOptions: any = { sortBy: "timestamp" };

    if (temporal.timeRange) {
      // Use memory manager's built-in time filtering
      const allMemories = await this.memoryManager.search("", searchOptions);

      return allMemories.filter((memory) => {
        const memoryTime = new Date(memory.timestamp);
        const start = new Date(temporal.timeRange!.start);
        const end = new Date(temporal.timeRange!.end);
        return memoryTime >= start && memoryTime <= end;
      });
    }

    if (temporal.recency === "recent") {
      const cutoff = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000); // Last 7 days
      const allMemories = await this.memoryManager.search("", searchOptions);

      return allMemories.filter(
        (memory) => new Date(memory.timestamp) > cutoff,
      );
    }

    if (temporal.recency === "historical") {
      const cutoff = new Date(Date.now() - 90 * 24 * 60 * 60 * 1000); // Older than 90 days
      const allMemories = await this.memoryManager.search("", searchOptions);

      return allMemories.filter(
        (memory) => new Date(memory.timestamp) < cutoff,
      );
    }

    return this.memoryManager.search("", searchOptions);
  }

  /**
   * Get candidates using knowledge graph traversal
   */
  private async getGraphBasedCandidates(
    query: string,
    context: RetrievalContext,
  ): Promise<MemoryEntry[]> {
    if (!context.currentProject) return [];

    // Find relevant nodes in the knowledge graph
    const graphQuery = {
      nodeTypes: ["project", "technology"],
      properties: context.currentProject.language
        ? {
            language: context.currentProject.language,
          }
        : undefined,
      maxDepth: 2,
    };

    const graphResult = this.knowledgeGraph.query(graphQuery);
    const relevantNodeIds = graphResult.nodes.map((node) => node.id);

    // Find memories associated with these nodes
    const memories: MemoryEntry[] = [];
    const allMemories = await this.memoryManager.search("", {
      sortBy: "timestamp",
    });

    for (const memory of allMemories) {
      const projectNodeId = `project:${memory.metadata.projectId}`;
      const techNodeId = memory.metadata.ssg
        ? `tech:${memory.metadata.ssg}`
        : null;

      if (
        relevantNodeIds.includes(projectNodeId) ||
        (techNodeId && relevantNodeIds.includes(techNodeId))
      ) {
        memories.push(memory);
      }
    }

    return memories;
  }

  /**
   * Score and rank candidates based on contextual relevance
   */
  private async scoreAndRankCandidates(
    candidates: MemoryEntry[],
    query: string,
    context: RetrievalContext,
  ): Promise<ContextualMatch[]> {
    const matches: ContextualMatch[] = [];

    for (const memory of candidates) {
      const contextualFactors = await this.calculateContextualFactors(
        memory,
        query,
        context,
      );

      const relevanceScore = this.calculateOverallRelevance(contextualFactors);
      const reasoning = this.generateReasoning(
        memory,
        contextualFactors,
        context,
      );
      const confidence = this.calculateConfidence(contextualFactors, memory);

      matches.push({
        memory,
        relevanceScore,
        contextualFactors,
        reasoning,
        confidence,
      });
    }

    // Sort by relevance score (descending)
    return matches.sort((a, b) => b.relevanceScore - a.relevanceScore);
  }

  /**
   * Calculate contextual factors for scoring
   */
  private async calculateContextualFactors(
    memory: MemoryEntry,
    query: string,
    context: RetrievalContext,
  ): Promise<ContextualMatch["contextualFactors"]> {
    const semantic = await this.calculateSemanticSimilarity(memory, query);
    const temporal = await this.calculateTemporalRelevance(memory, context);
    const structural = this.calculateStructuralRelevance(memory, context);
    const intentional = this.calculateIntentionalRelevance(memory, context);

    return { semantic, temporal, structural, intentional };
  }

  /**
   * Calculate semantic similarity using simple text matching
   * (In a production system, this would use embeddings)
   */
  private async calculateSemanticSimilarity(
    memory: MemoryEntry,
    query: string,
  ): Promise<number> {
    const queryTerms = query.toLowerCase().split(/\s+/);
    const memoryText = JSON.stringify(memory.data).toLowerCase();
    const metadataText = JSON.stringify(memory.metadata).toLowerCase();

    let matches = 0;
    for (const term of queryTerms) {
      if (memoryText.includes(term) || metadataText.includes(term)) {
        matches++;
      }
    }

    return queryTerms.length > 0 ? matches / queryTerms.length : 0;
  }
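
  // A production system would compare embedding vectors (e.g.
  // SemanticEmbedding.vector) rather than raw term overlap. A minimal
  // cosine-similarity helper for that path might look like this sketch,
  // assuming two equal-length vectors:
  //
  //   function cosineSimilarity(a: number[], b: number[]): number {
  //     let dot = 0;
  //     let normA = 0;
  //     let normB = 0;
  //     for (let i = 0; i < a.length; i++) {
  //       dot += a[i] * b[i];
  //       normA += a[i] * a[i];
  //       normB += b[i] * b[i];
  //     }
  //     return normA && normB ? dot / Math.sqrt(normA * normB) : 0;
  //   }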

  /**
   * Calculate temporal relevance based on recency and context
   */
  private async calculateTemporalRelevance(
    memory: MemoryEntry,
    context: RetrievalContext,
  ): Promise<number> {
    const memoryDate = new Date(memory.timestamp);
    const now = new Date();
    const daysSince =
      (now.getTime() - memoryDate.getTime()) / (1000 * 60 * 60 * 24);

    // Base score decays exponentially with age (30-day time constant,
    // not a 30-day half-life)
    let score = Math.exp(-daysSince / 30);
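    // Sample values: 7 days → ≈0.79, 30 days → ≈0.37 (1/e), 90 days → ≈0.05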

    // Boost for explicit temporal preferences
    if (context.temporalContext?.recency === "recent" && daysSince <= 7) {
      score *= 1.5;
    } else if (
      context.temporalContext?.recency === "historical" &&
      daysSince >= 90
    ) {
      score *= 1.3;
    }

    // When the session is time-constrained, boost high-urgency requests
    // (the multiplier is 1.0 otherwise, leaving the score unchanged)
    if (context.sessionContext?.timeConstraints) {
      const urgencyMultiplier =
        context.userIntent?.urgency === "high" ? 1.2 : 1.0;
      score *= urgencyMultiplier;
    }

    return Math.min(score, 1.0);
  }

  /**
   * Calculate structural relevance based on project similarity
   */
  private calculateStructuralRelevance(
    memory: MemoryEntry,
    context: RetrievalContext,
  ): number {
    if (!context.currentProject) return 0.5; // Neutral when no project context

    // The additive weights below sum to 1.0, so the accumulated score is
    // already normalized to [0, 1]; a perfect structural match scores 1.0,
    // above the 0.5 neutral returned without project context.
    let score = 0;

    // Language match
    if (memory.data.language?.primary === context.currentProject.language) {
      score += 0.4;
    }

    // Framework match
    if (
      context.currentProject.framework &&
      memory.data.framework?.name === context.currentProject.framework
    ) {
      score += 0.3;
    }

    // Size similarity
    if (context.currentProject.size) {
      const memorySize = this.categorizeProjectSize(
        memory.data.stats?.files || 0,
      );
      if (memorySize === context.currentProject.size) {
        score += 0.2;
      }
    }

    // Memory type aligns with the user's current action
    if (
      memory.type === "analysis" &&
      context.userIntent?.action === "analyze"
    ) {
      score += 0.1;
    } else if (
      memory.type === "recommendation" &&
      context.userIntent?.action === "recommend"
    ) {
      score += 0.1;
    }

    return Math.min(score, 1.0);
  }

  /**
   * Calculate intentional relevance based on user intent
   */
  private calculateIntentionalRelevance(
    memory: MemoryEntry,
    context: RetrievalContext,
  ): number {
    if (!context.userIntent) return 0.5; // Neutral when no intent

    let score = 0;

    // Action alignment
    const actionTypeMap = {
      analyze: ["analysis", "evaluation"],
      recommend: ["recommendation"],
      deploy: ["deployment"],
      troubleshoot: ["deployment", "configuration"],
      learn: ["analysis", "recommendation"],
    };

    const relevantTypes = actionTypeMap[context.userIntent.action] || [];
    if (relevantTypes.includes(memory.type)) {
      score += 0.5;
    }

    // Experience level alignment
    if (context.userIntent.experience === "novice") {
      // Prefer simpler, more successful cases
      if (
        memory.data.status === "success" ||
        memory.data.complexity !== "complex"
      ) {
        score += 0.3;
      }
    } else if (context.userIntent.experience === "expert") {
      // Prefer complex or edge cases
      if (
        memory.data.complexity === "complex" ||
        memory.metadata.tags?.includes("advanced")
      ) {
        score += 0.3;
      }
    }

    // Urgency consideration
    if (context.userIntent.urgency === "high") {
      // Prefer recent, successful cases
      const daysSince =
        (Date.now() - new Date(memory.timestamp).getTime()) /
        (1000 * 60 * 60 * 24);
      if (daysSince <= 7 && memory.data.status === "success") {
        score += 0.2;
      }
    }

    return Math.min(score, 1.0);
  }

  /**
   * Calculate overall relevance score
   */
  private calculateOverallRelevance(
    factors: ContextualMatch["contextualFactors"],
  ): number {
    // Weighted combination of factors
    const weights = {
      semantic: 0.3,
      temporal: 0.2,
      structural: 0.3,
      intentional: 0.2,
    };
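    // Worked example: semantic 0.8, temporal 0.5, structural 0.6,
    // intentional 0.4 → 0.24 + 0.10 + 0.18 + 0.08 = 0.60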

    return (
      factors.semantic * weights.semantic +
      factors.temporal * weights.temporal +
      factors.structural * weights.structural +
      factors.intentional * weights.intentional
    );
  }

  /**
   * Generate reasoning for why a memory was selected
   */
  private generateReasoning(
    memory: MemoryEntry,
    factors: ContextualMatch["contextualFactors"],
    context: RetrievalContext,
  ): string[] {
    const reasoning: string[] = [];

    if (factors.semantic > 0.7) {
      reasoning.push("High semantic similarity to query");
    }

    if (factors.temporal > 0.8) {
      reasoning.push("Recently relevant information");
    }

    if (factors.structural > 0.6) {
      reasoning.push(
        `Similar project structure (${
          memory.data.language?.primary || "unknown"
        })`,
      );
    }

    if (factors.intentional > 0.7) {
      reasoning.push(
        `Matches user intent for ${
          context.userIntent?.action || "general"
        } action`,
      );
    }

    if (
      memory.data.status === "success" &&
      context.userIntent?.urgency === "high"
    ) {
      reasoning.push("Proven successful approach for urgent needs");
    }

    if (memory.metadata.ssg && context.currentProject?.framework) {
      reasoning.push(
        `Experience with ${memory.metadata.ssg} for similar projects`,
      );
    }

    return reasoning.length > 0 ? reasoning : ["General relevance to query"];
  }

  /**
   * Calculate confidence in the match
   */
  private calculateConfidence(
    factors: ContextualMatch["contextualFactors"],
    memory: MemoryEntry,
  ): number {
    let confidence = (factors.semantic + factors.structural) / 2;

    // Boost confidence for successful outcomes
    if (memory.data.status === "success") {
      confidence *= 1.2;
    }

    // Boost confidence for recent data
    const daysSince =
      (Date.now() - new Date(memory.timestamp).getTime()) /
      (1000 * 60 * 60 * 24);
    if (daysSince <= 30) {
      confidence *= 1.1;
    }

    // Boost confidence for rich metadata
    if (memory.metadata.tags && memory.metadata.tags.length > 2) {
      confidence *= 1.05;
    }

    return Math.min(confidence, 1.0);
  }

  /**
   * Generate insights from retrieved matches
   */
  private async generateInsights(
    matches: ContextualMatch[],
    context: RetrievalContext,
  ): Promise<RetrievalResult["insights"]> {
    const patterns: string[] = [];
    const recommendations: string[] = [];
    const gaps: string[] = [];

    if (matches.length === 0) {
      gaps.push("No relevant memories found for current context");
      recommendations.push(
        "Consider expanding search criteria or building more experience",
      );
      return { patterns, recommendations, gaps };
    }

    // Analyze patterns in successful matches
    const successfulMatches = matches.filter(
      (m) => m.memory.data.status === "success" && m.relevanceScore > 0.6,
    );

    if (successfulMatches.length >= 2) {
      // Find common SSGs
      const ssgs = new Map<string, number>();
      successfulMatches.forEach((match) => {
        if (match.memory.metadata.ssg) {
          ssgs.set(
            match.memory.metadata.ssg,
            (ssgs.get(match.memory.metadata.ssg) || 0) + 1,
          );
        }
      });

      if (ssgs.size > 0) {
        const topSSG = Array.from(ssgs.entries()).sort(
          ([, a], [, b]) => b - a,
        )[0];
        patterns.push(
          `${topSSG[0]} appears in ${topSSG[1]} successful similar projects`,
        );
        recommendations.push(
          `Consider ${topSSG[0]} based on successful precedents`,
        );
      }

      // Find common success factors
      const commonFactors = this.findCommonSuccessFactors(successfulMatches);
      patterns.push(...commonFactors);
    }

    // Identify gaps
    if (
      context.userIntent?.action === "deploy" &&
      matches.filter((m) => m.memory.type === "deployment").length === 0
    ) {
      gaps.push("Limited deployment experience for similar projects");
      recommendations.push(
        "Proceed cautiously with deployment and document the process",
      );
    }

    if (
      context.userIntent?.experience === "novice" &&
      matches.every((m) => m.confidence < 0.7)
    ) {
      gaps.push("Limited beginner-friendly resources for this context");
      recommendations.push(
        "Consider consulting documentation or seeking expert guidance",
      );
    }

    return { patterns, recommendations, gaps };
  }

  /**
   * Find common success factors across matches
   */
  private findCommonSuccessFactors(matches: ContextualMatch[]): string[] {
    const factors: string[] = [];

    const hasTests = matches.filter(
      (m) => m.memory.data.testing?.hasTests,
    ).length;
    if (hasTests / matches.length > 0.7) {
      factors.push("Projects with testing have higher success rates");
    }

    const hasCI = matches.filter((m) => m.memory.data.ci?.hasCI).length;
    if (hasCI / matches.length > 0.6) {
      factors.push("CI/CD adoption correlates with deployment success");
    }

    const simpleProjects = matches.filter(
      (m) => m.memory.data.complexity !== "complex",
    ).length;
    if (simpleProjects / matches.length > 0.8) {
      factors.push("Simpler project structures show more reliable outcomes");
    }

    return factors;
  }

  /**
   * Categorize project size for comparison
   */
  private categorizeProjectSize(
    fileCount: number,
  ): "small" | "medium" | "large" {
    if (fileCount < 50) return "small";
    if (fileCount < 200) return "medium";
    return "large";
  }

  /**
   * Get contextual suggestions for improving retrieval
   */
  async getSuggestions(context: RetrievalContext): Promise<{
    queryImprovements: string[];
    contextEnhancements: string[];
    learningOpportunities: string[];
  }> {
    const suggestions = {
      queryImprovements: [] as string[],
      contextEnhancements: [] as string[],
      learningOpportunities: [] as string[],
    };

    // Analyze current context completeness
    if (!context.currentProject) {
      suggestions.contextEnhancements.push(
        "Provide current project information for better matches",
      );
    }

    if (!context.userIntent) {
      suggestions.contextEnhancements.push(
        "Specify your intent (analyze, recommend, deploy, etc.) for targeted results",
      );
    }

    if (!context.temporalContext) {
      suggestions.contextEnhancements.push(
        "Set temporal preferences (recent vs. historical) for relevance",
      );
    }

    // Analyze retrieval patterns (heuristic: the literal query "search"
    // acts as a proxy for prior retrieval activity in memory)
    const recentSearches = await this.memoryManager.search("search", {
      sortBy: "timestamp",
    });
    if (recentSearches.length < 5) {
      suggestions.learningOpportunities.push(
        "System will improve with more usage and data",
      );
    }

    // Check for data gaps
    if (context.currentProject?.language) {
      const languageMemories = await this.memoryManager.search(
        context.currentProject.language,
      );
      if (languageMemories.length < 3) {
        suggestions.learningOpportunities.push(
          `More experience needed with ${context.currentProject.language} projects`,
        );
      }
    }

    return suggestions;
  }

  /**
   * Clear embedding cache
   */
  clearCache(): void {
    this.embeddingCache.clear();
  }

  /**
   * Get retrieval statistics
   */
  getStatistics(): {
    cacheSize: number;
    cacheHitRate: number;
    averageRetrievalTime: number;
    commonContextTypes: Record<string, number>;
  } {
    // This would track actual usage statistics in a real implementation
    return {
      cacheSize: this.embeddingCache.size,
      cacheHitRate: 0.85, // Placeholder
      averageRetrievalTime: 150, // ms
      commonContextTypes: {
        project_analysis: 45,
        ssg_recommendation: 38,
        deployment_troubleshooting: 12,
        learning_assistance: 5,
      },
    };
  }
}

export default ContextualMemoryRetrieval;

```
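
A minimal usage sketch (illustrative only: the `MemoryManager` and `KnowledgeGraph` constructor calls below are assumptions, so consult their modules for the real signatures):

```typescript
import { MemoryManager } from "./manager.js";
import { KnowledgeGraph } from "./knowledge-graph.js";
import ContextualMemoryRetrieval, {
  RetrievalContext,
} from "./contextual-retrieval.js";

async function demo(): Promise<void> {
  // Hypothetical construction; the real constructors may require options.
  const manager = new MemoryManager();
  const graph = new KnowledgeGraph();
  const retrieval = new ContextualMemoryRetrieval(manager, graph);

  const context: RetrievalContext = {
    currentProject: {
      path: "/repo/my-app",
      language: "typescript",
      size: "medium",
    },
    userIntent: {
      action: "recommend",
      urgency: "medium",
      experience: "intermediate",
    },
    temporalContext: { recency: "recent" },
  };

  const result = await retrieval.retrieve(
    "static site generator for documentation",
    context,
    { maxResults: 5, minRelevance: 0.4 },
  );

  for (const match of result.matches) {
    console.log(match.relevanceScore.toFixed(2), match.reasoning.join("; "));
  }
  console.log("Recommendations:", result.insights.recommendations);
}

demo().catch(console.error);
```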
Page 13/20