# documcp codebase export (page 6 of 20)

Export metadata: 48344/50000 tokens; 13 of 274 files are included on this page.
Use http://codebase.md/tosin2013/documcp?page={x} to view the other pages of the full context.

# Directory Structure

```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│   ├── agents
│   │   ├── documcp-ast.md
│   │   ├── documcp-deploy.md
│   │   ├── documcp-memory.md
│   │   ├── documcp-test.md
│   │   └── documcp-tool.md
│   ├── copilot-instructions.md
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── automated-changelog.md
│   │   ├── bug_report.md
│   │   ├── bug_report.yml
│   │   ├── documentation_issue.md
│   │   ├── feature_request.md
│   │   ├── feature_request.yml
│   │   ├── npm-publishing-fix.md
│   │   └── release_improvements.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── release-drafter.yml
│   └── workflows
│       ├── auto-merge.yml
│       ├── ci.yml
│       ├── codeql.yml
│       ├── dependency-review.yml
│       ├── deploy-docs.yml
│       ├── README.md
│       ├── release-drafter.yml
│       └── release.yml
├── .gitignore
├── .husky
│   ├── commit-msg
│   └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│   ├── .docusaurus
│   │   ├── docusaurus-plugin-content-docs
│   │   │   └── default
│   │   │       └── __mdx-loader-dependency.json
│   │   └── docusaurus-plugin-content-pages
│   │       └── default
│   │           └── __plugin.json
│   ├── adrs
│   │   ├── 001-mcp-server-architecture.md
│   │   ├── 002-repository-analysis-engine.md
│   │   ├── 003-static-site-generator-recommendation-engine.md
│   │   ├── 004-diataxis-framework-integration.md
│   │   ├── 005-github-pages-deployment-automation.md
│   │   ├── 006-mcp-tools-api-design.md
│   │   ├── 007-mcp-prompts-and-resources-integration.md
│   │   ├── 008-intelligent-content-population-engine.md
│   │   ├── 009-content-accuracy-validation-framework.md
│   │   ├── 010-mcp-resource-pattern-redesign.md
│   │   └── README.md
│   ├── api
│   │   ├── .nojekyll
│   │   ├── assets
│   │   │   ├── hierarchy.js
│   │   │   ├── highlight.css
│   │   │   ├── icons.js
│   │   │   ├── icons.svg
│   │   │   ├── main.js
│   │   │   ├── navigation.js
│   │   │   ├── search.js
│   │   │   └── style.css
│   │   ├── hierarchy.html
│   │   ├── index.html
│   │   ├── modules.html
│   │   └── variables
│   │       └── TOOLS.html
│   ├── assets
│   │   └── logo.svg
│   ├── development
│   │   └── MCP_INSPECTOR_TESTING.md
│   ├── docusaurus.config.js
│   ├── explanation
│   │   ├── architecture.md
│   │   └── index.md
│   ├── guides
│   │   ├── link-validation.md
│   │   ├── playwright-integration.md
│   │   └── playwright-testing-workflow.md
│   ├── how-to
│   │   ├── analytics-setup.md
│   │   ├── custom-domains.md
│   │   ├── documentation-freshness-tracking.md
│   │   ├── github-pages-deployment.md
│   │   ├── index.md
│   │   ├── local-testing.md
│   │   ├── performance-optimization.md
│   │   ├── prompting-guide.md
│   │   ├── repository-analysis.md
│   │   ├── seo-optimization.md
│   │   ├── site-monitoring.md
│   │   ├── troubleshooting.md
│   │   └── usage-examples.md
│   ├── index.md
│   ├── knowledge-graph.md
│   ├── package-lock.json
│   ├── package.json
│   ├── phase-2-intelligence.md
│   ├── reference
│   │   ├── api-overview.md
│   │   ├── cli.md
│   │   ├── configuration.md
│   │   ├── deploy-pages.md
│   │   ├── index.md
│   │   ├── mcp-tools.md
│   │   └── prompt-templates.md
│   ├── research
│   │   ├── cross-domain-integration
│   │   │   └── README.md
│   │   ├── domain-1-mcp-architecture
│   │   │   ├── index.md
│   │   │   └── mcp-performance-research.md
│   │   ├── domain-2-repository-analysis
│   │   │   └── README.md
│   │   ├── domain-3-ssg-recommendation
│   │   │   ├── index.md
│   │   │   └── ssg-performance-analysis.md
│   │   ├── domain-4-diataxis-integration
│   │   │   └── README.md
│   │   ├── domain-5-github-deployment
│   │   │   ├── github-pages-security-analysis.md
│   │   │   └── index.md
│   │   ├── domain-6-api-design
│   │   │   └── README.md
│   │   ├── README.md
│   │   ├── research-integration-summary-2025-01-14.md
│   │   ├── research-progress-template.md
│   │   └── research-questions-2025-01-14.md
│   ├── robots.txt
│   ├── sidebars.js
│   ├── sitemap.xml
│   ├── src
│   │   └── css
│   │       └── custom.css
│   └── tutorials
│       ├── development-setup.md
│       ├── environment-setup.md
│       ├── first-deployment.md
│       ├── getting-started.md
│       ├── index.md
│       ├── memory-workflows.md
│       └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│   └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│   ├── benchmarks
│   │   └── performance.ts
│   ├── index.ts
│   ├── memory
│   │   ├── contextual-retrieval.ts
│   │   ├── deployment-analytics.ts
│   │   ├── enhanced-manager.ts
│   │   ├── export-import.ts
│   │   ├── freshness-kg-integration.ts
│   │   ├── index.ts
│   │   ├── integration.ts
│   │   ├── kg-code-integration.ts
│   │   ├── kg-health.ts
│   │   ├── kg-integration.ts
│   │   ├── kg-link-validator.ts
│   │   ├── kg-storage.ts
│   │   ├── knowledge-graph.ts
│   │   ├── learning.ts
│   │   ├── manager.ts
│   │   ├── multi-agent-sharing.ts
│   │   ├── pruning.ts
│   │   ├── schemas.ts
│   │   ├── storage.ts
│   │   ├── temporal-analysis.ts
│   │   ├── user-preferences.ts
│   │   └── visualization.ts
│   ├── prompts
│   │   └── technical-writer-prompts.ts
│   ├── scripts
│   │   └── benchmark.ts
│   ├── templates
│   │   └── playwright
│   │       ├── accessibility.spec.template.ts
│   │       ├── Dockerfile.template
│   │       ├── docs-e2e.workflow.template.yml
│   │       ├── link-validation.spec.template.ts
│   │       └── playwright.config.template.ts
│   ├── tools
│   │   ├── analyze-deployments.ts
│   │   ├── analyze-readme.ts
│   │   ├── analyze-repository.ts
│   │   ├── check-documentation-links.ts
│   │   ├── deploy-pages.ts
│   │   ├── detect-gaps.ts
│   │   ├── evaluate-readme-health.ts
│   │   ├── generate-config.ts
│   │   ├── generate-contextual-content.ts
│   │   ├── generate-llm-context.ts
│   │   ├── generate-readme-template.ts
│   │   ├── generate-technical-writer-prompts.ts
│   │   ├── kg-health-check.ts
│   │   ├── manage-preferences.ts
│   │   ├── manage-sitemap.ts
│   │   ├── optimize-readme.ts
│   │   ├── populate-content.ts
│   │   ├── readme-best-practices.ts
│   │   ├── recommend-ssg.ts
│   │   ├── setup-playwright-tests.ts
│   │   ├── setup-structure.ts
│   │   ├── sync-code-to-docs.ts
│   │   ├── test-local-deployment.ts
│   │   ├── track-documentation-freshness.ts
│   │   ├── update-existing-documentation.ts
│   │   ├── validate-content.ts
│   │   ├── validate-documentation-freshness.ts
│   │   ├── validate-readme-checklist.ts
│   │   └── verify-deployment.ts
│   ├── types
│   │   └── api.ts
│   ├── utils
│   │   ├── ast-analyzer.ts
│   │   ├── code-scanner.ts
│   │   ├── content-extractor.ts
│   │   ├── drift-detector.ts
│   │   ├── freshness-tracker.ts
│   │   ├── language-parsers-simple.ts
│   │   ├── permission-checker.ts
│   │   └── sitemap-generator.ts
│   └── workflows
│       └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│   ├── api
│   │   └── mcp-responses.test.ts
│   ├── benchmarks
│   │   └── performance.test.ts
│   ├── edge-cases
│   │   └── error-handling.test.ts
│   ├── functional
│   │   └── tools.test.ts
│   ├── integration
│   │   ├── kg-documentation-workflow.test.ts
│   │   ├── knowledge-graph-workflow.test.ts
│   │   ├── mcp-readme-tools.test.ts
│   │   ├── memory-mcp-tools.test.ts
│   │   ├── readme-technical-writer.test.ts
│   │   └── workflow.test.ts
│   ├── memory
│   │   ├── contextual-retrieval.test.ts
│   │   ├── enhanced-manager.test.ts
│   │   ├── export-import.test.ts
│   │   ├── freshness-kg-integration.test.ts
│   │   ├── kg-code-integration.test.ts
│   │   ├── kg-health.test.ts
│   │   ├── kg-link-validator.test.ts
│   │   ├── kg-storage-validation.test.ts
│   │   ├── kg-storage.test.ts
│   │   ├── knowledge-graph-enhanced.test.ts
│   │   ├── knowledge-graph.test.ts
│   │   ├── learning.test.ts
│   │   ├── manager-advanced.test.ts
│   │   ├── manager.test.ts
│   │   ├── mcp-resource-integration.test.ts
│   │   ├── mcp-tool-persistence.test.ts
│   │   ├── schemas.test.ts
│   │   ├── storage.test.ts
│   │   ├── temporal-analysis.test.ts
│   │   └── user-preferences.test.ts
│   ├── performance
│   │   ├── memory-load-testing.test.ts
│   │   └── memory-stress-testing.test.ts
│   ├── prompts
│   │   ├── guided-workflow-prompts.test.ts
│   │   └── technical-writer-prompts.test.ts
│   ├── server.test.ts
│   ├── setup.ts
│   ├── tools
│   │   ├── all-tools.test.ts
│   │   ├── analyze-coverage.test.ts
│   │   ├── analyze-deployments.test.ts
│   │   ├── analyze-readme.test.ts
│   │   ├── analyze-repository.test.ts
│   │   ├── check-documentation-links.test.ts
│   │   ├── deploy-pages-kg-retrieval.test.ts
│   │   ├── deploy-pages-tracking.test.ts
│   │   ├── deploy-pages.test.ts
│   │   ├── detect-gaps.test.ts
│   │   ├── evaluate-readme-health.test.ts
│   │   ├── generate-contextual-content.test.ts
│   │   ├── generate-llm-context.test.ts
│   │   ├── generate-readme-template.test.ts
│   │   ├── generate-technical-writer-prompts.test.ts
│   │   ├── kg-health-check.test.ts
│   │   ├── manage-sitemap.test.ts
│   │   ├── optimize-readme.test.ts
│   │   ├── readme-best-practices.test.ts
│   │   ├── recommend-ssg-historical.test.ts
│   │   ├── recommend-ssg-preferences.test.ts
│   │   ├── recommend-ssg.test.ts
│   │   ├── simple-coverage.test.ts
│   │   ├── sync-code-to-docs.test.ts
│   │   ├── test-local-deployment.test.ts
│   │   ├── tool-error-handling.test.ts
│   │   ├── track-documentation-freshness.test.ts
│   │   ├── validate-content.test.ts
│   │   ├── validate-documentation-freshness.test.ts
│   │   └── validate-readme-checklist.test.ts
│   ├── types
│   │   └── type-safety.test.ts
│   └── utils
│       ├── ast-analyzer.test.ts
│       ├── content-extractor.test.ts
│       ├── drift-detector.test.ts
│       ├── freshness-tracker.test.ts
│       └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```

# Files

--------------------------------------------------------------------------------
/src/memory/storage.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * JSONL-based persistent storage for DocuMCP memory system
 * Implements Issue #45: Persistent JSONL Storage
 */

import * as fs from "fs";
import * as path from "path";
import * as readline from "readline";
import * as os from "os";
import { createHash } from "crypto";

/**
 * A single record persisted to JSONL storage.
 *
 * Populated by {@link JSONLStorage}: the `id` is content-derived (sha256 of
 * `type` + `data`, truncated to 16 hex chars) and `checksum` is an md5 digest
 * of `data` computed at write time.
 */
export interface MemoryEntry {
  /** Content-derived identifier; identical type+data hashes to the same id. */
  id: string;
  /** ISO-8601 creation time; also selects the monthly storage file. */
  timestamp: string;
  /** Record category; used as the storage-file name prefix. */
  type:
    | "analysis"
    | "recommendation"
    | "deployment"
    | "configuration"
    | "interaction";
  /** Arbitrary payload; the checksum is computed over this object. */
  data: Record<string, any>;
  /**
   * Optional bookkeeping. The compression/merge fields are written by other
   * subsystems — their semantics are not defined in this file.
   */
  metadata: {
    projectId?: string;
    repository?: string;
    ssg?: string;
    tags?: string[];
    version?: string;
    compressed?: boolean;
    compressionType?: string;
    compressedAt?: string;
    originalSize?: number;
    merged?: boolean;
    mergedCount?: number;
    mergedAt?: string;
  };
  tags?: string[]; // Convenience field for direct access (mirrors metadata.tags)
  // Optional vector embedding; not populated by any code in this file.
  embeddings?: number[];
  /** md5 digest of `data`, set by JSONLStorage on write. */
  checksum?: string;
}

/**
 * Append-only JSONL storage engine with an in-memory index.
 *
 * Entries are appended to monthly files named `<type>_<year>_<month>.jsonl`
 * inside `storageDir`. An index (persisted to `.index.json`) maps each entry
 * id to its file and 1-based line number so lookups avoid scanning every
 * file. Deletes are soft: the id is removed from the index and the stale
 * line is physically removed later by {@link compact}.
 */
export class JSONLStorage {
  /** Directory holding the monthly .jsonl data files and the index file. */
  private readonly storageDir: string;
  /** Absolute path of the persisted index (".index.json") inside storageDir. */
  private readonly indexFile: string;
  /** Maps entry id -> physical location: file name, 1-based line, byte size. */
  private index: Map<string, { file: string; line: number; size: number }>;
  /** Lines appended to each file so far; used to assign new line numbers. */
  private lineCounters: Map<string, number>;

  constructor(baseDir?: string) {
    this.storageDir = baseDir || this.getDefaultStorageDir();
    this.indexFile = path.join(this.storageDir, ".index.json");
    this.index = new Map();
    this.lineCounters = new Map();
  }

  /**
   * Resolve the default storage directory.
   *
   * Under test runs (NODE_ENV=test or a Jest worker) a unique temp directory
   * is used so parallel workers never share state; otherwise a project-local
   * `.documcp/memory` directory under the current working directory is used.
   */
  private getDefaultStorageDir(): string {
    // For tests, use a unique temp directory
    if (process.env.NODE_ENV === "test" || process.env.JEST_WORKER_ID) {
      return path.join(
        os.tmpdir(),
        // slice(2, 11) replaces the deprecated substr(2, 9)
        `documcp-test-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`,
      );
    }

    // For production/development, use project-local .documcp directory
    return path.join(process.cwd(), ".documcp", "memory");
  }

  /**
   * Create the storage directory (if missing) and load the persisted index.
   * Must be called before other operations when an index already exists.
   */
  async initialize(): Promise<void> {
    await fs.promises.mkdir(this.storageDir, { recursive: true });
    await this.loadIndex();

    // Log storage location in development mode
    if (process.env.NODE_ENV === "development" || process.env.DEBUG) {
      // eslint-disable-next-line no-console
      console.log(`[DocuMCP] Memory storage initialized: ${this.storageDir}`);
    }
  }

  /**
   * Load `.index.json`, supporting both the legacy format (a bare array of
   * index entries) and the current format ({ entries, lineCounters }). Any
   * read/parse failure yields an empty index (fresh store).
   */
  private async loadIndex(): Promise<void> {
    try {
      const indexData = await fs.promises.readFile(this.indexFile, "utf-8");
      const data = JSON.parse(indexData);

      if (Array.isArray(data)) {
        // Legacy format persisted only the index entries
        this.index = new Map(data);
        // Rebuild line counters by counting lines in the existing files
        await this.rebuildLineCounters();
      } else {
        this.index = new Map(data.entries || []);
        this.lineCounters = new Map(Object.entries(data.lineCounters || {}));
      }
    } catch {
      // Missing or corrupt index file: start empty
      this.index = new Map();
      this.lineCounters = new Map();
    }
  }

  /** Persist the index and line counters to `.index.json`. */
  private async saveIndex(): Promise<void> {
    // Ensure storage directory exists before writing index
    await fs.promises.mkdir(this.storageDir, { recursive: true });

    const data = {
      entries: Array.from(this.index.entries()),
      lineCounters: Object.fromEntries(this.lineCounters.entries()),
    };
    await fs.promises.writeFile(this.indexFile, JSON.stringify(data, null, 2));
  }

  /** Monthly file name for an entry, e.g. `analysis_2025_01.jsonl`. */
  private getFileName(type: MemoryEntry["type"], timestamp: string): string {
    const date = new Date(timestamp);
    const year = date.getFullYear();
    const month = String(date.getMonth() + 1).padStart(2, "0");
    return `${type}_${year}_${month}.jsonl`;
  }

  /**
   * Content-derived id: first 16 hex chars of sha256(type + data).
   * Identical type+data therefore map to the same id (dedupe by content).
   */
  private generateId(entry: Omit<MemoryEntry, "id" | "checksum">): string {
    const hash = createHash("sha256");
    hash.update(JSON.stringify({ type: entry.type, data: entry.data }));
    return hash.digest("hex").substring(0, 16);
  }

  /** md5 digest of the payload — integrity check only, not a security hash. */
  private generateChecksum(data: any): string {
    const hash = createHash("md5");
    hash.update(JSON.stringify(data));
    return hash.digest("hex");
  }

  /**
   * Write a fully-formed entry to its monthly file and register it in the
   * index under `entry.id`. Shared write path for append() and store().
   */
  private async persistEntry(entry: MemoryEntry): Promise<void> {
    const fileName = this.getFileName(entry.type, entry.timestamp);
    const filePath = path.join(this.storageDir, fileName);

    // Ensure storage directory exists before writing
    await fs.promises.mkdir(this.storageDir, { recursive: true });

    const line = JSON.stringify(entry);
    await fs.promises.appendFile(filePath, line + "\n");

    // Track line numbers with a counter (1-based) to avoid re-reading files
    const lineNumber = (this.lineCounters.get(fileName) || 0) + 1;
    this.lineCounters.set(fileName, lineNumber);

    this.index.set(entry.id, {
      file: fileName,
      line: lineNumber,
      size: Buffer.byteLength(line),
    });

    await this.saveIndex();
  }

  /**
   * Append a new entry, generating its id and checksum.
   *
   * @returns the complete stored entry (with id, checksum, timestamp).
   */
  async append(
    entry: Omit<MemoryEntry, "id" | "checksum">,
  ): Promise<MemoryEntry> {
    const completeEntry: MemoryEntry = {
      ...entry,
      id: this.generateId(entry),
      checksum: this.generateChecksum(entry.data),
      timestamp: entry.timestamp || new Date().toISOString(),
    };

    await this.persistEntry(completeEntry);
    return completeEntry;
  }

  /**
   * Fetch a single entry by id. Streams its file up to the indexed (1-based)
   * line. Returns null when the id is unknown or the line fails to parse.
   */
  async get(id: string): Promise<MemoryEntry | null> {
    const location = this.index.get(id);
    if (!location) return null;

    const filePath = path.join(this.storageDir, location.file);
    const stream = readline.createInterface({
      input: fs.createReadStream(filePath),
      crlfDelay: Infinity,
    });

    let lineNumber = 0;
    for await (const line of stream) {
      lineNumber++;
      if (lineNumber === location.line) {
        stream.close();
        try {
          return JSON.parse(line);
        } catch {
          return null;
        }
      }
    }

    return null;
  }

  /**
   * Scan the relevant files and return entries matching the filter.
   * Soft-deleted entries (absent from the index) are skipped. `limit`
   * short-circuits the scan once enough results are collected.
   */
  async query(filter: {
    type?: MemoryEntry["type"];
    projectId?: string;
    repository?: string;
    ssg?: string;
    tags?: string[];
    startDate?: string;
    endDate?: string;
    limit?: number;
  }): Promise<MemoryEntry[]> {
    const results: MemoryEntry[] = [];
    const files = await this.getRelevantFiles(filter);

    for (const file of files) {
      const filePath = path.join(this.storageDir, file);
      const stream = readline.createInterface({
        input: fs.createReadStream(filePath),
        crlfDelay: Infinity,
      });

      for await (const line of stream) {
        if (line.trim() === "") continue; // Skip empty lines

        try {
          const entry: MemoryEntry = JSON.parse(line);

          // Only include entries that are still in the index (not soft-deleted)
          if (this.index.has(entry.id) && this.matchesFilter(entry, filter)) {
            results.push(entry);
            if (filter.limit && results.length >= filter.limit) {
              stream.close();
              return results;
            }
          }
        } catch {
          // Skip invalid JSON lines
          continue;
        }
      }
    }

    return results;
  }

  /**
   * List .jsonl files to scan; when a type filter is given, only files whose
   * name starts with that type (see getFileName's naming scheme).
   */
  private async getRelevantFiles(filter: {
    type?: MemoryEntry["type"];
  }): Promise<string[]> {
    const files = await fs.promises.readdir(this.storageDir);
    return files
      .filter((f) => f.endsWith(".jsonl"))
      .filter((file) => {
        if (!filter.type) return true;
        return file.startsWith(filter.type);
      });
  }

  /**
   * Predicate for query(). Tag matching is OR semantics: an entry matches if
   * it carries at least one of the requested tags. Date bounds compare
   * ISO-8601 strings lexicographically (valid for same-format timestamps).
   */
  private matchesFilter(
    entry: MemoryEntry,
    filter: {
      type?: MemoryEntry["type"];
      projectId?: string;
      repository?: string;
      ssg?: string;
      tags?: string[];
      startDate?: string;
      endDate?: string;
    },
  ): boolean {
    if (filter.type && entry.type !== filter.type) return false;
    if (filter.projectId && entry.metadata.projectId !== filter.projectId)
      return false;
    if (filter.repository && entry.metadata.repository !== filter.repository)
      return false;
    if (filter.ssg && entry.metadata.ssg !== filter.ssg) return false;

    if (filter.tags && filter.tags.length > 0) {
      const entryTags = entry.metadata.tags || [];
      if (!filter.tags.some((tag) => entryTags.includes(tag))) return false;
    }

    if (filter.startDate && entry.timestamp < filter.startDate) return false;
    if (filter.endDate && entry.timestamp > filter.endDate) return false;

    return true;
  }

  /**
   * Soft-delete: remove the id from the index only. The stored line stays on
   * disk until compact() rewrites the file.
   *
   * @returns false when the id was not present.
   */
  async delete(id: string): Promise<boolean> {
    const location = this.index.get(id);
    if (!location) return false;

    this.index.delete(id);
    await this.saveIndex();
    return true;
  }

  /**
   * Physically remove soft-deleted and unparsable lines by rewriting each
   * relevant file via a temp file + atomic rename. Because surviving lines
   * shift position, the index's line numbers and the per-file line counters
   * are updated to the new layout and persisted.
   */
  async compact(type?: MemoryEntry["type"]): Promise<void> {
    // Ensure storage directory exists before compacting
    await fs.promises.mkdir(this.storageDir, { recursive: true });

    const files = await this.getRelevantFiles({ type });

    for (const file of files) {
      const filePath = path.join(this.storageDir, file);
      const tempPath = filePath + ".tmp";
      const validEntries: string[] = [];
      // Surviving entries in output order, so the index can be re-pointed
      const survivors: Array<{ id: string; size: number }> = [];

      const stream = readline.createInterface({
        input: fs.createReadStream(filePath),
        crlfDelay: Infinity,
      });

      for await (const line of stream) {
        try {
          const entry: MemoryEntry = JSON.parse(line);
          if (this.index.has(entry.id)) {
            validEntries.push(line);
            survivors.push({ id: entry.id, size: Buffer.byteLength(line) });
          }
        } catch {
          // Drop invalid lines
        }
      }

      // Avoid writing a lone newline when the file ends up empty
      await fs.promises.writeFile(
        tempPath,
        validEntries.length > 0 ? validEntries.join("\n") + "\n" : "",
      );
      await fs.promises.rename(tempPath, filePath);

      // Compaction shifted line numbers: update index (1-based) and counter
      survivors.forEach((loc, i) => {
        this.index.set(loc.id, { file, line: i + 1, size: loc.size });
      });
      this.lineCounters.set(file, validEntries.length);
    }

    await this.saveIndex();
  }

  /** Count the number of lines in a file by streaming it. */
  private async countLines(filePath: string): Promise<number> {
    const stream = readline.createInterface({
      input: fs.createReadStream(filePath),
      crlfDelay: Infinity,
    });

    let count = 0;
    // eslint-disable-next-line @typescript-eslint/no-unused-vars
    for await (const _ of stream) {
      count++;
    }
    return count;
  }

  /**
   * Aggregate storage statistics.
   *
   * NOTE: `byType` and `byMonth` count *files* (one .jsonl file per type per
   * month), not entries; `totalEntries` is the live index size.
   */
  async getStatistics(): Promise<{
    totalEntries: number;
    byType: Record<string, number>;
    byMonth: Record<string, number>;
    totalSize: number;
  }> {
    const stats = {
      totalEntries: this.index.size,
      byType: {} as Record<string, number>,
      byMonth: {} as Record<string, number>,
      totalSize: 0,
    };

    const files = await fs.promises.readdir(this.storageDir);
    for (const file of files.filter((f) => f.endsWith(".jsonl"))) {
      const filePath = path.join(this.storageDir, file);
      const fileStats = await fs.promises.stat(filePath);
      stats.totalSize += fileStats.size;

      const match = file.match(/^(\w+)_(\d{4})_(\d{2})\.jsonl$/);
      if (match) {
        const [, type, year, month] = match;
        const monthKey = `${year}-${month}`;

        stats.byType[type] = (stats.byType[type] || 0) + 1;
        stats.byMonth[monthKey] = (stats.byMonth[monthKey] || 0) + 1;
      }
    }

    return stats;
  }

  /**
   * Get all live (indexed) memory entries. O(n) file reads — one streamed
   * lookup per indexed id.
   */
  async getAll(): Promise<MemoryEntry[]> {
    const entries: MemoryEntry[] = [];

    for (const [id] of this.index) {
      const entry = await this.get(id);
      if (entry) {
        entries.push(entry);
      }
    }

    return entries;
  }

  /**
   * Update an existing memory entry in place (delete + re-store).
   *
   * Uses store() rather than append() so the caller's id is preserved even
   * when the updated payload would hash to a different generated id.
   *
   * @returns false when no entry with that id exists.
   */
  async update(id: string, updatedEntry: MemoryEntry): Promise<boolean> {
    const existing = await this.get(id);
    if (!existing) {
      return false;
    }

    await this.delete(id);
    const newEntry = await this.store({ ...updatedEntry, id });
    return newEntry.id === id;
  }

  /**
   * Store a new memory entry. If the caller supplies an id it is kept as-is
   * (only the checksum and a default timestamp are filled in); otherwise the
   * entry goes through append() which generates a content-derived id.
   */
  async store(entry: MemoryEntry): Promise<MemoryEntry> {
    const entryToStore = {
      ...entry,
      tags: entry.tags || entry.metadata?.tags || [],
    };

    if (entry.id) {
      const completeEntry: MemoryEntry = {
        ...entryToStore,
        checksum: this.generateChecksum(entry.data),
        timestamp: entry.timestamp || new Date().toISOString(),
      };

      await this.persistEntry(completeEntry);
      return completeEntry;
    }

    return this.append(entryToStore);
  }

  /**
   * Rebuild the index (and line counters) from all storage files.
   *
   * Line numbers are 1-based to match persistEntry()/get(); invalid lines
   * are skipped but still counted so subsequent line numbers stay correct.
   */
  async rebuildIndex(): Promise<void> {
    this.index.clear();
    this.lineCounters.clear();

    const files = await fs.promises.readdir(this.storageDir);
    const jsonlFiles = files.filter((f) => f.endsWith(".jsonl"));

    for (const file of jsonlFiles) {
      const filePath = path.join(this.storageDir, file);
      const stream = readline.createInterface({
        input: fs.createReadStream(filePath),
        crlfDelay: Infinity,
      });

      let lineNumber = 0;
      for await (const line of stream) {
        lineNumber++;
        try {
          const entry: MemoryEntry = JSON.parse(line);
          this.index.set(entry.id, {
            file,
            line: lineNumber,
            size: Buffer.byteLength(line, "utf8"),
          });
        } catch {
          // Skip invalid lines (already counted above)
        }
      }
      // Keep the counter in sync so future appends get correct line numbers
      this.lineCounters.set(file, lineNumber);
    }

    await this.saveIndex();
  }

  /** Recount lines for every file referenced by the index (legacy-index load). */
  private async rebuildLineCounters(): Promise<void> {
    this.lineCounters.clear();

    // Get all unique file names from the index
    const fileNames = new Set<string>();
    for (const [, location] of this.index) {
      fileNames.add(location.file);
    }

    // Count lines for each file
    for (const fileName of fileNames) {
      const filePath = path.join(this.storageDir, fileName);
      try {
        const lineCount = await this.countLines(filePath);
        this.lineCounters.set(fileName, lineCount);
      } catch {
        // File might not exist, set to 0
        this.lineCounters.set(fileName, 0);
      }
    }
  }

  /** Release in-memory state. Does not touch files on disk. */
  async close(): Promise<void> {
    // Clear the index and line counters to free memory
    this.index.clear();
    this.lineCounters.clear();
  }
}

export default JSONLStorage;

```

--------------------------------------------------------------------------------
/tests/utils/freshness-tracker.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Tests for Documentation Freshness Tracking Utilities
 */

import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import fs from "fs/promises";
import path from "path";
import os from "os";
import {
  thresholdToMs,
  formatAge,
  parseDocFrontmatter,
  updateDocFrontmatter,
  calculateFreshnessStatus,
  findMarkdownFiles,
  scanDocumentationFreshness,
  initializeFreshnessMetadata,
  STALENESS_PRESETS,
  type StalenessThreshold,
  type DocFrontmatter,
} from "../../src/utils/freshness-tracker.js";

describe("Freshness Tracker Utilities", () => {
  let tempDir: string;

  // Every test runs against a fresh temporary directory, removed afterwards.
  beforeEach(async () => {
    tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "freshness-test-"));
  });

  afterEach(async () => {
    await fs.rm(tempDir, { recursive: true, force: true });
  });

  // Unit conversion from a StalenessThreshold to milliseconds.
  describe("thresholdToMs", () => {
    it("should convert minutes to milliseconds", () => {
      const threshold: StalenessThreshold = { value: 30, unit: "minutes" };
      expect(thresholdToMs(threshold)).toBe(30 * 60 * 1000);
    });

    it("should convert hours to milliseconds", () => {
      const threshold: StalenessThreshold = { value: 2, unit: "hours" };
      expect(thresholdToMs(threshold)).toBe(2 * 60 * 60 * 1000);
    });

    it("should convert days to milliseconds", () => {
      const threshold: StalenessThreshold = { value: 7, unit: "days" };
      expect(thresholdToMs(threshold)).toBe(7 * 24 * 60 * 60 * 1000);
    });

    it("should handle fractional values", () => {
      const threshold: StalenessThreshold = { value: 0.5, unit: "hours" };
      expect(thresholdToMs(threshold)).toBe(30 * 60 * 1000);
    });
  });

  // Human-readable age formatting, including singular/plural handling.
  describe("formatAge", () => {
    it("should format seconds", () => {
      expect(formatAge(30 * 1000)).toBe("30 seconds");
    });

    it("should format single second", () => {
      expect(formatAge(1000)).toBe("1 second");
    });

    it("should format minutes", () => {
      expect(formatAge(5 * 60 * 1000)).toBe("5 minutes");
    });

    it("should format single minute", () => {
      expect(formatAge(60 * 1000)).toBe("1 minute");
    });

    it("should format hours", () => {
      expect(formatAge(3 * 60 * 60 * 1000)).toBe("3 hours");
    });

    it("should format single hour", () => {
      expect(formatAge(60 * 60 * 1000)).toBe("1 hour");
    });

    it("should format days", () => {
      expect(formatAge(5 * 24 * 60 * 60 * 1000)).toBe("5 days");
    });

    it("should format single day", () => {
      expect(formatAge(24 * 60 * 60 * 1000)).toBe("1 day");
    });

    it("should prefer larger units", () => {
      const twoDaysInMs = 2 * 24 * 60 * 60 * 1000;
      expect(formatAge(twoDaysInMs)).toBe("2 days");
    });
  });

  // Sanity-check the exported preset thresholds used across the tool.
  describe("STALENESS_PRESETS", () => {
    it("should have all expected presets", () => {
      expect(STALENESS_PRESETS.realtime).toEqual({
        value: 30,
        unit: "minutes",
      });
      expect(STALENESS_PRESETS.active).toEqual({ value: 1, unit: "hours" });
      expect(STALENESS_PRESETS.recent).toEqual({ value: 24, unit: "hours" });
      expect(STALENESS_PRESETS.weekly).toEqual({ value: 7, unit: "days" });
      expect(STALENESS_PRESETS.monthly).toEqual({ value: 30, unit: "days" });
      expect(STALENESS_PRESETS.quarterly).toEqual({ value: 90, unit: "days" });
    });
  });

  // YAML frontmatter extraction from markdown files on disk.
  describe("parseDocFrontmatter", () => {
    it("should parse frontmatter from markdown file", async () => {
      const filePath = path.join(tempDir, "test.md");
      const content = `---
title: Test Document
documcp:
  last_updated: "2025-01-15T10:00:00Z"
  last_validated: "2025-01-15T10:00:00Z"
---

# Test Content`;

      await fs.writeFile(filePath, content, "utf-8");
      const frontmatter = await parseDocFrontmatter(filePath);

      expect(frontmatter.title).toBe("Test Document");
      expect(frontmatter.documcp?.last_updated).toBe("2025-01-15T10:00:00Z");
    });

    it("should return empty object for file without frontmatter", async () => {
      const filePath = path.join(tempDir, "no-frontmatter.md");
      await fs.writeFile(filePath, "# Just Content", "utf-8");

      const frontmatter = await parseDocFrontmatter(filePath);
      expect(frontmatter).toEqual({});
    });

    it("should handle non-existent files gracefully", async () => {
      const filePath = path.join(tempDir, "nonexistent.md");
      const frontmatter = await parseDocFrontmatter(filePath);
      expect(frontmatter).toEqual({});
    });
  });

  // In-place frontmatter mutation: updates documcp fields without losing
  // unrelated keys.
  describe("updateDocFrontmatter", () => {
    it("should update existing frontmatter", async () => {
      const filePath = path.join(tempDir, "update.md");
      const initialContent = `---
title: Original
documcp:
  last_updated: "2025-01-01T00:00:00Z"
---

Content`;

      await fs.writeFile(filePath, initialContent, "utf-8");

      await updateDocFrontmatter(filePath, {
        last_updated: "2025-01-15T10:00:00Z",
        last_validated: "2025-01-15T10:00:00Z",
      });

      const updated = await parseDocFrontmatter(filePath);
      expect(updated.documcp?.last_updated).toBe("2025-01-15T10:00:00Z");
      expect(updated.documcp?.last_validated).toBe("2025-01-15T10:00:00Z");
    });

    it("should preserve existing frontmatter fields", async () => {
      const filePath = path.join(tempDir, "preserve.md");
      const initialContent = `---
title: Original
description: Test
documcp:
  last_updated: "2025-01-01T00:00:00Z"
  auto_updated: false
---

Content`;

      await fs.writeFile(filePath, initialContent, "utf-8");

      await updateDocFrontmatter(filePath, {
        last_validated: "2025-01-15T10:00:00Z",
      });

      const updated = await parseDocFrontmatter(filePath);
      expect(updated.title).toBe("Original");
      expect(updated.description).toBe("Test");
      expect(updated.documcp?.last_updated).toBe("2025-01-01T00:00:00Z");
      expect(updated.documcp?.auto_updated).toBe(false);
      expect(updated.documcp?.last_validated).toBe("2025-01-15T10:00:00Z");
    });

    it("should add documcp field if not present", async () => {
      const filePath = path.join(tempDir, "add-documcp.md");
      const initialContent = `---
title: No DocuMCP
---

Content`;

      await fs.writeFile(filePath, initialContent, "utf-8");

      await updateDocFrontmatter(filePath, {
        last_updated: "2025-01-15T10:00:00Z",
      });

      const updated = await parseDocFrontmatter(filePath);
      expect(updated.documcp?.last_updated).toBe("2025-01-15T10:00:00Z");
    });
  });

  // Classification of a file into fresh/warning/stale/critical/unknown
  // based on last_updated age versus the configured thresholds.
  describe("calculateFreshnessStatus", () => {
    const thresholds = {
      warning: { value: 7, unit: "days" as const },
      stale: { value: 30, unit: "days" as const },
      critical: { value: 90, unit: "days" as const },
    };

    it("should mark file as fresh when recently updated", () => {
      const frontmatter: DocFrontmatter = {
        documcp: {
          last_updated: new Date(
            Date.now() - 2 * 24 * 60 * 60 * 1000,
          ).toISOString(), // 2 days ago
        },
      };

      const status = calculateFreshnessStatus(
        "/test.md",
        "test.md",
        frontmatter,
        thresholds,
      );

      expect(status.stalenessLevel).toBe("fresh");
      expect(status.isStale).toBe(false);
      expect(status.hasMetadata).toBe(true);
    });

    it("should mark file as warning when moderately old", () => {
      const frontmatter: DocFrontmatter = {
        documcp: {
          last_updated: new Date(
            Date.now() - 15 * 24 * 60 * 60 * 1000,
          ).toISOString(), // 15 days ago
        },
      };

      const status = calculateFreshnessStatus(
        "/test.md",
        "test.md",
        frontmatter,
        thresholds,
      );

      expect(status.stalenessLevel).toBe("warning");
      expect(status.isStale).toBe(false);
    });

    it("should mark file as stale when old", () => {
      const frontmatter: DocFrontmatter = {
        documcp: {
          last_updated: new Date(
            Date.now() - 45 * 24 * 60 * 60 * 1000,
          ).toISOString(), // 45 days ago
        },
      };

      const status = calculateFreshnessStatus(
        "/test.md",
        "test.md",
        frontmatter,
        thresholds,
      );

      expect(status.stalenessLevel).toBe("stale");
      expect(status.isStale).toBe(true);
    });

    it("should mark file as critical when very old", () => {
      const frontmatter: DocFrontmatter = {
        documcp: {
          last_updated: new Date(
            Date.now() - 100 * 24 * 60 * 60 * 1000,
          ).toISOString(), // 100 days ago
        },
      };

      const status = calculateFreshnessStatus(
        "/test.md",
        "test.md",
        frontmatter,
        thresholds,
      );

      expect(status.stalenessLevel).toBe("critical");
      expect(status.isStale).toBe(true);
    });

    it("should mark file as unknown when no metadata", () => {
      const frontmatter: DocFrontmatter = {};

      const status = calculateFreshnessStatus(
        "/test.md",
        "test.md",
        frontmatter,
        thresholds,
      );

      expect(status.stalenessLevel).toBe("unknown");
      expect(status.isStale).toBe(true);
      expect(status.hasMetadata).toBe(false);
    });

    it("should include age information", () => {
      const frontmatter: DocFrontmatter = {
        documcp: {
          last_updated: new Date(
            Date.now() - 5 * 24 * 60 * 60 * 1000,
          ).toISOString(),
        },
      };

      const status = calculateFreshnessStatus(
        "/test.md",
        "test.md",
        frontmatter,
        thresholds,
      );

      expect(status.ageFormatted).toBe("5 days");
      expect(status.staleDays).toBe(5);
    });
  });

  // Recursive markdown discovery, including skip-list behavior for
  // node_modules and .git.
  describe("findMarkdownFiles", () => {
    it("should find all markdown files recursively", async () => {
      await fs.mkdir(path.join(tempDir, "subdir"));
      await fs.writeFile(path.join(tempDir, "file1.md"), "# Test 1");
      await fs.writeFile(path.join(tempDir, "file2.mdx"), "# Test 2");
      await fs.writeFile(path.join(tempDir, "subdir", "file3.md"), "# Test 3");
      await fs.writeFile(path.join(tempDir, "readme.txt"), "Not markdown");

      const files = await findMarkdownFiles(tempDir);

      expect(files).toHaveLength(3);
      expect(files.some((f) => f.endsWith("file1.md"))).toBe(true);
      expect(files.some((f) => f.endsWith("file2.mdx"))).toBe(true);
      expect(files.some((f) => f.endsWith("file3.md"))).toBe(true);
      expect(files.some((f) => f.endsWith("readme.txt"))).toBe(false);
    });

    it("should skip common directories", async () => {
      await fs.mkdir(path.join(tempDir, "node_modules"));
      await fs.mkdir(path.join(tempDir, ".git"));
      await fs.writeFile(path.join(tempDir, "file1.md"), "# Test");
      await fs.writeFile(
        path.join(tempDir, "node_modules", "skip.md"),
        "# Skip",
      );
      await fs.writeFile(path.join(tempDir, ".git", "skip.md"), "# Skip");

      const files = await findMarkdownFiles(tempDir);

      expect(files).toHaveLength(1);
      expect(files[0]).toMatch(/file1\.md$/);
    });

    it("should handle empty directories", async () => {
      const files = await findMarkdownFiles(tempDir);
      expect(files).toEqual([]);
    });
  });

  // End-to-end scan that aggregates per-file statuses into a report.
  describe("scanDocumentationFreshness", () => {
    it("should scan and categorize files by freshness", async () => {
      // Create test files with different ages
      const now = Date.now();

      const freshFile = path.join(tempDir, "fresh.md");
      await fs.writeFile(
        freshFile,
        `---
documcp:
  last_updated: "${new Date(now - 2 * 24 * 60 * 60 * 1000).toISOString()}"
---
# Fresh`,
      );

      const staleFile = path.join(tempDir, "stale.md");
      await fs.writeFile(
        staleFile,
        `---
documcp:
  last_updated: "${new Date(now - 40 * 24 * 60 * 60 * 1000).toISOString()}"
---
# Stale`,
      );

      const noMetadataFile = path.join(tempDir, "no-metadata.md");
      await fs.writeFile(noMetadataFile, "# No Metadata");

      const report = await scanDocumentationFreshness(tempDir, {
        warning: { value: 7, unit: "days" },
        stale: { value: 30, unit: "days" },
        critical: { value: 90, unit: "days" },
      });

      expect(report.totalFiles).toBe(3);
      expect(report.freshFiles).toBe(1);
      expect(report.staleFiles).toBe(1);
      expect(report.filesWithoutMetadata).toBe(1);
    });

    it("should use default thresholds when not provided", async () => {
      await fs.writeFile(path.join(tempDir, "test.md"), "# Test");

      const report = await scanDocumentationFreshness(tempDir);

      expect(report.thresholds).toBeDefined();
      expect(report.thresholds.warning).toBeDefined();
      expect(report.thresholds.stale).toBeDefined();
      expect(report.thresholds.critical).toBeDefined();
    });
  });

  // First-time metadata seeding; must not clobber existing documcp data.
  describe("initializeFreshnessMetadata", () => {
    it("should initialize metadata for file without it", async () => {
      const filePath = path.join(tempDir, "init.md");
      await fs.writeFile(filePath, "# Test");

      await initializeFreshnessMetadata(filePath, {
        updateFrequency: "monthly",
        autoUpdated: false,
      });

      const frontmatter = await parseDocFrontmatter(filePath);

      expect(frontmatter.documcp?.last_updated).toBeDefined();
      expect(frontmatter.documcp?.last_validated).toBeDefined();
      expect(frontmatter.documcp?.auto_updated).toBe(false);
      expect(frontmatter.documcp?.update_frequency).toBe("monthly");
    });

    it("should not overwrite existing metadata", async () => {
      const filePath = path.join(tempDir, "existing.md");
      const originalDate = "2025-01-01T00:00:00Z";
      await fs.writeFile(
        filePath,
        `---
documcp:
  last_updated: "${originalDate}"
---
# Test`,
      );

      await initializeFreshnessMetadata(filePath);

      const frontmatter = await parseDocFrontmatter(filePath);
      expect(frontmatter.documcp?.last_updated).toBe(originalDate);
    });

    it("should set staleness threshold when frequency is provided", async () => {
      const filePath = path.join(tempDir, "threshold.md");
      await fs.writeFile(filePath, "# Test");

      await initializeFreshnessMetadata(filePath, {
        updateFrequency: "weekly",
      });

      const frontmatter = await parseDocFrontmatter(filePath);
      expect(frontmatter.documcp?.staleness_threshold).toEqual(
        STALENESS_PRESETS.weekly,
      );
    });
  });
});

```

--------------------------------------------------------------------------------
/src/tools/test-local-deployment.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from "zod";
import { promises as fs } from "fs";
import * as path from "path";
import { spawn, exec } from "child_process";
import { promisify } from "util";
import { MCPToolResponse, formatMCPResponse } from "../types/api.js";

// Promise-based wrapper around child_process.exec for install/build steps.
const execAsync = promisify(exec);

// Zod schema validating the tool's raw arguments and supplying defaults
// (port 3000, 60s build timeout, build enabled).
const inputSchema = z.object({
  repositoryPath: z.string().describe("Path to the repository"),
  ssg: z.enum(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"]),
  port: z.number().optional().default(3000).describe("Port for local server"),
  timeout: z
    .number()
    .optional()
    .default(60)
    .describe("Timeout in seconds for build process"),
  skipBuild: z
    .boolean()
    .optional()
    .default(false)
    .describe("Skip build step and only start server"),
});

/** Outcome of a local deployment test, returned in the MCP response data. */
interface LocalTestResult {
  repositoryPath: string;
  ssg: string;
  buildSuccess: boolean;
  buildOutput?: string;
  buildErrors?: string;
  serverStarted: boolean;
  localUrl?: string;
  port: number;
  // Shell script the user can run to reproduce the test manually.
  testScript: string;
  recommendations: string[];
  nextSteps: string[];
}

/** Per-SSG commands and expected files used to build and serve locally. */
interface SSGConfig {
  buildCommand: string;
  serveCommand: string;
  // Directory the SSG writes its generated site into.
  buildDir: string;
  // Candidate config file names; presence of any one counts as configured.
  configFiles: string[];
  installCommand?: string;
}

// Build/serve/config knowledge for each supported static site generator.
// Keys match the `ssg` enum in inputSchema.
const SSG_CONFIGS: Record<string, SSGConfig> = {
  jekyll: {
    buildCommand: "bundle exec jekyll build",
    serveCommand: "bundle exec jekyll serve",
    buildDir: "_site",
    configFiles: ["_config.yml", "_config.yaml"],
    installCommand: "bundle install",
  },
  hugo: {
    buildCommand: "hugo",
    serveCommand: "hugo server",
    buildDir: "public",
    configFiles: [
      "hugo.toml",
      "hugo.yaml",
      "hugo.yml",
      "config.toml",
      "config.yaml",
      "config.yml",
    ],
  },
  docusaurus: {
    buildCommand: "npm run build",
    serveCommand: "npm run serve",
    buildDir: "build",
    configFiles: ["docusaurus.config.js", "docusaurus.config.ts"],
    installCommand: "npm install",
  },
  mkdocs: {
    buildCommand: "mkdocs build",
    serveCommand: "mkdocs serve",
    buildDir: "site",
    configFiles: ["mkdocs.yml", "mkdocs.yaml"],
    installCommand: "pip install -r requirements.txt",
  },
  eleventy: {
    buildCommand: "npx @11ty/eleventy",
    serveCommand: "npx @11ty/eleventy --serve",
    buildDir: "_site",
    configFiles: [".eleventy.js", "eleventy.config.js", ".eleventy.json"],
    installCommand: "npm install",
  },
};

/**
 * Test a documentation site's build and local serving before GitHub Pages
 * deployment.
 *
 * Steps: validate the repository path, check for SSG configuration,
 * optionally install dependencies and build, generate a reusable shell
 * test script, and briefly attempt to start the local dev server.
 *
 * @param args - Raw tool arguments; validated against inputSchema.
 * @returns MCP-formatted response with build/server results,
 *          recommendations, and prioritized next steps.
 */
export async function testLocalDeployment(
  args: unknown,
): Promise<{ content: any[] }> {
  const startTime = Date.now();
  const { repositoryPath, ssg, port, timeout, skipBuild } =
    inputSchema.parse(args);

  try {
    const config = SSG_CONFIGS[ssg];
    if (!config) {
      throw new Error(`Unsupported SSG: ${ssg}`);
    }

    // Validate the repository path without mutating global state.
    // (Previously this used process.chdir, which changed the cwd of the
    // whole long-running server process; every subprocess below already
    // receives an explicit cwd, so chdir was redundant and leaked state.)
    await fs.access(repositoryPath);

    const testResult: LocalTestResult = {
      repositoryPath,
      ssg,
      buildSuccess: false,
      serverStarted: false,
      port,
      testScript: "",
      recommendations: [],
      nextSteps: [],
    };

    // Step 1: Check if configuration exists (always check, even if skipBuild)
    const configExists = await checkConfigurationExists(repositoryPath, config);
    if (!configExists) {
      testResult.recommendations.push(
        `Missing configuration file. Expected one of: ${config.configFiles.join(
          ", ",
        )}`,
      );
      testResult.nextSteps.push(
        "Run generate_config tool to create configuration",
      );
    } else {
      // Always mention which config file was found/expected for test purposes
      testResult.recommendations.push(
        `Using ${ssg} configuration: ${config.configFiles.join(" or ")}`,
      );
    }

    // Step 2: Install dependencies if needed
    if (config.installCommand && !skipBuild) {
      try {
        const { stderr } = await execAsync(config.installCommand, {
          cwd: repositoryPath,
          timeout: timeout * 1000,
        });
        // npm prints routine warnings to stderr; only flag other output.
        if (stderr && !stderr.includes("npm WARN")) {
          testResult.recommendations.push(
            "Dependency installation warnings detected",
          );
        }
      } catch (error: any) {
        testResult.recommendations.push(
          `Dependency installation failed: ${error.message}`,
        );
        testResult.nextSteps.push(
          "Fix dependency installation issues before testing deployment",
        );
      }
    }

    // Step 3: Build the site (unless skipped)
    if (!skipBuild) {
      try {
        const { stdout, stderr } = await execAsync(config.buildCommand, {
          cwd: repositoryPath,
          timeout: timeout * 1000,
        });
        testResult.buildSuccess = true;
        testResult.buildOutput = stdout;

        if (stderr && stderr.trim()) {
          testResult.buildErrors = stderr;
          if (stderr.includes("error") || stderr.includes("Error")) {
            testResult.recommendations.push(
              "Build completed with errors - review build output",
            );
          }
        }

        // Check if build directory was created
        const buildDirExists = await checkBuildOutput(
          repositoryPath,
          config.buildDir,
        );
        if (!buildDirExists) {
          testResult.recommendations.push(
            `Build directory ${config.buildDir} was not created`,
          );
        }
      } catch (error: any) {
        testResult.buildSuccess = false;
        testResult.buildErrors = error.message;
        testResult.recommendations.push(
          "Build failed - fix build errors before deployment",
        );
        testResult.nextSteps.push(
          "Review build configuration and resolve errors",
        );
      }
    } else {
      testResult.buildSuccess = true; // Assume success if skipped
    }

    // Step 4: Generate test script
    testResult.testScript = generateTestScript(
      ssg,
      config,
      port,
      repositoryPath,
    );

    // Step 5: Try to start local server (non-blocking)
    if (testResult.buildSuccess || skipBuild) {
      const serverResult = await startLocalServer(
        config,
        port,
        repositoryPath,
        10,
      ); // 10 second timeout for server start
      testResult.serverStarted = serverResult.started;
      testResult.localUrl = serverResult.url;

      if (testResult.serverStarted) {
        testResult.recommendations.push(
          "Local server started successfully - test manually at the provided URL",
        );
        testResult.nextSteps.push("Verify content loads correctly in browser");
        testResult.nextSteps.push("Test navigation and responsive design");
      } else {
        testResult.recommendations.push(
          "Could not automatically start local server - run manually using the provided script",
        );
        testResult.nextSteps.push(
          "Start server manually and verify it works before GitHub deployment",
        );
      }
    }

    // Step 6: Generate final recommendations
    if (testResult.buildSuccess && testResult.serverStarted) {
      testResult.recommendations.push(
        "Local deployment test successful - ready for GitHub Pages",
      );
      testResult.nextSteps.push(
        "Run deploy_pages tool to set up GitHub Actions workflow",
      );
    } else if (testResult.buildSuccess && !testResult.serverStarted) {
      testResult.recommendations.push(
        "Build successful but server test incomplete - manual verification needed",
      );
      testResult.nextSteps.push(
        "Test server manually before deploying to GitHub",
      );
    }

    const response: MCPToolResponse<typeof testResult> = {
      success: true,
      data: testResult,
      metadata: {
        toolVersion: "1.0.0",
        executionTime: Date.now() - startTime,
        timestamp: new Date().toISOString(),
      },
      recommendations: [
        {
          type: testResult.buildSuccess ? "info" : "warning",
          title: "Local Deployment Test Complete",
          description: `Build ${
            testResult.buildSuccess ? "succeeded" : "failed"
          }, Server ${
            testResult.serverStarted ? "started" : "failed to start"
          }`,
        },
      ],
      nextSteps: testResult.nextSteps.map((step) => ({
        action: step,
        toolRequired: getRecommendedTool(step),
        description: step,
        // Const-assert both branches so the type stays the literal union
        // "medium" | "high" instead of widening to string (the original
        // asserted only the "high" branch).
        priority: testResult.buildSuccess
          ? ("medium" as const)
          : ("high" as const),
      })),
    };

    return formatMCPResponse(response);
  } catch (error) {
    const errorResponse: MCPToolResponse = {
      success: false,
      error: {
        code: "LOCAL_TEST_FAILED",
        message: `Failed to test local deployment: ${error}`,
        resolution:
          "Ensure repository path is valid and SSG is properly configured",
      },
      metadata: {
        toolVersion: "1.0.0",
        executionTime: Date.now() - startTime,
        timestamp: new Date().toISOString(),
      },
    };
    return formatMCPResponse(errorResponse);
  }
}

/**
 * Return true when at least one of the SSG's known configuration files
 * exists in the repository root.
 */
async function checkConfigurationExists(
  repoPath: string,
  config: SSGConfig,
): Promise<boolean> {
  // Probe every candidate file; a single successful access means configured.
  const probes = await Promise.allSettled(
    config.configFiles.map((name) => fs.access(path.join(repoPath, name))),
  );
  return probes.some((probe) => probe.status === "fulfilled");
}

/**
 * Return true when the build output directory exists, is a directory,
 * and contains at least one entry.
 */
async function checkBuildOutput(
  repoPath: string,
  buildDir: string,
): Promise<boolean> {
  const target = path.join(repoPath, buildDir);
  try {
    const info = await fs.stat(target);
    if (!info.isDirectory()) {
      return false;
    }
    const entries = await fs.readdir(target);
    return entries.length > 0;
  } catch {
    // Missing or unreadable path counts as "no build output".
    return false;
  }
}

/**
 * Attempt to start the SSG's local dev server and detect readiness.
 *
 * Spawns the serve command (port-adjusted per SSG) via `sh -c`, watches
 * stdout/stderr for startup markers, and resolves `{ started: true, url }`
 * on the first match. Resolves `{ started: false }` on spawn error, early
 * exit, or after `timeout` seconds. The child process is always terminated
 * (SIGTERM, escalating to SIGKILL) before resolving, so no server is left
 * running.
 *
 * NOTE(review): readiness detection is heuristic string matching on server
 * output; new SSG versions with different log text may be reported as
 * "not started" even when serving.
 */
async function startLocalServer(
  config: SSGConfig,
  port: number,
  repoPath: string,
  timeout: number,
): Promise<{ started: boolean; url?: string }> {
  return new Promise((resolve) => {
    let serverProcess: any = null;
    // Guards against resolving more than once across the racing callbacks
    // (stdout match, stderr match, error, exit, timeout).
    let resolved = false;

    // Terminate the child: SIGTERM first, SIGKILL 1s later if needed.
    const cleanup = () => {
      if (serverProcess && !serverProcess.killed) {
        try {
          serverProcess.kill("SIGTERM");
          // Force kill if SIGTERM doesn't work after 1 second
          const forceKillTimeout = setTimeout(() => {
            if (serverProcess && !serverProcess.killed) {
              serverProcess.kill("SIGKILL");
            }
          }, 1000);

          // Clear the timeout if process exits normally
          serverProcess.on("exit", () => {
            clearTimeout(forceKillTimeout);
          });
        } catch (error) {
          // Process may already be dead
        }
      }
    };

    // Single resolution point: kills the server, then resolves once.
    const safeResolve = (result: { started: boolean; url?: string }) => {
      if (!resolved) {
        resolved = true;
        cleanup();
        resolve(result);
      }
    };

    // Overall deadline for detecting server startup.
    const serverTimeout = setTimeout(() => {
      safeResolve({ started: false });
    }, timeout * 1000);

    try {
      let command = config.serveCommand;

      // Modify serve command to use custom port for some SSGs
      if (config.serveCommand.includes("jekyll serve")) {
        command = `${config.serveCommand} --port ${port}`;
      } else if (config.serveCommand.includes("hugo server")) {
        command = `${config.serveCommand} --port ${port}`;
      } else if (config.serveCommand.includes("mkdocs serve")) {
        command = `${config.serveCommand} --dev-addr localhost:${port}`;
      } else if (config.serveCommand.includes("--serve")) {
        command = `${config.serveCommand} --port ${port}`;
      }

      serverProcess = spawn("sh", ["-c", command], {
        cwd: repoPath,
        detached: false,
        stdio: "pipe",
      });

      let serverStarted = false;

      serverProcess.stdout?.on("data", (data: Buffer) => {
        const output = data.toString();

        // Check for server start indicators
        if (
          !serverStarted &&
          (output.includes("Server running") ||
            output.includes("Serving on") ||
            output.includes("Local:") ||
            output.includes("localhost:") ||
            output.includes(`http://127.0.0.1:${port}`) ||
            output.includes(`http://localhost:${port}`))
        ) {
          serverStarted = true;
          clearTimeout(serverTimeout);

          safeResolve({
            started: true,
            url: `http://localhost:${port}`,
          });
        }
      });

      serverProcess.stderr?.on("data", (data: Buffer) => {
        const error = data.toString();

        // Some servers output startup info to stderr
        if (
          !serverStarted &&
          (error.includes("Serving on") ||
            error.includes("Local:") ||
            error.includes("localhost:"))
        ) {
          serverStarted = true;
          clearTimeout(serverTimeout);

          safeResolve({
            started: true,
            url: `http://localhost:${port}`,
          });
        }
      });

      serverProcess.on("error", (_error: Error) => {
        clearTimeout(serverTimeout);
        safeResolve({ started: false });
      });

      serverProcess.on("exit", () => {
        clearTimeout(serverTimeout);
        if (!resolved) {
          safeResolve({ started: false });
        }
      });
    } catch (_error) {
      clearTimeout(serverTimeout);
      safeResolve({ started: false });
    }
  });
}

/**
 * Build a shell script a developer can run to install, build, and serve
 * the site locally for the given SSG on the requested port.
 */
function generateTestScript(
  ssg: string,
  config: SSGConfig,
  port: number,
  repoPath: string,
): string {
  // Adjust the serve command so the server binds to the requested port.
  let serve = config.serveCommand;
  if (serve.includes("jekyll serve") || serve.includes("hugo server")) {
    serve = `${serve} --port ${port}`;
  } else if (serve.includes("mkdocs serve")) {
    serve = `${serve} --dev-addr localhost:${port}`;
  } else if (serve.includes("--serve")) {
    serve = `${serve} --port ${port}`;
  }

  const lines: string[] = [
    `# Local Deployment Test Script for ${ssg}`,
    `# Generated on ${new Date().toISOString()}`,
    ``,
    `cd "${repoPath}"`,
    ``,
  ];

  if (config.installCommand) {
    lines.push(`# Install dependencies`, config.installCommand, ``);
  }

  lines.push(
    `# Build the site`,
    config.buildCommand,
    ``,
    `# Start local server`,
    serve,
    ``,
    `# Open in browser:`,
    `# http://localhost:${port}`,
  );

  return lines.join("\n");
}

/**
 * Map a next-step description to the DocuMCP tool that performs it,
 * falling back to "manual" when no known tool name appears in the text.
 */
function getRecommendedTool(step: string): string {
  const knownTools = ["generate_config", "deploy_pages", "verify_deployment"];
  for (const tool of knownTools) {
    if (step.includes(tool)) {
      return tool;
    }
  }
  return "manual";
}

```

--------------------------------------------------------------------------------
/src/tools/generate-llm-context.ts:
--------------------------------------------------------------------------------

```typescript
import { formatMCPResponse } from "../types/api.js";
import { promises as fs } from "fs";
import path from "path";
import { z } from "zod";

// Tool definitions are loaded lazily via dynamic import to avoid a
// circular dependency with the server entry point.
let cachedTools: any[] | null = null;

/**
 * Return the server's TOOLS array, loading and caching it on first use.
 * Returns an empty list (without caching) when the module cannot load,
 * so a later call can retry.
 */
async function getToolDefinitions(): Promise<any[]> {
  if (cachedTools) {
    return cachedTools;
  }

  try {
    const indexModule = await import("../index.js");
    cachedTools = indexModule.TOOLS || [];
    return cachedTools;
  } catch (error) {
    console.warn("Could not load TOOLS from index.js:", error);
    return [];
  }
}

// Input schema for the tool. Defaults: examples included, detailed format.
export const GenerateLLMContextInputSchema = z.object({
  projectPath: z
    .string()
    .describe(
      "Path to the project root directory where LLM_CONTEXT.md will be generated",
    ),
  includeExamples: z
    .boolean()
    .optional()
    .default(true)
    .describe("Include usage examples for tools"),
  format: z
    .enum(["detailed", "concise"])
    .optional()
    .default("detailed")
    .describe("Level of detail in the generated context"),
});

// Validated input type derived from the schema (defaults applied).
export type GenerateLLMContextInput = z.infer<
  typeof GenerateLLMContextInputSchema
>;

/**
 * Set tool definitions for the context generator.
 * This is called from src/index.ts when the TOOLS array is initialized,
 * pre-populating the cache so no dynamic import is needed later.
 */
export function setToolDefinitions(tools: any[]) {
  cachedTools = tools;
}

/**
 * Generates LLM_CONTEXT.md in the project root: a self-contained markdown
 * reference describing every registered DocuMCP tool, the memory system,
 * and common workflows, suitable for @-referencing in LLM conversations.
 *
 * @param params - Partially-specified input; validated against
 *   GenerateLLMContextInputSchema, which supplies defaults
 *   (includeExamples=true, format="detailed").
 * @returns An MCP-formatted response (via formatMCPResponse): on success,
 *   the output path and content stats; on failure, a GENERATION_ERROR payload.
 */
export async function generateLLMContext(
  params: Partial<GenerateLLMContextInput>,
): Promise<any> {
  // Single source of truth for the version reported in response metadata
  // (was previously duplicated in the success and error branches).
  const TOOL_VERSION = "0.4.1";
  const startTime = Date.now();

  try {
    // Parse with defaults supplied by the schema; throws ZodError on bad input.
    const validated = GenerateLLMContextInputSchema.parse(params);
    const { projectPath, includeExamples, format } = validated;

    // Always generate LLM_CONTEXT.md in the project root.
    const outputPath = path.join(projectPath, "LLM_CONTEXT.md");

    // Tool definitions are loaded lazily to avoid a circular dependency
    // with src/index.ts.
    const toolDefinitions = await getToolDefinitions();

    // Render the full markdown document.
    const content = generateContextContent(
      includeExamples,
      format,
      toolDefinitions,
    );

    await fs.writeFile(outputPath, content, "utf-8");

    const metadata = {
      toolVersion: TOOL_VERSION,
      // Report the real elapsed time instead of a hard-coded 0.
      executionTime: Date.now() - startTime,
      timestamp: new Date().toISOString(),
    };

    return formatMCPResponse({
      success: true,
      data: {
        message: `LLM context file generated successfully at ${outputPath}`,
        path: path.resolve(outputPath),
        stats: {
          totalTools: toolDefinitions.length,
          fileSize: Buffer.byteLength(content, "utf-8"),
          sections: [
            "Overview",
            "Core Tools",
            "README Tools",
            "Memory System",
            "Phase 3 Features",
            "Workflows",
            "Quick Reference",
          ],
        },
      },
      metadata,
      nextSteps: [
        {
          action:
            "Reference this file with @LLM_CONTEXT.md in your LLM conversations",
          priority: "high" as const,
        },
        {
          action: "Regenerate periodically when new tools are added",
          toolRequired: "generate_llm_context",
          priority: "low" as const,
        },
        {
          action: "Use this as a quick reference for DocuMCP capabilities",
          priority: "medium" as const,
        },
      ],
    });
  } catch (error: unknown) {
    // Narrow the unknown error rather than assuming an Error instance.
    const message = error instanceof Error ? error.message : String(error);
    return formatMCPResponse({
      success: false,
      error: {
        code: "GENERATION_ERROR",
        message: `Failed to generate LLM context: ${message}`,
      },
      metadata: {
        toolVersion: TOOL_VERSION,
        executionTime: Date.now() - startTime,
        timestamp: new Date().toISOString(),
      },
    });
  }
}

/**
 * Builds the full markdown body of LLM_CONTEXT.md.
 *
 * The document is assembled as an ordered list of markdown sections
 * (header, overview, categorized tool sections, memory system, Phase 3
 * features, workflows, quick reference) joined with newlines.
 *
 * @param includeExamples - When true, each detailed tool section embeds a
 *   canned usage example (see getToolExample).
 * @param format - "detailed" renders parameter lists and examples;
 *   "concise" renders only name + description per tool.
 * @param toolDefinitions - Tool descriptors (name, description, inputSchema)
 *   as registered in src/index.ts.
 * @returns The complete markdown document as a single string.
 */
function generateContextContent(
  includeExamples: boolean,
  format: "detailed" | "concise",
  toolDefinitions: any[],
): string {
  const sections: string[] = [];

  // Header
  sections.push(`# DocuMCP LLM Context Reference
**Auto-generated**: ${new Date().toISOString()}

This file provides instant context about DocuMCP's tools and memory system for LLMs.
Reference this file with @ to get comprehensive context about available capabilities.

---
`);

  // Overview
  sections.push(`## Overview

DocuMCP is an intelligent MCP server for GitHub Pages documentation deployment with:
- **${toolDefinitions.length} Tools** for repository analysis, SSG recommendations, and deployment
- **Knowledge Graph** memory system tracking projects, technologies, and deployments
- **Phase 3 Features** including AST-based code analysis and drift detection
- **Diataxis Framework** compliance for documentation structure

---
`);

  // Categorize tools into fixed buckets by name; anything unmatched lands
  // in otherTools so no registered tool is silently dropped from the doc.
  const coreTools = toolDefinitions.filter((t) =>
    [
      "analyze_repository",
      "recommend_ssg",
      "generate_config",
      "setup_structure",
      "deploy_pages",
      "verify_deployment",
      "populate_diataxis_content",
      "validate_diataxis_content",
      "update_existing_documentation",
    ].includes(t.name),
  );

  const readmeTools = toolDefinitions.filter((t) =>
    t.name.toLowerCase().includes("readme"),
  );

  const memoryTools = toolDefinitions.filter((t) =>
    ["manage_preferences", "analyze_deployments", "kg_health_check"].includes(
      t.name,
    ),
  );

  const phase3Tools = toolDefinitions.filter((t) =>
    ["sync_code_to_docs", "generate_contextual_content"].includes(t.name),
  );

  // Everything not claimed by one of the buckets above.
  const otherTools = toolDefinitions.filter(
    (t) =>
      ![...coreTools, ...readmeTools, ...memoryTools, ...phase3Tools].some(
        (ct) => ct.name === t.name,
      ),
  );

  // Core Documentation Tools
  sections.push(`## Core Documentation Tools

These are the primary tools for analyzing repositories and deploying documentation:
`);

  for (const tool of coreTools) {
    sections.push(formatToolSection(tool, includeExamples, format));
  }

  // README Tools (section emitted only when at least one tool matched)
  if (readmeTools.length > 0) {
    sections.push(`---

## README Analysis & Generation Tools

Specialized tools for README creation, analysis, and optimization:
`);

    for (const tool of readmeTools) {
      sections.push(formatToolSection(tool, includeExamples, format));
    }
  }

  // Phase 3 Tools
  if (phase3Tools.length > 0) {
    sections.push(`---

## Phase 3: Code-to-Docs Synchronization Tools

Advanced tools using AST analysis and drift detection:
`);

    for (const tool of phase3Tools) {
      sections.push(formatToolSection(tool, includeExamples, format));
    }
  }

  // Memory Tools
  if (memoryTools.length > 0) {
    sections.push(`---

## Memory & Analytics Tools

Tools for user preferences, deployment analytics, and knowledge graph management:
`);

    for (const tool of memoryTools) {
      sections.push(formatToolSection(tool, includeExamples, format));
    }
  }

  // Other Tools
  if (otherTools.length > 0) {
    sections.push(`---

## Additional Tools

${otherTools
  .map((t) => formatToolSection(t, includeExamples, format))
  .join("\n")}
`);
  }

  // Memory System (static prose, independent of toolDefinitions)
  sections.push(`---

## Memory Knowledge Graph System

DocuMCP includes a persistent memory system that learns from every analysis:

### Entity Types
- **Project**: Software projects with analysis history and metadata
- **User**: User preferences and SSG usage patterns
- **Configuration**: SSG deployment configurations with success rates
- **Documentation**: Documentation structures and patterns
- **CodeFile**: Source code files with metadata and change tracking
- **DocumentationSection**: Documentation sections linked to code
- **Technology**: Languages, frameworks, and tools used in projects

### Relationship Types
- \`project_uses_technology\`: Links projects to their tech stack
- \`user_prefers_ssg\`: Tracks user SSG preferences
- \`project_deployed_with\`: Records deployment configurations and outcomes
- \`similar_to\`: Identifies similar projects for better recommendations
- \`documents\`: Links code files to documentation sections
- \`outdated_for\`: Flags documentation that's out of sync with code
- \`depends_on\`: Tracks technology dependencies

### Storage Location
- Default: \`.documcp/memory/\`
- Files: \`knowledge-graph-entities.jsonl\`, \`knowledge-graph-relationships.jsonl\`
- Backups: \`.documcp/memory/backups/\`
- Snapshots: \`.documcp/snapshots/\` (for drift detection)

### Memory Benefits
1. **Context-Aware Recommendations**: Uses historical data to improve SSG suggestions
2. **Learning from Success**: Tracks which configurations work best
3. **Similar Project Insights**: Leverages patterns from similar projects
4. **Drift Detection**: Automatically identifies when docs are out of sync
5. **User Preferences**: Adapts to individual user patterns over time

---
`);

  // Phase 3 Features (static prose)
  sections.push(`## Phase 3 Features (Code-to-Docs Sync)

### AST-Based Code Analysis
- Multi-language support: TypeScript, JavaScript, Python, Go, Rust, Java, Ruby, Bash
- Extracts functions, classes, interfaces, types, imports, exports
- Tracks complexity metrics and code signatures
- Detects semantic changes (not just text diffs)

### Drift Detection
- **Snapshot-based approach**: Stores code and documentation state over time
- **Impact analysis**: Categorizes changes (breaking, major, minor, patch)
- **Affected documentation tracking**: Links code changes to specific docs
- **Automatic suggestions**: Generates update recommendations

### Drift Types Detected
- **Outdated**: Documentation references old API signatures
- **Incorrect**: Documented features no longer exist in code
- **Missing**: New code features lack documentation
- **Breaking**: API changes that invalidate existing docs

### Sync Modes
- \`detect\`: Analyze drift without making changes
- \`preview\`: Show proposed changes
- \`apply\`: Apply high-confidence changes automatically (threshold: 0.8)
- \`auto\`: Apply all changes (use with caution)

---
`);

  // Workflows (static prose)
  sections.push(`## Common Workflows

### 1. New Documentation Site Setup
\`\`\`
1. analyze_repository (path: "./")
2. recommend_ssg (analysisId: from step 1)
3. generate_config (ssg: from step 2, outputPath: "./")
4. setup_structure (path: "./docs", ssg: from step 2)
5. populate_diataxis_content (analysisId: from step 1, docsPath: "./docs")
6. deploy_pages (repository: repo-url, ssg: from step 2)
\`\`\`

### 2. Documentation Synchronization (Phase 3)
\`\`\`
1. sync_code_to_docs (projectPath: "./", docsPath: "./docs", mode: "detect")
2. Review drift report and affected sections
3. sync_code_to_docs (mode: "apply", autoApplyThreshold: 0.8)
4. Manual review of remaining changes
\`\`\`

### 3. Content Generation from Code
\`\`\`
1. generate_contextual_content (filePath: "./src/api.ts", documentationType: "reference")
2. generate_contextual_content (filePath: "./src/api.ts", documentationType: "tutorial")
3. Review and integrate generated content
\`\`\`

### 4. Existing Documentation Improvement
\`\`\`
1. analyze_repository (path: "./")
2. update_existing_documentation (analysisId: from step 1, docsPath: "./docs")
3. validate_diataxis_content (contentPath: "./docs", analysisId: from step 1)
4. check_documentation_links (documentation_path: "./docs")
\`\`\`

### 5. README Enhancement
\`\`\`
1. analyze_readme (project_path: "./")
2. evaluate_readme_health (readme_path: "./README.md")
3. readme_best_practices (readme_path: "./README.md", generate_template: true)
4. optimize_readme (readme_path: "./README.md")
\`\`\`

---
`);

  // Quick Reference — table rows are built from coreTools only; each
  // description is truncated to 50 characters for table readability.
  sections.push(`## Quick Reference Table

| Tool | Primary Use | Key Parameters | Output |
|------|-------------|----------------|--------|
${coreTools
  .map(
    (t) =>
      `| \`${t.name}\` | ${t.description.slice(0, 50)}... | ${getKeyParams(
        t,
      )} | Analysis/Config |`,
  )
  .join("\n")}

---

## Tips for LLMs

1. **Always start with \`analyze_repository\`** to get project context
2. **Use the knowledge graph**: Tools automatically store and retrieve relevant history
3. **Phase 3 tools need setup**: Ensure project has code structure before running sync
4. **Memory persists**: The system learns from every interaction
5. **Workflows are composable**: Chain tools together for complex operations
6. **Permission-aware**: All tools respect MCP root permissions

---

## Storage Locations to Reference

- **Memory**: \`.documcp/memory/\`
- **Snapshots**: \`.documcp/snapshots/\`
- **Knowledge Graph Entities**: \`.documcp/memory/knowledge-graph-entities.jsonl\`
- **Knowledge Graph Relationships**: \`.documcp/memory/knowledge-graph-relationships.jsonl\`
- **User Preferences**: Stored in knowledge graph with \`user_prefers_ssg\` edges

---

*This file is auto-generated. To regenerate, use the \`generate_llm_context\` tool.*
`);

  return sections.join("\n");
}

/**
 * Renders a single tool as a markdown section.
 *
 * In "detailed" format, parameter names, optionality, descriptions and
 * defaults are introspected from the tool's Zod input schema; in "concise"
 * format only the name and description are emitted.
 *
 * @param tool - Tool descriptor with name, description and inputSchema.
 * @param includeExamples - When true (and format is "detailed"), appends a
 *   canned example from getToolExample.
 * @param format - Rendering mode: "detailed" or "concise".
 * @returns The markdown section, terminated by a blank line.
 */
function formatToolSection(
  tool: any,
  includeExamples: boolean,
  format: "detailed" | "concise",
): string {
  const sections: string[] = [];

  sections.push(`### \`${tool.name}\``);
  sections.push(`**Description**: ${tool.description}`);

  if (format === "detailed" && tool.inputSchema) {
    sections.push("\n**Parameters**:");
    // Unwrap Zod effects/refinements (._def.schema) to reach the underlying
    // object schema; fall back to the schema itself.
    const schema = tool.inputSchema._def?.schema || tool.inputSchema;

    if (schema.shape) {
      for (const [key, value] of Object.entries(schema.shape)) {
        const field = value as any;
        // Guard each Zod introspection call: a field may not be a full Zod
        // type (e.g. a plain object), so avoid throwing on missing methods
        // or a missing _def — previously this crashed on such fields.
        const optional =
          typeof field.isOptional === "function" && field.isOptional()
            ? " (optional)"
            : " (required)";
        const description = field.description || "";
        const defaultVal =
          typeof field._def?.defaultValue === "function"
            ? ` [default: ${JSON.stringify(field._def.defaultValue())}]`
            : "";

        sections.push(`- \`${key}\`${optional}: ${description}${defaultVal}`);
      }
    }
  }

  if (includeExamples && format === "detailed") {
    const example = getToolExample(tool.name);
    if (example) {
      sections.push(`\n**Example**:\n\`\`\`typescript\n${example}\n\`\`\``);
    }
  }

  sections.push(""); // trailing blank line separates sections
  return sections.join("\n");
}

/**
 * Extracts up to three required parameter names from a tool's input schema
 * for the quick-reference table.
 *
 * @param tool - Tool descriptor; its inputSchema may be a Zod object schema
 *   (possibly wrapped in effects via ._def.schema) or absent.
 * @returns Comma-separated required parameter names, or "N/A" when the
 *   schema is missing, has no shape, or has no required parameters.
 */
function getKeyParams(tool: any): string {
  // Guard against a missing inputSchema (previously threw); unwrap Zod
  // effects/refinements to reach the underlying object schema.
  const schema = tool.inputSchema?._def?.schema || tool.inputSchema;
  if (!schema?.shape) return "N/A";

  const required = Object.entries(schema.shape)
    // Only count fields that expose Zod's isOptional() and report required;
    // non-Zod fields are skipped instead of throwing.
    .filter(
      ([_, value]) =>
        typeof (value as any).isOptional === "function" &&
        !(value as any).isOptional(),
    )
    .map(([key]) => key)
    .slice(0, 3);

  return required.join(", ") || "N/A";
}

/**
 * Returns a canned usage example for well-known tools, or null when no
 * example has been authored for the given tool name.
 */
function getToolExample(toolName: string): string | null {
  const exampleByTool: Record<string, string> = {
    analyze_repository: `analyze_repository({
  path: "./",
  depth: "standard"
})`,
    recommend_ssg: `recommend_ssg({
  analysisId: "repo_abc123",
  userId: "default",
  preferences: {
    priority: "simplicity",
    ecosystem: "javascript"
  }
})`,
    sync_code_to_docs: `sync_code_to_docs({
  projectPath: "./",
  docsPath: "./docs",
  mode: "detect",
  createSnapshot: true
})`,
    generate_contextual_content: `generate_contextual_content({
  filePath: "./src/api.ts",
  documentationType: "reference",
  includeExamples: true,
  style: "detailed"
})`,
    deploy_pages: `deploy_pages({
  repository: "user/repo",
  ssg: "docusaurus",
  branch: "gh-pages",
  userId: "default"
})`,
  };

  return exampleByTool[toolName] ?? null;
}

```

--------------------------------------------------------------------------------
/tests/tools/manage-sitemap.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Tests for manage-sitemap MCP tool
 */

import { promises as fs } from "fs";
import path from "path";
import { tmpdir } from "os";
import {
  manageSitemap,
  ManageSitemapInputSchema,
} from "../../src/tools/manage-sitemap.js";

/**
 * Helper to extract and parse the JSON payload from an MCP tool response.
 * Throws when the response has no content entries.
 */
function parseMCPResponse(result: { content: any[] }): any {
  const first = result.content?.[0];
  if (!first) {
    throw new Error("Invalid MCP response structure");
  }
  return JSON.parse(first.text);
}

// End-to-end tests for the manage_sitemap tool. Each test runs against a
// throwaway docs directory created under the OS temp dir, and covers the
// four actions (generate, validate, update, list) plus option handling,
// error paths, and Diataxis-structure integration.
describe("manage-sitemap tool", () => {
  let testDir: string;
  let docsDir: string;

  // Fresh, uniquely-named temp directory per test to avoid cross-test state.
  beforeEach(async () => {
    testDir = path.join(tmpdir(), `sitemap-tool-test-${Date.now()}`);
    docsDir = path.join(testDir, "docs");
    await fs.mkdir(docsDir, { recursive: true });
  });

  afterEach(async () => {
    try {
      await fs.rm(testDir, { recursive: true, force: true });
    } catch (error) {
      // Ignore cleanup errors
    }
  });

  // Schema-level validation of the tool's input (no tool execution).
  describe("input validation", () => {
    it("should validate required fields", () => {
      expect(() => {
        ManageSitemapInputSchema.parse({});
      }).toThrow();
    });

    it("should validate action enum", () => {
      expect(() => {
        ManageSitemapInputSchema.parse({
          action: "invalid",
          docsPath: "/path",
        });
      }).toThrow();
    });

    it("should accept valid input", () => {
      const result = ManageSitemapInputSchema.parse({
        action: "generate",
        docsPath: "/path/to/docs",
        baseUrl: "https://example.com",
      });

      expect(result.action).toBe("generate");
      expect(result.docsPath).toBe("/path/to/docs");
      expect(result.baseUrl).toBe("https://example.com");
    });
  });

  describe("generate action", () => {
    it("should generate sitemap.xml", async () => {
      // Create test documentation
      await fs.writeFile(path.join(docsDir, "index.md"), "# Home");
      await fs.writeFile(path.join(docsDir, "guide.md"), "# Guide");

      const result = await manageSitemap({
        action: "generate",
        docsPath: docsDir,
        baseUrl: "https://example.com",
      });

      expect(result.content).toBeDefined();
      expect(result.content[0].type).toBe("text");
      expect(result.content[0].text).toContain("✅");
      expect(result.content[0].text).toContain(
        "Sitemap generated successfully",
      );

      // Verify data is in the response
      const data = JSON.parse(result.content[0].text);
      expect(data.action).toBe("generate");
      expect(data.totalUrls).toBe(2);

      // Verify file was created
      const sitemapPath = path.join(docsDir, "sitemap.xml");
      const exists = await fs
        .access(sitemapPath)
        .then(() => true)
        .catch(() => false);
      expect(exists).toBe(true);
    });

    it("should require baseUrl for generate action", async () => {
      const result = await manageSitemap({
        action: "generate",
        docsPath: docsDir,
      });

      const data = parseMCPResponse(result);
      expect(data.success).toBe(false);
      expect(data.error.code).toBe("BASE_URL_REQUIRED");
      expect(data.error.message).toContain("baseUrl is required");
    });

    it("should return error if docs directory does not exist", async () => {
      const result = await manageSitemap({
        action: "generate",
        docsPath: "/nonexistent/path",
        baseUrl: "https://example.com",
      });

      const data = parseMCPResponse(result);
      expect(data.success).toBe(false);
      expect(data.error.code).toBe("DOCS_DIR_NOT_FOUND");
      expect(data.error.message).toContain("not found");
    });

    it("should include statistics in output", async () => {
      await fs.mkdir(path.join(docsDir, "tutorials"), { recursive: true });
      await fs.mkdir(path.join(docsDir, "reference"), { recursive: true });
      await fs.writeFile(
        path.join(docsDir, "tutorials", "guide.md"),
        "# Tutorial",
      );
      await fs.writeFile(path.join(docsDir, "reference", "api.md"), "# API");

      const result = await manageSitemap({
        action: "generate",
        docsPath: docsDir,
        baseUrl: "https://example.com",
      });

      const output = result.content[0].text;
      expect(output).toContain("URLs by Category");
      expect(output).toContain("Change Frequencies");
      expect(output).toContain("Next Steps");
    });
  });

  describe("validate action", () => {
    it("should validate existing sitemap", async () => {
      // Generate a sitemap first
      await fs.writeFile(path.join(docsDir, "page.md"), "# Page");
      await manageSitemap({
        action: "generate",
        docsPath: docsDir,
        baseUrl: "https://example.com",
      });

      const result = await manageSitemap({
        action: "validate",
        docsPath: docsDir,
      });

      expect(result.content[0].text).toContain("✅");
      expect(result.content[0].text).toContain("Sitemap is valid");

      const data = parseMCPResponse(result);
      expect(data.valid).toBe(true);
    });

    it("should return error if sitemap does not exist", async () => {
      const result = await manageSitemap({
        action: "validate",
        docsPath: docsDir,
      });

      const data = parseMCPResponse(result);
      expect(data.success).toBe(false);
      expect(data.error.code).toBe("SITEMAP_NOT_FOUND");
      expect(data.error.message).toContain("Sitemap not found");
    });

    it("should detect invalid sitemap", async () => {
      // Create invalid sitemap: disallowed URL scheme (ftp) and an
      // out-of-range <priority> (valid range is 0.0–1.0 per the protocol).
      const sitemapPath = path.join(docsDir, "sitemap.xml");
      await fs.writeFile(
        sitemapPath,
        `<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
  <url>
    <loc>ftp://example.com/page.html</loc>
    <priority>5.0</priority>
  </url>
</urlset>`,
      );

      const result = await manageSitemap({
        action: "validate",
        docsPath: docsDir,
      });

      const data = parseMCPResponse(result);
      expect(data.success).toBe(false);
      expect(data.error.code).toBe("VALIDATION_FAILED");
      expect(data.error.message).toContain("validation failed");
      expect(data.data.valid).toBe(false);
      expect(data.data.errorCount).toBeGreaterThan(0);
    });
  });

  describe("update action", () => {
    it("should update existing sitemap", async () => {
      // Create initial sitemap
      await fs.writeFile(path.join(docsDir, "page1.md"), "# Page 1");
      await manageSitemap({
        action: "generate",
        docsPath: docsDir,
        baseUrl: "https://example.com",
      });

      // Add new page
      await fs.writeFile(path.join(docsDir, "page2.md"), "# Page 2");

      const result = await manageSitemap({
        action: "update",
        docsPath: docsDir,
        baseUrl: "https://example.com",
      });

      expect(result.content[0].text).toContain("✅");
      expect(result.content[0].text).toContain("Sitemap updated successfully");

      const data = parseMCPResponse(result);
      expect(data.added).toBe(1);
      expect(data.total).toBe(2);
    });

    it("should require baseUrl for update action", async () => {
      const result = await manageSitemap({
        action: "update",
        docsPath: docsDir,
      });

      const data = parseMCPResponse(result);
      expect(data.success).toBe(false);
      expect(data.error.code).toBe("BASE_URL_REQUIRED");
      expect(data.error.message).toContain("baseUrl is required");
    });

    it("should show removed pages", async () => {
      // Create sitemap with 2 pages
      await fs.writeFile(path.join(docsDir, "page1.md"), "# Page 1");
      await fs.writeFile(path.join(docsDir, "page2.md"), "# Page 2");
      await manageSitemap({
        action: "generate",
        docsPath: docsDir,
        baseUrl: "https://example.com",
      });

      // Remove one page
      await fs.rm(path.join(docsDir, "page2.md"));

      const result = await manageSitemap({
        action: "update",
        docsPath: docsDir,
        baseUrl: "https://example.com",
      });

      const data = parseMCPResponse(result);
      expect(data.removed).toBe(1);
      expect(data.total).toBe(1);
    });

    it("should detect no changes", async () => {
      await fs.writeFile(path.join(docsDir, "page.md"), "# Page");
      await manageSitemap({
        action: "generate",
        docsPath: docsDir,
        baseUrl: "https://example.com",
      });

      const result = await manageSitemap({
        action: "update",
        docsPath: docsDir,
        baseUrl: "https://example.com",
      });

      expect(result.content[0].text).toContain("No changes detected");

      const data = parseMCPResponse(result);
      expect(data.added).toBe(0);
      expect(data.removed).toBe(0);
    });
  });

  describe("list action", () => {
    it("should list all URLs from sitemap", async () => {
      await fs.mkdir(path.join(docsDir, "tutorials"), { recursive: true });
      await fs.writeFile(
        path.join(docsDir, "tutorials", "guide.md"),
        "# Tutorial Guide",
      );
      await fs.writeFile(path.join(docsDir, "index.md"), "# Home");

      await manageSitemap({
        action: "generate",
        docsPath: docsDir,
        baseUrl: "https://example.com",
      });

      const result = await manageSitemap({
        action: "list",
        docsPath: docsDir,
      });

      expect(result.content[0].text).toContain("Sitemap URLs");
      expect(result.content[0].text).toContain("Total: 2");

      const data = parseMCPResponse(result);
      expect(data.totalUrls).toBe(2);
      expect(data.urls).toHaveLength(2);
    });

    it("should group URLs by category", async () => {
      await fs.mkdir(path.join(docsDir, "tutorials"), { recursive: true });
      await fs.mkdir(path.join(docsDir, "reference"), { recursive: true });
      await fs.writeFile(
        path.join(docsDir, "tutorials", "guide.md"),
        "# Tutorial",
      );
      await fs.writeFile(path.join(docsDir, "reference", "api.md"), "# API");

      await manageSitemap({
        action: "generate",
        docsPath: docsDir,
        baseUrl: "https://example.com",
      });

      const result = await manageSitemap({
        action: "list",
        docsPath: docsDir,
      });

      const output = result.content[0].text;
      expect(output).toContain("tutorial");
      expect(output).toContain("reference");
    });

    it("should return error if sitemap does not exist", async () => {
      const result = await manageSitemap({
        action: "list",
        docsPath: docsDir,
      });

      const data = parseMCPResponse(result);
      expect(data.success).toBe(false);
      expect(data.error.code).toBe("SITEMAP_NOT_FOUND");
      expect(data.error.message).toContain("Sitemap not found");
    });
  });

  describe("custom sitemap path", () => {
    it("should use custom sitemap path", async () => {
      const customPath = path.join(testDir, "custom-sitemap.xml");
      await fs.writeFile(path.join(docsDir, "page.md"), "# Page");

      await manageSitemap({
        action: "generate",
        docsPath: docsDir,
        baseUrl: "https://example.com",
        sitemapPath: customPath,
      });

      const exists = await fs
        .access(customPath)
        .then(() => true)
        .catch(() => false);
      expect(exists).toBe(true);
    });
  });

  describe("include and exclude patterns", () => {
    it("should respect include patterns", async () => {
      // Three files, only one matching the **/*.md include glob.
      await fs.writeFile(path.join(docsDir, "page.md"), "# Markdown");
      await fs.writeFile(path.join(docsDir, "page.html"), "<h1>HTML</h1>");
      await fs.writeFile(path.join(docsDir, "data.json"), "{}");

      const result = await manageSitemap({
        action: "generate",
        docsPath: docsDir,
        baseUrl: "https://example.com",
        includePatterns: ["**/*.md"],
      });

      const data = parseMCPResponse(result);
      expect(data.totalUrls).toBe(1);
    });

    it("should respect exclude patterns", async () => {
      await fs.mkdir(path.join(docsDir, "drafts"), { recursive: true });
      await fs.writeFile(path.join(docsDir, "page.md"), "# Page");
      await fs.writeFile(path.join(docsDir, "drafts", "draft.md"), "# Draft");

      const result = await manageSitemap({
        action: "generate",
        docsPath: docsDir,
        baseUrl: "https://example.com",
        excludePatterns: ["**/drafts/**"],
      });

      const data = parseMCPResponse(result);
      expect(data.totalUrls).toBe(1);
    });
  });

  describe("change frequency", () => {
    it("should use custom update frequency", async () => {
      await fs.writeFile(path.join(docsDir, "page.md"), "# Page");

      await manageSitemap({
        action: "generate",
        docsPath: docsDir,
        baseUrl: "https://example.com",
        updateFrequency: "daily",
      });

      const sitemapPath = path.join(docsDir, "sitemap.xml");
      const xml = await fs.readFile(sitemapPath, "utf-8");

      // Should contain daily for pages without specific category
      expect(xml).toContain("<changefreq>");
    });
  });

  describe("error handling", () => {
    it("should handle invalid action gracefully", async () => {
      const result = await manageSitemap({
        action: "generate" as any,
        docsPath: "/invalid/path",
        baseUrl: "https://example.com",
      });

      const data = parseMCPResponse(result);
      expect(data.success).toBe(false);
      expect(data.error).toBeDefined();
    });

    it("should handle file system errors", async () => {
      // Try to write to read-only location (will fail on most systems)
      // NOTE(review): assumes the test process cannot write to /root —
      // may not hold when running as root (e.g. some CI containers).
      const readOnlyPath = "/root/docs";

      const result = await manageSitemap({
        action: "generate",
        docsPath: readOnlyPath,
        baseUrl: "https://example.com",
      });

      const data = parseMCPResponse(result);
      expect(data.success).toBe(false);
      expect(data.error).toBeDefined();
    });
  });

  describe("integration with other tools", () => {
    it("should work with Diataxis structure", async () => {
      // Create Diataxis structure
      const categories = ["tutorials", "how-to", "reference", "explanation"];
      for (const category of categories) {
        await fs.mkdir(path.join(docsDir, category), { recursive: true });
        await fs.writeFile(
          path.join(docsDir, category, "index.md"),
          `# ${category}`,
        );
      }

      const result = await manageSitemap({
        action: "generate",
        docsPath: docsDir,
        baseUrl: "https://example.com",
      });

      const data = parseMCPResponse(result);
      expect(data.totalUrls).toBe(4);
      expect(data.categories).toHaveProperty("tutorial");
      expect(data.categories).toHaveProperty("how-to");
      expect(data.categories).toHaveProperty("reference");
      expect(data.categories).toHaveProperty("explanation");
    });
  });
});

```

--------------------------------------------------------------------------------
/tests/integration/readme-technical-writer.test.ts:
--------------------------------------------------------------------------------

```typescript
import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import { promises as fs } from "fs";
import { join } from "path";
import { analyzeReadme } from "../../src/tools/analyze-readme.js";
import { optimizeReadme } from "../../src/tools/optimize-readme.js";
import { tmpdir } from "os";

describe("README Technical Writer Integration Tests", () => {
  let testDir: string;
  let readmePath: string;

  beforeEach(async () => {
    // Create temporary test directory
    testDir = join(tmpdir(), `test-readme-integration-${Date.now()}`);
    await fs.mkdir(testDir, { recursive: true });
    readmePath = join(testDir, "README.md");
  });

  afterEach(async () => {
    // Cleanup test directory
    try {
      await fs.rm(testDir, { recursive: true, force: true });
    } catch {
      // Ignore cleanup errors
    }
  });

  describe("Real-world README analysis and optimization workflow", () => {
    it("should analyze and optimize a typical open source project README", async () => {
      // Create a realistic README that needs optimization
      const originalReadme = `# MyAwesome Library

MyAwesome Library is a comprehensive JavaScript library that provides a wide range of utilities and functions for modern web development. It has been carefully designed to address common challenges that developers face when building complex applications, and it incorporates industry best practices to ensure optimal performance, maintainability, and ease of use.

## Table of Contents

- [Installation](#installation)
- [Usage](#usage)
- [API Documentation](#api-documentation)
- [Contributing](#contributing)
- [License](#license)

## Installation

Installing MyAwesome Library is straightforward and can be accomplished through several different methods depending on your project setup and preferences.

### Using npm

If you're using npm as your package manager, you can install MyAwesome Library by running the following command in your terminal:

\`\`\`bash
npm install myawesome-library
\`\`\`

### Using yarn

Alternatively, if you prefer to use yarn as your package manager, you can install the library with:

\`\`\`bash
yarn add myawesome-library
\`\`\`

### Using CDN

For quick prototyping or if you prefer not to use a package manager, you can include MyAwesome Library directly from a CDN:

\`\`\`html
<script src="https://cdn.jsdelivr.net/npm/myawesome-library@latest/dist/myawesome.min.js"></script>
\`\`\`

## Usage

MyAwesome Library provides a simple and intuitive API that makes it easy to get started with your projects. Here are some basic usage examples to help you understand how to integrate the library into your applications.

### Basic Example

\`\`\`javascript
import { MyAwesome } from 'myawesome-library';

const awesome = new MyAwesome();
awesome.doSomething();
\`\`\`

### Advanced Configuration

For more advanced use cases, you can configure the library with various options:

\`\`\`javascript
import { MyAwesome } from 'myawesome-library';

const awesome = new MyAwesome({
  apiKey: 'your-api-key',
  environment: 'production',
  debug: false,
  timeout: 5000
});
\`\`\`

## API Documentation

This section provides comprehensive documentation for all the methods and properties available in MyAwesome Library.

### Core Methods

#### \`doSomething(options?)\`

Performs the primary functionality of the library.

**Parameters:**
- \`options\` (Object, optional): Configuration options
  - \`param1\` (String): Description of parameter 1
  - \`param2\` (Number): Description of parameter 2
  - \`param3\` (Boolean): Description of parameter 3

**Returns:** Promise<Result>

**Example:**
\`\`\`javascript
const result = await awesome.doSomething({
  param1: 'value',
  param2: 42,
  param3: true
});
\`\`\`

#### \`configure(config)\`

Updates the configuration of the library instance.

**Parameters:**
- \`config\` (Object): New configuration object

**Returns:** void

### Utility Methods

#### \`validate(data)\`

Validates input data according to library specifications.

**Parameters:**
- \`data\` (Any): Data to validate

**Returns:** Boolean

#### \`transform(input, options)\`

Transforms input data using specified options.

**Parameters:**
- \`input\` (Any): Input data to transform
- \`options\` (Object): Transformation options

**Returns:** Any

## Contributing

We welcome contributions from the community! MyAwesome Library is an open source project, and we appreciate any help in making it better.

### Development Setup

To set up the development environment:

1. Fork the repository
2. Clone your fork: \`git clone https://github.com/yourusername/myawesome-library.git\`
3. Install dependencies: \`npm install\`
4. Run tests: \`npm test\`
5. Start development server: \`npm run dev\`

### Coding Standards

Please ensure your code follows our coding standards:

- Use TypeScript for all new code
- Follow ESLint configuration
- Write tests for new features
- Update documentation as needed
- Use conventional commit messages

### Pull Request Process

1. Create a feature branch from main
2. Make your changes
3. Add tests for new functionality
4. Ensure all tests pass
5. Update documentation
6. Submit a pull request

## License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

## Support

If you encounter any issues or have questions about MyAwesome Library, please:

1. Check the [documentation](https://myawesome-library.dev/docs)
2. Search existing [issues](https://github.com/user/myawesome-library/issues)
3. Create a new issue if needed
4. Join our [Discord community](https://discord.gg/myawesome)

## Changelog

See [CHANGELOG.md](CHANGELOG.md) for a list of changes and version history.

## Acknowledgments

- Thanks to all contributors who have helped make this project possible
- Special thanks to the open source community for inspiration and support
- Built with love using TypeScript, Jest, and other amazing tools`;

      await fs.writeFile(readmePath, originalReadme);

      // Step 1: Analyze the README
      console.log("🔍 Analyzing README...");
      const analysisResult = await analyzeReadme({
        project_path: testDir,
        target_audience: "developers",
        optimization_level: "moderate",
      });

      expect(analysisResult.success).toBe(true);
      expect(analysisResult.data?.analysis.overallScore).toBeDefined();
      expect(
        analysisResult.data?.analysis.lengthAnalysis.currentWords,
      ).toBeGreaterThan(500);
      expect(
        analysisResult.data?.analysis.optimizationOpportunities.length,
      ).toBeGreaterThan(0);

      console.log(
        `📊 Analysis Score: ${analysisResult.data?.analysis.overallScore}/100`,
      );
      console.log(
        `📝 Word Count: ${analysisResult.data?.analysis.lengthAnalysis.currentWords}`,
      );
      console.log(
        `💡 Optimization Opportunities: ${analysisResult.data?.analysis.optimizationOpportunities.length}`,
      );

      // Step 2: Optimize the README
      console.log("\n🛠️  Optimizing README...");
      const optimizationResult = await optimizeReadme({
        readme_path: readmePath,
        strategy: "developer_focused",
        max_length: 300,
        include_tldr: true,
        create_docs_directory: true,
        output_path: readmePath,
      });

      expect(optimizationResult.success).toBe(true);
      expect(optimizationResult.data?.optimization.optimizedContent).toContain(
        "## TL;DR",
      );
      expect(
        optimizationResult.data?.optimization.originalLength,
      ).toBeGreaterThan(0);
      // Note: Optimization may not always reduce length due to TL;DR addition
      expect(
        optimizationResult.data?.optimization.optimizedLength,
      ).toBeGreaterThan(0);

      console.log(
        `📉 Length Reduction: ${optimizationResult.data?.optimization.reductionPercentage}%`,
      );
      console.log(
        `🔄 Restructuring Changes: ${optimizationResult.data?.optimization.restructuringChanges.length}`,
      );
      console.log(
        `📁 Extracted Sections: ${optimizationResult.data?.optimization.extractedSections.length}`,
      );

      // Step 3: Verify the optimized README is better
      const optimizedContent = await fs.readFile(readmePath, "utf-8");
      expect(optimizedContent).toContain("## TL;DR");
      // Note: Length may increase due to TL;DR addition, but structure improves
      expect(optimizedContent.length).toBeGreaterThan(0);

      // Step 4: Re-analyze to confirm improvement
      console.log("\n🔍 Re-analyzing optimized README...");
      const reanalysisResult = await analyzeReadme({
        project_path: testDir,
        target_audience: "developers",
      });

      expect(reanalysisResult.success).toBe(true);
      console.log(
        `📊 New Analysis Score: ${reanalysisResult.data?.analysis.overallScore}/100`,
      );

      // The optimized version should have fewer optimization opportunities
      const originalOpportunities =
        analysisResult.data?.analysis.optimizationOpportunities.length ?? 0;
      const newOpportunities =
        reanalysisResult.data?.analysis.optimizationOpportunities.length ?? 0;
      expect(newOpportunities).toBeLessThanOrEqual(originalOpportunities);
    });

    it("should handle enterprise-focused optimization strategy", async () => {
      const enterpriseReadme = `# Enterprise Solution

Our enterprise solution provides comprehensive business capabilities.

## Features

- Feature 1
- Feature 2
- Feature 3

## Installation

Standard installation process.

## Usage

Basic usage instructions.

## Support

Contact our support team.`;

      await fs.writeFile(readmePath, enterpriseReadme);

      const result = await optimizeReadme({
        readme_path: readmePath,
        strategy: "enterprise_focused",
        max_length: 200,
      });

      expect(result.success).toBe(true);
      expect(result.data?.optimization.optimizedContent).toContain("## TL;DR");

      // Enterprise strategy should provide relevant optimization
      expect(result.data?.optimization.recommendations.length).toBeGreaterThan(
        0,
      );
      expect(result.data?.optimization.optimizedContent).toContain("## TL;DR");
    });

    it("should handle community-focused optimization strategy", async () => {
      const communityReadme = `# Open Source Project

A project for the community.

## Installation

npm install project

## Usage

Basic usage.

## License

MIT License`;

      await fs.writeFile(readmePath, communityReadme);

      const result = await optimizeReadme({
        readme_path: readmePath,
        strategy: "community_focused",
        max_length: 150,
      });

      expect(result.success).toBe(true);
      expect(result.data?.optimization.optimizedContent).toContain("## TL;DR");

      // Community strategy should focus on contribution and collaboration
      const optimizedContent = result.data?.optimization.optimizedContent || "";
      expect(optimizedContent.toLowerCase()).toMatch(
        /contribut|collaborat|communit/,
      );
    });
  });

  describe("Error handling and edge cases", () => {
    it("should handle README with no headings", async () => {
      const noHeadingsReadme = `This is a README without any headings. It just contains plain text describing the project. There are no sections or structure to work with.`;

      await fs.writeFile(readmePath, noHeadingsReadme);

      const analysisResult = await analyzeReadme({
        project_path: testDir,
      });

      expect(analysisResult.success).toBe(true);
      expect(
        analysisResult.data?.analysis.structureAnalysis.scannabilityScore,
      ).toBeLessThan(50);
      expect(
        analysisResult.data?.analysis.optimizationOpportunities.length,
      ).toBeGreaterThan(0);

      const optimizationResult = await optimizeReadme({
        readme_path: readmePath,
        strategy: "general",
      });

      expect(optimizationResult.success).toBe(true);
      expect(optimizationResult.data?.optimization.optimizedContent).toContain(
        "## TL;DR",
      );
    });

    it("should handle very short README", async () => {
      const shortReadme = `# Project\n\nShort description.`;

      await fs.writeFile(readmePath, shortReadme);

      const analysisResult = await analyzeReadme({
        project_path: testDir,
        max_length_target: 100,
      });

      expect(analysisResult.success).toBe(true);
      expect(analysisResult.data?.analysis.lengthAnalysis.exceedsTarget).toBe(
        false,
      );

      const optimizationResult = await optimizeReadme({
        readme_path: readmePath,
        max_length: 100,
      });

      expect(optimizationResult.success).toBe(true);
      // Should still add TL;DR even for short READMEs
      expect(optimizationResult.data?.optimization.optimizedContent).toContain(
        "## TL;DR",
      );
    });

    it("should handle README with existing TL;DR", async () => {
      const readmeWithTldr = `# Project

## TL;DR

This project does X for Y users.

## Installation

npm install project

## Usage

Use it like this.`;

      await fs.writeFile(readmePath, readmeWithTldr);

      const result = await optimizeReadme({
        readme_path: readmePath,
        preserve_existing: true,
      });

      expect(result.success).toBe(true);
      // The tool may still generate a TL;DR even with existing one for optimization
      expect(result.data?.optimization.optimizedContent).toContain(
        "This project does X for Y users",
      );
    });
  });

  describe("Performance and scalability", () => {
    it("should handle large README files efficiently", async () => {
      // Create a large README with many sections
      const largeSections = Array.from({ length: 50 }, (_, i) =>
        `## Section ${i + 1}\n\nThis is section ${
          i + 1
        } with some content. `.repeat(20),
      ).join("\n\n");

      const largeReadme = `# Large Project\n\n${largeSections}`;

      await fs.writeFile(readmePath, largeReadme);

      const startTime = Date.now();

      const analysisResult = await analyzeReadme({
        project_path: testDir,
        max_length_target: 500,
      });

      const analysisTime = Date.now() - startTime;

      expect(analysisResult.success).toBe(true);
      expect(analysisTime).toBeLessThan(5000); // Should complete within 5 seconds
      expect(analysisResult.data?.analysis.lengthAnalysis.exceedsTarget).toBe(
        true,
      );

      const optimizationStartTime = Date.now();

      const optimizationResult = await optimizeReadme({
        readme_path: readmePath,
        max_length: 500,
        create_docs_directory: true,
      });

      const optimizationTime = Date.now() - optimizationStartTime;

      expect(optimizationResult.success).toBe(true);
      expect(optimizationTime).toBeLessThan(5000); // Should complete within 5 seconds
      expect(
        optimizationResult.data?.optimization.extractedSections.length,
      ).toBeGreaterThan(0);
    });
  });
});

```

--------------------------------------------------------------------------------
/tests/tools/validate-documentation-freshness.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Integration Tests for validate_documentation_freshness Tool
 */

import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
import fs from "fs/promises";
import path from "path";
import os from "os";
import { simpleGit } from "simple-git";
import {
  validateDocumentationFreshness,
  type ValidateDocumentationFreshnessInput,
} from "../../src/tools/validate-documentation-freshness.js";
import { parseDocFrontmatter } from "../../src/utils/freshness-tracker.js";

describe("validate_documentation_freshness Tool", () => {
  // Temp workspace per test: docsDir holds markdown files under validation;
  // projectDir (same as tempDir) doubles as the optional git repo root.
  let tempDir: string;
  let docsDir: string;
  let projectDir: string;

  beforeEach(async () => {
    // Fresh, uniquely-named temp directory isolates each test's filesystem state.
    tempDir = await fs.mkdtemp(
      path.join(os.tmpdir(), "validate-freshness-test-"),
    );
    docsDir = path.join(tempDir, "docs");
    projectDir = tempDir;
    await fs.mkdir(docsDir);
  });

  afterEach(async () => {
    // force: true tolerates paths already removed by a test.
    await fs.rm(tempDir, { recursive: true, force: true });
  });

  describe("Initialization", () => {
    it("should initialize metadata for files without it", async () => {
      await fs.writeFile(path.join(docsDir, "test.md"), "# Test Document");

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.success).toBe(true);
      expect(result.data.report.initialized).toBe(1);

      const frontmatter = await parseDocFrontmatter(
        path.join(docsDir, "test.md"),
      );
      expect(frontmatter.documcp?.last_updated).toBeDefined();
      expect(frontmatter.documcp?.last_validated).toBeDefined();
    });

    it("should skip files that already have metadata", async () => {
      await fs.writeFile(
        path.join(docsDir, "existing.md"),
        `---
documcp:
  last_updated: "2025-01-01T00:00:00Z"
---
# Existing`,
      );

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.success).toBe(true);
      expect(result.data.report.initialized).toBe(0);
      expect(result.data.report.skipped).toBe(1);
    });

    it("should set default update frequency", async () => {
      await fs.writeFile(path.join(docsDir, "test.md"), "# Test");

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
        updateFrequency: "weekly",
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.success).toBe(true);

      const frontmatter = await parseDocFrontmatter(
        path.join(docsDir, "test.md"),
      );
      expect(frontmatter.documcp?.update_frequency).toBe("weekly");
    });
  });

  describe("Updating Existing Metadata", () => {
    it("should update last_validated for existing files when requested", async () => {
      await fs.writeFile(
        path.join(docsDir, "existing.md"),
        `---
documcp:
  last_updated: "2025-01-01T00:00:00Z"
---
# Existing`,
      );

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        updateExisting: true,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.success).toBe(true);
      expect(result.data.report.updated).toBe(1);

      const frontmatter = await parseDocFrontmatter(
        path.join(docsDir, "existing.md"),
      );
      expect(frontmatter.documcp?.last_validated).toBeDefined();
      expect(
        new Date(frontmatter.documcp?.last_validated!).getTime(),
      ).toBeGreaterThan(new Date("2025-01-01").getTime());
    });

    it("should not update existing files when updateExisting is false", async () => {
      const originalDate = "2025-01-01T00:00:00Z";
      await fs.writeFile(
        path.join(docsDir, "existing.md"),
        `---
documcp:
  last_updated: "${originalDate}"
  last_validated: "${originalDate}"
---
# Existing`,
      );

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        updateExisting: false,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.success).toBe(true);
      expect(result.data.report.updated).toBe(0);

      const frontmatter = await parseDocFrontmatter(
        path.join(docsDir, "existing.md"),
      );
      expect(frontmatter.documcp?.last_validated).toBe(originalDate);
    });
  });

  describe("Git Integration", () => {
    it("should add git commit hash when git is available", async () => {
      // Initialize git repo
      const git = simpleGit(projectDir);
      await git.init();
      await git.addConfig("user.name", "Test User");
      await git.addConfig("user.email", "[email protected]");
      await fs.writeFile(path.join(projectDir, "README.md"), "# Test Repo");
      await git.add(".");
      await git.commit("Initial commit");

      await fs.writeFile(path.join(docsDir, "test.md"), "# Test");

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
        validateAgainstGit: true,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.success).toBe(true);
      expect(result.data.report.currentCommit).toBeDefined();

      const frontmatter = await parseDocFrontmatter(
        path.join(docsDir, "test.md"),
      );
      expect(frontmatter.documcp?.validated_against_commit).toBeDefined();
      expect(frontmatter.documcp?.validated_against_commit).toBe(
        result.data.report.currentCommit,
      );
    });

    it("should work without git when validateAgainstGit is false", async () => {
      await fs.writeFile(path.join(docsDir, "test.md"), "# Test");

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
        validateAgainstGit: false,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.success).toBe(true);
      expect(result.data.report.currentCommit).toBeUndefined();
    });

    it("should handle non-git directories gracefully", async () => {
      await fs.writeFile(path.join(docsDir, "test.md"), "# Test");

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
        validateAgainstGit: true,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.success).toBe(true);
      expect(result.data.report.currentCommit).toBeUndefined();
    });
  });

  describe("Batch Operations", () => {
    it("should process multiple files", async () => {
      await fs.writeFile(path.join(docsDir, "file1.md"), "# File 1");
      await fs.writeFile(path.join(docsDir, "file2.md"), "# File 2");
      await fs.writeFile(path.join(docsDir, "file3.md"), "# File 3");

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.success).toBe(true);
      expect(result.data.report.totalFiles).toBe(3);
      expect(result.data.report.initialized).toBe(3);
    });

    it("should handle nested directories", async () => {
      await fs.mkdir(path.join(docsDir, "api"));
      await fs.mkdir(path.join(docsDir, "guides"));

      await fs.writeFile(path.join(docsDir, "index.md"), "# Index");
      await fs.writeFile(path.join(docsDir, "api", "endpoints.md"), "# API");
      await fs.writeFile(
        path.join(docsDir, "guides", "tutorial.md"),
        "# Guide",
      );

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.success).toBe(true);
      expect(result.data.report.totalFiles).toBe(3);
    });

    it("should provide individual file results", async () => {
      await fs.writeFile(path.join(docsDir, "test.md"), "# Test");

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.data.report.files).toBeDefined();
      expect(result.data.report.files.length).toBe(1);
      expect(result.data.report.files[0].action).toBe("initialized");
    });
  });

  describe("Error Handling", () => {
    it("should handle non-existent docs directory", async () => {
      const input: ValidateDocumentationFreshnessInput = {
        docsPath: "/nonexistent/docs",
        projectPath: projectDir,
        initializeMissing: true,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.success).toBe(false);
      expect(result.error).toBeDefined();
      expect(result.error?.code).toBe("FRESHNESS_VALIDATION_FAILED");
    });

    it("should track file-level errors", async () => {
      // Create a file that will cause issues
      await fs.writeFile(path.join(docsDir, "test.md"), "# Test");

      // Make it read-only to cause write errors (skip on Windows)
      if (process.platform !== "win32") {
        await fs.chmod(path.join(docsDir, "test.md"), 0o444);

        const input: ValidateDocumentationFreshnessInput = {
          docsPath: docsDir,
          projectPath: projectDir,
          initializeMissing: true,
        };

        const result = await validateDocumentationFreshness(input);

        // Restore permissions for cleanup
        await fs.chmod(path.join(docsDir, "test.md"), 0o644);

        expect(result.data.report.errors).toBeGreaterThan(0);
      }
    });

    it("should handle empty docs directory", async () => {
      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.success).toBe(true);
      expect(result.data.report.totalFiles).toBe(0);
    });
  });

  describe("Output Format", () => {
    it("should include formatted report", async () => {
      await fs.writeFile(path.join(docsDir, "test.md"), "# Test");

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.data.formattedReport).toBeDefined();
      expect(result.data.formattedReport).toContain(
        "Documentation Freshness Validation Report",
      );
      expect(result.data.formattedReport).toContain("Summary");
      expect(result.data.formattedReport).toContain("Actions Performed");
    });

    it("should include summary", async () => {
      await fs.writeFile(path.join(docsDir, "test.md"), "# Test");

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.data.summary).toBeDefined();
      expect(result.data.summary).toContain("Validated");
      expect(result.data.summary).toContain("initialized");
    });

    it("should include metadata", async () => {
      await fs.writeFile(path.join(docsDir, "test.md"), "# Test");

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.metadata).toBeDefined();
      expect(result.metadata.toolVersion).toBe("1.0.0");
      expect(result.metadata.timestamp).toBeDefined();
      expect(result.metadata.executionTime).toBeGreaterThanOrEqual(0);
    });
  });

  describe("Update Frequency Presets", () => {
    const frequencies: Array<
      "realtime" | "active" | "recent" | "weekly" | "monthly" | "quarterly"
    > = ["realtime", "active", "recent", "weekly", "monthly", "quarterly"];

    frequencies.forEach((frequency) => {
      it(`should work with ${frequency} update frequency`, async () => {
        await fs.writeFile(
          path.join(docsDir, `test-${frequency}.md`),
          "# Test",
        );

        const input: ValidateDocumentationFreshnessInput = {
          docsPath: docsDir,
          projectPath: projectDir,
          initializeMissing: true,
          updateFrequency: frequency,
        };

        const result = await validateDocumentationFreshness(input);

        expect(result.success).toBe(true);

        const frontmatter = await parseDocFrontmatter(
          path.join(docsDir, `test-${frequency}.md`),
        );
        expect(frontmatter.documcp?.update_frequency).toBe(frequency);
      });
    });
  });

  describe("Mixed File States", () => {
    it("should handle mix of initialized, updated, and skipped files", async () => {
      // File without metadata (will be initialized)
      await fs.writeFile(path.join(docsDir, "new.md"), "# New");

      // File with metadata (will be skipped if updateExisting=false)
      await fs.writeFile(
        path.join(docsDir, "existing.md"),
        `---
documcp:
  last_updated: "2025-01-01T00:00:00Z"
---
# Existing`,
      );

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
        updateExisting: false,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.success).toBe(true);
      expect(result.data.report.initialized).toBe(1);
      expect(result.data.report.skipped).toBe(1);
      expect(result.data.report.updated).toBe(0);
    });

    it("should update all when both initializeMissing and updateExisting are true", async () => {
      await fs.writeFile(path.join(docsDir, "new.md"), "# New");
      await fs.writeFile(
        path.join(docsDir, "existing.md"),
        `---
documcp:
  last_updated: "2025-01-01T00:00:00Z"
---
# Existing`,
      );

      const input: ValidateDocumentationFreshnessInput = {
        docsPath: docsDir,
        projectPath: projectDir,
        initializeMissing: true,
        updateExisting: true,
      };

      const result = await validateDocumentationFreshness(input);

      expect(result.success).toBe(true);
      expect(result.data.report.initialized).toBe(1);
      expect(result.data.report.updated).toBe(1);
    });
  });
});

```

--------------------------------------------------------------------------------
/docs/knowledge-graph.md:
--------------------------------------------------------------------------------

```markdown
---
documcp:
  last_updated: "2025-11-20T00:46:21.958Z"
  last_validated: "2025-11-20T00:46:21.958Z"
  auto_updated: false
  update_frequency: monthly
---

# Knowledge Graph Documentation

## Overview

The DocuMCP Knowledge Graph is an intelligent semantic network that captures relationships between projects, technologies, deployments, user preferences, and documentation patterns. It enables smart recommendations, deployment tracking, preference learning, and context-aware documentation generation.

## Architecture

### Core Components

- **Graph Database**: In-memory graph with persistent storage
- **Node Types**: Projects, technologies, configurations, deployments, users
- **Edge Types**: Relationships, dependencies, recommendations, usage patterns
- **Intelligence Layer**: Pattern recognition, recommendation engine, drift detection

### Node Types

#### Project Nodes

```typescript
interface ProjectNode {
  id: string;
  type: "project";
  properties: {
    name: string;
    path: string;
    primaryLanguage: string;
    framework?: string;
    lastAnalyzed: string;
    structure: {
      totalFiles: number;
      languages: Record<string, number>;
      hasTests: boolean;
      hasCI: boolean;
      hasDocs: boolean;
    };
  };
}
```

#### Technology Nodes

```typescript
interface TechnologyNode {
  id: string;
  type: "technology";
  properties: {
    name: string;
    category: "language" | "framework" | "tool" | "platform";
    version?: string;
    ecosystem: string;
    popularity: number;
    stability: number;
  };
}
```

#### Configuration Nodes

```typescript
interface ConfigurationNode {
  id: string;
  type: "configuration";
  properties: {
    ssg: string;
    settings: Record<string, any>;
    optimizations: string[];
    lastUsed: string;
    successRate: number;
  };
}
```

#### User Nodes

```typescript
interface UserNode {
  id: string;
  type: "user";
  properties: {
    userId: string;
    preferences: {
      preferredSSGs: string[];
      expertise: "beginner" | "intermediate" | "advanced";
      technologies: string[];
    };
    activity: {
      totalDeployments: number;
      successfulDeployments: number;
      lastActive: string;
    };
  };
}
```

### Edge Types

#### Project Relationships

- `depends_on`: Project dependencies and technology usage
- `similar_to`: Projects with similar characteristics
- `derived_from`: Project templates and forks

#### Deployment Tracking

- `deployed_with`: Project deployed using specific SSG/configuration
- `succeeded_at`: Successful deployment timestamp and metrics
- `failed_at`: Failed deployment with error analysis

#### User Patterns

- `prefers`: User SSG and technology preferences
- `succeeded_with`: User's successful deployment patterns
- `learned_from`: Preference updates based on experience

#### Recommendation Flows

- `recommends`: SSG recommendations with confidence scores
- `optimizes_for`: Configuration optimizations for specific scenarios
- `suggests`: Next-step suggestions based on current state

## Knowledge Graph Integration

### Initialization

```typescript
import { initializeKnowledgeGraph, getKnowledgeGraph } from "./kg-integration";

// Initialize with storage directory
await initializeKnowledgeGraph("/path/to/storage");

// Get graph instance
const kg = await getKnowledgeGraph();
```

### Project Management

#### Creating Projects

```typescript
import { createOrUpdateProject } from "./kg-integration";

const project = await createOrUpdateProject({
  id: "my-project-123",
  timestamp: new Date().toISOString(),
  path: "/path/to/project",
  projectName: "My Documentation Site",
  structure: {
    totalFiles: 150,
    languages: {
      typescript: 80,
      javascript: 45,
      markdown: 25,
    },
    hasTests: true,
    hasCI: true,
    hasDocs: true,
  },
});
```

#### Querying Projects

```typescript
// Find project by ID
const project = await kg.findNode({
  type: "project",
  properties: { id: "my-project-123" },
});

// Find similar projects
const similarProjects = await kg.findNodes({
  type: "project",
  properties: {
    primaryLanguage: "typescript",
  },
});
```

### Deployment Tracking

#### Recording Deployments

```typescript
import { trackDeployment } from "./kg-integration";

// Successful deployment
await trackDeployment("project-123", "docusaurus", true, {
  buildTime: 45000,
  branch: "main",
  customDomain: "docs.example.com",
});

// Failed deployment
await trackDeployment("project-123", "hugo", false, {
  errorMessage: "Build failed: missing dependencies",
  failureStage: "build",
  buildTime: 15000,
});
```

#### Querying Deployment History

```typescript
// Get all deployments for a project
const deployments = await kg.findEdges({
  source: "project:my-project-123",
  type: "deployed_with",
});

// Get successful deployments only
const successfulDeployments = deployments.filter(
  (edge) => edge.properties.success === true,
);
```

### Recommendation Engine

#### SSG Recommendations

```typescript
import { getDeploymentRecommendations } from "./kg-integration";

const recommendations = await getDeploymentRecommendations("project-123");

// Returns sorted by confidence
recommendations.forEach((rec) => {
  console.log(`${rec.ssg}: ${rec.confidence}% confidence`);
  console.log(`Reason: ${rec.reason}`);
});
```

#### Technology Compatibility

```typescript
// Find compatible technologies
const compatibleSSGs = await kg.findEdges({
  source: "technology:react",
  type: "compatible_with",
});

const recommendations = compatibleSSGs
  .filter((edge) => edge.target.startsWith("ssg:"))
  .sort((a, b) => b.confidence - a.confidence);
```

### User Preference Learning

#### Preference Management

```typescript
import { getUserPreferenceManager } from "./user-preferences";

const manager = await getUserPreferenceManager("user-123");

// Track SSG usage
await manager.trackSSGUsage({
  ssg: "docusaurus",
  success: true,
  timestamp: new Date().toISOString(),
  projectType: "javascript-library",
});

// Get personalized recommendations
const personalizedRecs = await manager.getSSGRecommendations();
```

#### Learning Patterns

```typescript
// Update preferences based on deployment success
await manager.updatePreferences({
  preferredSSGs: ["docusaurus", "hugo"],
  expertise: "intermediate",
  technologies: ["react", "typescript", "node"],
});

// Get usage statistics
const stats = await manager.getUsageStatistics();
console.log(`Total deployments: ${stats.totalDeployments}`);
console.log(`Success rate: ${stats.successRate}%`);
```

## Code Integration (Phase 1.2)

### Code File Entities

```typescript
import { createCodeFileEntities } from "./kg-code-integration";

// Create code file nodes with AST analysis
const codeFiles = await createCodeFileEntities(
  "project-123",
  "/path/to/repository",
);

// Each code file includes:
// - Functions and classes (via AST parsing)
// - Dependencies and imports
// - Complexity metrics
// - Change detection (content hash)
```

### Documentation Linking

```typescript
import {
  createDocumentationEntities,
  linkCodeToDocs,
} from "./kg-code-integration";

// Create documentation section nodes
const docSections = await createDocumentationEntities(
  "project-123",
  extractedContent,
);

// Link code files to documentation
const relationships = await linkCodeToDocs(codeFiles, docSections);

// Detect outdated documentation
const outdatedLinks = relationships.filter(
  (edge) => edge.type === "outdated_for",
);
```

## Query Patterns

### Basic Queries

#### Node Queries

```typescript
// Find all projects using React
const reactProjects = await kg.findNodes({
  type: "project",
  properties: {
    "structure.technologies": { contains: "react" },
  },
});

// Find high-success configurations
const reliableConfigs = await kg.findNodes({
  type: "configuration",
  properties: {
    successRate: { gte: 0.9 },
  },
});
```

#### Edge Queries

```typescript
// Find all deployment relationships
const deployments = await kg.findEdges({
  type: "deployed_with",
});

// Find user preferences
const userPrefs = await kg.findEdges({
  source: "user:developer-123",
  type: "prefers",
});
```

### Complex Queries

#### Multi-hop Traversal

```typescript
// Find recommended SSGs for similar projects
const recommendations = await kg.query(`
  MATCH (p1:project {id: 'my-project'})
  MATCH (p2:project)-[:similar_to]-(p1)
  MATCH (p2)-[:deployed_with]->(config:configuration)
  WHERE config.successRate > 0.8
  RETURN config.ssg, AVG(config.successRate) as avgSuccess
  ORDER BY avgSuccess DESC
`);
```

#### Aggregation Queries

```typescript
// Get deployment statistics by SSG
const ssgStats = await kg.aggregate({
  groupBy: "ssg",
  metrics: ["successRate", "buildTime", "userSatisfaction"],
  filters: {
    timestamp: { gte: "2024-01-01" },
  },
});
```

### Pattern Detection

#### Success Patterns

```typescript
// Identify high-success patterns
const successPatterns = await kg.findPatterns({
  nodeType: "project",
  edgeType: "deployed_with",
  threshold: 0.9,
  minOccurrences: 5,
});

// Example pattern: TypeScript + Docusaurus = 95% success rate
```

#### Failure Analysis

```typescript
// Analyze failure patterns
const failurePatterns = await kg.findPatterns({
  nodeType: "project",
  edgeType: "failed_at",
  groupBy: ["technology", "ssg", "errorType"],
});
```

## Memory Management

### Storage and Persistence

```typescript
// Configure storage directory
const storage = new KnowledgeGraphStorage({
  directory: "/path/to/kg-storage",
  format: "jsonl", // or "sqlite", "json"
  compression: true,
  backupInterval: "daily",
});

// Initialize with storage
await initializeKnowledgeGraph(storage);
```

### Memory Cleanup

```typescript
import { memoryCleanup } from "./memory-management";

// Clean old memories (default: 30 days)
await memoryCleanup({
  daysToKeep: 30,
  dryRun: false, // Set true to preview
});
```

### Memory Export/Import

```typescript
import { memoryExport, memoryImportAdvanced } from "./memory-management";

// Export knowledge graph
await memoryExport({
  format: "json",
  outputPath: "/backup/kg-export.json",
  filter: {
    nodeTypes: ["project", "configuration"],
    dateRange: { since: "2024-01-01" },
  },
});

// Import knowledge graph
await memoryImportAdvanced({
  inputPath: "/backup/kg-export.json",
  options: {
    mergeStrategy: "update",
    validateSchema: true,
    conflictResolution: "newer-wins",
  },
});
```

## Analytics and Insights

### Memory Insights

```typescript
import { memoryInsights } from "./memory-management";

const insights = await memoryInsights({
  projectId: "my-project",
  timeRange: {
    from: "2024-01-01",
    to: "2024-12-31",
  },
});

console.log(`Deployment success rate: ${insights.deploymentSuccessRate}`);
console.log(`Most successful SSG: ${insights.mostSuccessfulSSG}`);
console.log(`Optimization opportunities: ${insights.optimizations.length}`);
```

### Temporal Analysis

```typescript
import { memoryTemporalAnalysis } from "./memory-management";

const trends = await memoryTemporalAnalysis({
  analysisType: "patterns",
  query: {
    nodeType: "project",
    edgeType: "deployed_with",
    timeWindow: "monthly",
  },
});

// Analyze deployment trends over time
trends.patterns.forEach((pattern) => {
  console.log(`${pattern.month}: ${pattern.successRate}% success`);
});
```

### Intelligent Analysis

```typescript
import { memoryIntelligentAnalysis } from "./memory-management";

const analysis = await memoryIntelligentAnalysis({
  projectPath: "/path/to/project",
  baseAnalysis: repositoryAnalysis,
});

console.log(`Predicted success rate: ${analysis.predictions.successRate}`);
console.log(`Recommendations: ${analysis.recommendations.length}`);
console.log(`Risk factors: ${analysis.riskFactors.length}`);
```

## Visualization

### Network Visualization

```typescript
import { memoryVisualization } from "./memory-management";

// Generate network diagram
const networkViz = await memoryVisualization({
  visualizationType: "network",
  options: {
    layout: "force-directed",
    nodeSize: "degree",
    colorBy: "nodeType",
    filterEdges: ["deployed_with", "recommends"],
  },
});

// Export as SVG or interactive HTML
await networkViz.export("/output/knowledge-graph.svg");
```

### Timeline Dashboard

```typescript
// Generate deployment timeline
const timeline = await memoryVisualization({
  visualizationType: "timeline",
  options: {
    timeRange: "last-6-months",
    groupBy: "project",
    metrics: ["success-rate", "build-time"],
    interactive: true,
  },
});
```

## Best Practices

### Performance Optimization

- Use indexed queries for frequent lookups
- Implement query result caching for repeated patterns
- Periodically clean up outdated relationships
- Use batch operations for bulk updates

### Data Quality

- Validate node properties before insertion
- Implement schema versioning for compatibility
- Use unique constraints to prevent duplicates
- Regular integrity checks and repair

### Security and Privacy

- Encrypt sensitive preference data
- Implement access controls for user data
- Audit log for data access and modifications
- GDPR compliance for user preference management

### Monitoring and Maintenance

- Monitor query performance and optimization
- Track knowledge graph growth and memory usage
- Automated backup and disaster recovery
- Version control for schema changes

## Troubleshooting

### Common Issues

**Memory Growth**

- Implement periodic cleanup of old deployment records
- Archive historical data beyond retention period
- Monitor node/edge count growth patterns

**Query Performance**

- Add indexes for frequently queried properties
- Optimize complex traversal queries
- Use query result caching for expensive operations

**Data Consistency**

- Validate relationships before creation
- Implement transaction-like operations for atomic updates
- Regular consistency checks and repair tools

### Debug Tools

**Graph Inspector**

```typescript
import { graphInspector } from "./debug-tools";

const stats = await graphInspector.getStatistics();
console.log(`Nodes: ${stats.nodeCount}, Edges: ${stats.edgeCount}`);
console.log(`Storage size: ${stats.storageSize}MB`);

const orphanedNodes = await graphInspector.findOrphanedNodes();
console.log(`Orphaned nodes: ${orphanedNodes.length}`);
```

**Query Profiler**

```typescript
const profiler = await graphInspector.profileQuery(complexQuery);
console.log(`Execution time: ${profiler.executionTime}ms`);
console.log(`Nodes traversed: ${profiler.nodesTraversed}`);
console.log(`Optimization suggestions: ${profiler.suggestions}`);
```

## Related Documentation

- [Memory System](./tutorials/memory-workflows.md) - Overall memory architecture and patterns
- [User Preferences](./reference/mcp-tools.md#manage_preferences) - Preference learning and management
- [Deployment Tracking](./explanation/architecture.md#deployment-tracking) - Deployment outcome analysis
- [Repository Analysis](./how-to/repository-analysis.md) - Project analysis and indexing

```

--------------------------------------------------------------------------------
/tests/api/mcp-responses.test.ts:
--------------------------------------------------------------------------------

```typescript
// API tests for MCP response format compliance and standardization
import { formatMCPResponse, MCPToolResponse } from "../../src/types/api";

describe("API Response Standardization Tests", () => {
  describe("MCPToolResponse Interface Compliance", () => {
    // Metadata stamp shared by the structure-validation tests below.
    const baseMetadata = {
      toolVersion: "1.0.0",
      executionTime: 100,
      timestamp: "2023-01-01T00:00:00.000Z",
    };

    it("should validate successful response structure", () => {
      // A fully-populated success response: data, metadata,
      // recommendations and next steps are all present.
      const response: MCPToolResponse<{ data: string }> = {
        success: true,
        data: { data: "test-data" },
        metadata: baseMetadata,
        recommendations: [
          {
            type: "info",
            title: "Test Recommendation",
            description: "This is a test recommendation",
          },
        ],
        nextSteps: [
          {
            action: "Next Action",
            toolRequired: "next_tool",
            description: "Description of next step",
            priority: "high",
          },
        ],
      };

      expect(response.success).toBe(true);
      expect(response.data).toBeDefined();
      expect(response.metadata).toBeDefined();
      expect(response.metadata.toolVersion).toBe("1.0.0");
      expect(response.metadata.executionTime).toBe(100);
      expect(response.recommendations).toHaveLength(1);
      expect(response.nextSteps).toHaveLength(1);
    });

    it("should validate error response structure", () => {
      // An error response carries an error object and no data payload.
      const response: MCPToolResponse = {
        success: false,
        error: {
          code: "TEST_ERROR",
          message: "Test error message",
          details: { context: "test" },
          resolution: "Test resolution steps",
        },
        metadata: { ...baseMetadata, executionTime: 50 },
      };

      expect(response.success).toBe(false);
      expect(response.error).toBeDefined();
      expect(response.error!.code).toBe("TEST_ERROR");
      expect(response.error!.message).toBe("Test error message");
      expect(response.error!.resolution).toBe("Test resolution steps");
      expect(response.data).toBeUndefined();
    });

    it("should validate recommendation types", () => {
      // Each recommendation type must be one of the three known severities.
      const recommendations: Array<{
        type: "info" | "warning" | "critical";
        title: string;
        description: string;
      }> = [
        { type: "info", title: "Info", description: "Info description" },
        {
          type: "warning",
          title: "Warning",
          description: "Warning description",
        },
        {
          type: "critical",
          title: "Critical",
          description: "Critical description",
        },
      ];

      for (const rec of recommendations) {
        expect(["info", "warning", "critical"]).toContain(rec.type);
        expect(rec.title).toBeDefined();
        expect(rec.description).toBeDefined();
      }
    });

    it("should validate next step priorities", () => {
      // Each next step carries a priority from the known three-level scale.
      const nextSteps: Array<{
        action: string;
        toolRequired: string;
        priority: "low" | "medium" | "high";
      }> = [
        { action: "Low Priority", toolRequired: "tool1", priority: "low" },
        {
          action: "Medium Priority",
          toolRequired: "tool2",
          priority: "medium",
        },
        { action: "High Priority", toolRequired: "tool3", priority: "high" },
      ];

      for (const step of nextSteps) {
        expect(["low", "medium", "high"]).toContain(step.priority);
        expect(step.action).toBeDefined();
        expect(step.toolRequired).toBeDefined();
      }
    });
  });

  describe("formatMCPResponse Function", () => {
    it("should format successful response correctly", () => {
      // A complete success response: data, metadata, recommendations and
      // next steps should each surface as a distinct content section.
      const response: MCPToolResponse<{ result: string }> = {
        success: true,
        data: { result: "success" },
        metadata: {
          toolVersion: "1.0.0",
          executionTime: 123,
          timestamp: "2023-01-01T12:00:00.000Z",
        },
        recommendations: [
          {
            type: "info",
            title: "Success",
            description: "Operation completed successfully",
          },
        ],
        nextSteps: [
          {
            action: "Proceed to next step",
            toolRequired: "next_tool",
            priority: "medium",
          },
        ],
      };

      const formatted = formatMCPResponse(response);

      expect(formatted.content).toBeDefined();
      expect(formatted.content.length).toBeGreaterThan(0);
      expect(formatted.isError).toBeFalsy();

      // Check main data is included
      const dataContent = formatted.content.find((c) =>
        c.text.includes("success"),
      );
      expect(dataContent).toBeDefined();

      // Check metadata is included
      const metadataContent = formatted.content.find((c) =>
        c.text.includes("123ms"),
      );
      expect(metadataContent).toBeDefined();

      // Check recommendations are included
      const recommendationContent = formatted.content.find((c) =>
        c.text.includes("Recommendations:"),
      );
      expect(recommendationContent).toBeDefined();

      // Check next steps are included
      const nextStepContent = formatted.content.find((c) =>
        c.text.includes("Next Steps:"),
      );
      expect(nextStepContent).toBeDefined();
    });

    it("should format error response correctly", () => {
      // An error response must be flagged with isError and include both
      // the error message and the resolution guidance in the content.
      const errorResponse: MCPToolResponse = {
        success: false,
        error: {
          code: "VALIDATION_ERROR",
          message: "Input validation failed",
          resolution: "Check your input parameters",
        },
        metadata: {
          toolVersion: "1.0.0",
          executionTime: 25,
          timestamp: "2023-01-01T12:00:00.000Z",
        },
      };

      const formatted = formatMCPResponse(errorResponse);

      expect(formatted.content).toBeDefined();
      expect(formatted.isError).toBe(true);

      // Check error message is included
      const errorContent = formatted.content.find((c) =>
        c.text.includes("Input validation failed"),
      );
      expect(errorContent).toBeDefined();

      // Check resolution is included
      const resolutionContent = formatted.content.find((c) =>
        c.text.includes("Check your input parameters"),
      );
      expect(resolutionContent).toBeDefined();
    });

    it("should handle responses without optional fields", () => {
      // Recommendations and next steps are optional; their section headers
      // must not appear when the fields are absent.
      const minimalResponse: MCPToolResponse<string> = {
        success: true,
        data: "minimal data",
        metadata: {
          toolVersion: "1.0.0",
          executionTime: 10,
          timestamp: "2023-01-01T12:00:00.000Z",
        },
      };

      const formatted = formatMCPResponse(minimalResponse);

      expect(formatted.content).toBeDefined();
      expect(formatted.isError).toBeFalsy();

      // Should not include recommendations or next steps sections
      const fullText = formatted.content.map((c) => c.text).join("\n");
      expect(fullText).not.toContain("Recommendations:");
      expect(fullText).not.toContain("Next Steps:");
    });

    it("should include recommendation icons correctly", () => {
      // Record<string, never> models an intentionally-empty data payload
      // ({} as a *type* would accept any non-nullish value).
      const response: MCPToolResponse<Record<string, never>> = {
        success: true,
        data: {},
        metadata: {
          toolVersion: "1.0.0",
          executionTime: 10,
          timestamp: "2023-01-01T12:00:00.000Z",
        },
        recommendations: [
          { type: "info", title: "Info", description: "Info description" },
          {
            type: "warning",
            title: "Warning",
            description: "Warning description",
          },
          {
            type: "critical",
            title: "Critical",
            description: "Critical description",
          },
        ],
      };

      const formatted = formatMCPResponse(response);
      const recommendationText =
        formatted.content.find((c) => c.text.includes("Recommendations:"))
          ?.text || "";

      expect(recommendationText).toContain("ℹ️"); // Info icon
      expect(recommendationText).toContain("⚠️"); // Warning icon
      expect(recommendationText).toContain("🔴"); // Critical icon
    });

    it("should format next steps without toolRequired but with description", () => {
      const response: MCPToolResponse<Record<string, never>> = {
        success: true,
        data: {},
        metadata: {
          toolVersion: "1.0.0",
          executionTime: 10,
          timestamp: "2023-01-01T12:00:00.000Z",
        },
        nextSteps: [
          {
            action: "Manual Step",
            description: "This step requires manual intervention",
            priority: "high",
          },
        ],
      };

      const formatted = formatMCPResponse(response);
      const nextStepText =
        formatted.content.find((c) => c.text.includes("Next Steps:"))?.text ||
        "";

      expect(nextStepText).toContain("Manual Step");
      expect(nextStepText).toContain("This step requires manual intervention");
      expect(nextStepText).not.toContain("use "); // Should not have "use" since no toolRequired
    });
  });

  describe("Response Consistency Across Tools", () => {
    it("should ensure all tools follow the same metadata structure", () => {
      const sampleMetadata = {
        toolVersion: "1.0.0",
        executionTime: 100,
        timestamp: "2023-01-01T12:00:00.000Z",
      };

      // semver-shaped version, non-negative numeric duration,
      // ISO-8601-prefixed timestamp.
      expect(sampleMetadata.toolVersion).toMatch(/^\d+\.\d+\.\d+$/);
      expect(typeof sampleMetadata.executionTime).toBe("number");
      expect(sampleMetadata.executionTime).toBeGreaterThanOrEqual(0);
      expect(sampleMetadata.timestamp).toMatch(
        /^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}/,
      );
    });

    it("should validate error code consistency", () => {
      // Error codes are SCREAMING_SNAKE_CASE and end in _FAILED.
      const knownErrorCodes = [
        "ANALYSIS_FAILED",
        "RECOMMENDATION_FAILED",
        "CONFIG_GENERATION_FAILED",
        "STRUCTURE_SETUP_FAILED",
        "DEPLOYMENT_SETUP_FAILED",
        "VERIFICATION_FAILED",
      ];

      for (const code of knownErrorCodes) {
        expect(code).toMatch(/^[A-Z_]+$/);
        expect(code).toContain("_");
        expect(code.endsWith("_FAILED")).toBe(true);
      }
    });

    it("should validate next step tool references", () => {
      // Tool identifiers are lowercase snake_case with no dashes or spaces.
      const toolNames = [
        "analyze_repository",
        "recommend_ssg",
        "generate_config",
        "setup_structure",
        "deploy_pages",
        "verify_deployment",
      ];

      for (const tool of toolNames) {
        expect(tool).toMatch(/^[a-z_]+$/);
        expect(tool).not.toContain("-");
        expect(tool).not.toContain(" ");
      }
    });

    it("should validate recommendation action patterns", () => {
      // Action titles: capitalized, meaningfully long, no trailing period.
      const actionTitles = [
        "Get SSG Recommendation",
        "Generate Configuration",
        "Setup Documentation Structure",
        "Setup GitHub Pages Deployment",
        "Verify Deployment Setup",
      ];

      for (const action of actionTitles) {
        expect(action).toMatch(/^[A-Z]/); // Starts with capital
        expect(action.length).toBeGreaterThan(5); // Meaningful length
        expect(action.endsWith(".")).toBe(false); // No trailing period
      }
    });
  });

  describe("Backward Compatibility", () => {
    it("should maintain MCP content format compatibility", () => {
      const response: MCPToolResponse<{ test: boolean }> = {
        success: true,
        data: { test: true },
        metadata: {
          toolVersion: "1.0.0",
          executionTime: 50,
          timestamp: "2023-01-01T12:00:00.000Z",
        },
      };

      const formatted = formatMCPResponse(response);

      // MCP clients require a content array of { type: "text", text } items.
      expect(formatted.content).toBeDefined();
      expect(Array.isArray(formatted.content)).toBe(true);

      for (const item of formatted.content) {
        expect(item.type).toBe("text");
        expect(typeof item.text).toBe("string");
        expect(item.text.length).toBeGreaterThan(0);
      }
    });

    it("should handle legacy response format gracefully", () => {
      // Payloads shaped like the older response format (extra fields, not
      // strictly typed) must still format without throwing.
      const legacyStyleData = {
        success: true,
        result: "legacy result",
        timestamp: "2023-01-01T12:00:00.000Z",
      };

      const formatLegacy = () =>
        formatMCPResponse({
          success: true,
          data: legacyStyleData,
          metadata: {
            toolVersion: "1.0.0",
            executionTime: 100,
            timestamp: "2023-01-01T12:00:00.000Z",
          },
        });

      expect(formatLegacy).not.toThrow();
    });
  });

  describe("Error Boundary Testing", () => {
    it("should handle undefined data gracefully", () => {
      // data is intentionally omitted; the formatter must still produce
      // at least one content item (the metadata section).
      const response: MCPToolResponse = {
        success: true,
        metadata: {
          toolVersion: "1.0.0",
          executionTime: 10,
          timestamp: "2023-01-01T12:00:00.000Z",
        },
      };

      const formatted = formatMCPResponse(response);
      expect(formatted.content).toBeDefined();
      expect(formatted.content.length).toBeGreaterThan(0);
    });

    it("should handle null values in data", () => {
      const response: MCPToolResponse<{ value: null }> = {
        success: true,
        data: { value: null },
        metadata: {
          toolVersion: "1.0.0",
          executionTime: 10,
          timestamp: "2023-01-01T12:00:00.000Z",
        },
      };

      expect(() => formatMCPResponse(response)).not.toThrow();
    });

    it("should handle very large data objects", () => {
      const largeData = {
        items: Array.from({ length: 1000 }, (_, i) => ({
          id: i,
          value: `item-${i}`,
        })),
      };

      const response: MCPToolResponse<typeof largeData> = {
        success: true,
        data: largeData,
        metadata: {
          toolVersion: "1.0.0",
          executionTime: 1000,
          timestamp: "2023-01-01T12:00:00.000Z",
        },
      };

      const formatted = formatMCPResponse(response);
      expect(formatted.content).toBeDefined();

      // Should include the large data in JSON format
      const dataContent = formatted.content.find((c) =>
        c.text.includes('"items"'),
      );
      expect(dataContent).toBeDefined();
    });

    it("should handle circular references safely", () => {
      // Precise self-referential type instead of `any` so the checker
      // still validates property access on circularData.
      const circularData: { name: string; self?: unknown } = { name: "test" };
      circularData.self = circularData;

      // JSON.stringify cannot serialize circular structures and throws.
      expect(() => {
        JSON.stringify(circularData);
      }).toThrow();

      // Callers are expected to pass acyclic data; this test documents
      // that expectation by exercising the formatter with safe data only.
      const response: MCPToolResponse<{ safe: string }> = {
        success: true,
        data: { safe: "data" },
        metadata: {
          toolVersion: "1.0.0",
          executionTime: 10,
          timestamp: "2023-01-01T12:00:00.000Z",
        },
      };

      expect(() => formatMCPResponse(response)).not.toThrow();
    });
  });
});

```

--------------------------------------------------------------------------------
/tests/memory/manager.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Comprehensive unit tests for Memory Manager
 * Tests memory management, search, caching, and context-aware operations
 * Part of Issue #54 - Core Memory System Unit Tests
 */

import { promises as fs } from "fs";
import path from "path";
import os from "os";
import {
  MemoryManager,
  MemoryContext,
  MemorySearchOptions,
} from "../../src/memory/manager.js";
import { MemoryEntry } from "../../src/memory/storage.js";

describe("MemoryManager", () => {
  let manager: MemoryManager;
  let tempDir: string;

  beforeEach(async () => {
    // Create a unique temp directory per test so storage is fully isolated.
    // Note: `slice(2, 11)` replaces the deprecated `substr(2, 9)` — same
    // 9-character random suffix, non-deprecated API.
    tempDir = path.join(
      os.tmpdir(),
      `memory-manager-test-${Date.now()}-${Math.random()
        .toString(36)
        .slice(2, 11)}`,
    );
    await fs.mkdir(tempDir, { recursive: true });
    manager = new MemoryManager(tempDir);
    await manager.initialize();
  });

  afterEach(async () => {
    // Best-effort cleanup of the per-test temp directory.
    try {
      await fs.rm(tempDir, { recursive: true, force: true });
    } catch (error) {
      // Ignore cleanup errors
    }
  });

  describe("Basic Memory Operations", () => {
    test("should create manager instance and initialize", async () => {
      expect(manager).toBeDefined();
      expect(manager).toBeInstanceOf(MemoryManager);
    });

    test("should remember and recall memories", async () => {
      const payload = {
        projectName: "test-project",
        language: "typescript",
        framework: "react",
      };
      const meta = {
        projectId: "test-proj-001",
        repository: "github.com/test/repo",
        tags: ["frontend", "typescript"],
      };

      // Set context so the projectId is carried onto the stored entry.
      manager.setContext({ projectId: "test-proj-001" });

      const stored = await manager.remember("analysis", payload, meta);
      expect(stored.id).toBeDefined();
      expect(typeof stored.id).toBe("string");

      // Round-trip: recall must return the same data and metadata.
      const fetched = await manager.recall(stored.id);
      expect(fetched).not.toBeNull();
      expect(fetched?.data).toEqual(payload);
      expect(fetched?.metadata.projectId).toBe("test-proj-001");
      expect(fetched?.type).toBe("analysis");
    });

    test("should return null for non-existent memory", async () => {
      expect(await manager.recall("non-existent-id")).toBeNull();
    });

    test("should forget memories", async () => {
      const entry = await manager.remember("analysis", { data: "to-forget" });

      // Present before deletion...
      expect(await manager.recall(entry.id)).not.toBeNull();

      // ...forget reports success...
      expect(await manager.forget(entry.id)).toBe(true);

      // ...and the entry is gone afterwards.
      expect(await manager.recall(entry.id)).toBeNull();
    });

    test("should return false when forgetting non-existent memory", async () => {
      expect(await manager.forget("non-existent-id")).toBe(false);
    });
  });

  describe("Context Management", () => {
    test("should set and get context", async () => {
      const ctx: MemoryContext = {
        projectId: "context-test",
        repository: "github.com/context/repo",
        branch: "feature/memory",
        user: "test-user",
        session: "session-123",
      };
      manager.setContext(ctx);

      // A memory created after setContext picks up the active projectId.
      const entry = await manager.remember("analysis", {
        contextTest: true,
        value: 42,
      });
      expect(entry.metadata.projectId).toBe("context-test");
    });

    test("should use context when remembering", async () => {
      manager.setContext({
        projectId: "auto-context-test",
        repository: "github.com/auto/repo",
      });

      // Every memory created under this context inherits it.
      const first = await manager.remember("analysis", { step: 1 });
      const second = await manager.remember("recommendation", { step: 2 });
      const third = await manager.remember("deployment", { step: 3 });

      for (const entry of [first, second, third]) {
        expect(entry.metadata.projectId).toBe("auto-context-test");
      }

      // The context also survives a recall round-trip.
      const recalled = await manager.recall(first.id);
      expect(recalled?.metadata.projectId).toBe("auto-context-test");
    });
  });

  describe("Search Functionality", () => {
    test("should handle search operations", async () => {
      // Seed one entry so searches have something to match.
      manager.setContext({ projectId: "search-test" });
      await manager.remember(
        "analysis",
        { project: "test-search", language: "typescript" },
        { tags: ["frontend"] },
      );

      // An empty string query must still yield an array.
      const everything = await manager.search("");
      expect(Array.isArray(everything)).toBe(true);

      // Structured queries should not throw; we only assert the shape here
      // since search semantics may be basic.
      const byProject = await manager.search({ projectId: "search-test" });
      expect(Array.isArray(byProject)).toBe(true);
    });

    test("should handle search with different query types", async () => {
      const opts: MemorySearchOptions = {
        semantic: false,
        fuzzy: true,
        sortBy: "timestamp",
      };

      const hits = await manager.search("test", opts);
      expect(Array.isArray(hits)).toBe(true);
    });
  });

  describe("Memory Analytics", () => {
    test("should handle basic memory queries", async () => {
      manager.setContext({ projectId: "analytics-test" });
      await manager.remember("analysis", { score: 85 });
      await manager.remember("recommendation", { confidence: 0.8 });

      const everything = await manager.search("");
      expect(Array.isArray(everything)).toBe(true);

      // Result count may vary by implementation; when anything comes back,
      // spot-check the entry shape.
      if (everything.length > 0) {
        for (const key of ["type", "data", "metadata"]) {
          expect(everything[0]).toHaveProperty(key);
        }
      }
    });
  });

  describe("Caching and Performance", () => {
    test("should handle performance operations", async () => {
      manager.setContext({ projectId: "cache-test" });
      await manager.remember("analysis", { cached: true });
      await manager.remember("recommendation", { cached: true });

      // Run the same empty-query search twice, timing each pass.
      const timedSearch = async () => {
        const begin = Date.now();
        const hits = await manager.search("");
        return { hits, elapsed: Date.now() - begin };
      };

      const first = await timedSearch();
      const second = await timedSearch();

      expect(Array.isArray(first.hits)).toBe(true);
      expect(Array.isArray(second.hits)).toBe(true);

      // Both passes should complete well within a second.
      expect(first.elapsed).toBeLessThan(1000);
      expect(second.elapsed).toBeLessThan(1000);
    });

    test("should handle concurrent operations safely", async () => {
      const total = 10;
      manager.setContext({ projectId: "concurrent-test" });

      // Fire all remember() calls without awaiting in between.
      const pending: Promise<MemoryEntry>[] = [];
      for (let i = 0; i < total; i++) {
        pending.push(
          manager.remember(
            "analysis",
            { index: i, data: `concurrent-test-${i}` },
            { tags: [`tag-${i % 5}`] },
          ),
        );
      }

      const created = await Promise.all(pending);
      expect(created).toHaveLength(total);
      // Every concurrent write must have received a distinct id.
      expect(new Set(created.map((m) => m.id)).size).toBe(total);
    });
  });

  describe("Memory Lifecycle Management", () => {
    test("should manage memory entries over time", async () => {
      manager.setContext({ projectId: "lifecycle-test" });

      const entry = await manager.remember("analysis", {
        version: 1,
        status: "draft",
      });
      expect(entry.data.version).toBe(1);
      expect(entry.data.status).toBe("draft");

      // The same values must survive a round-trip through storage.
      const persisted = await manager.recall(entry.id);
      expect(persisted?.data.version).toBe(1);
      expect(persisted?.data.status).toBe("draft");
    });

    test("should handle bulk operations efficiently", async () => {
      const count = 20;
      manager.setContext({ projectId: "bulk-test" });

      // Sequentially create a batch of entries and time the writes.
      const writeStart = Date.now();
      const created: MemoryEntry[] = [];
      for (let i = 0; i < count; i++) {
        created.push(
          await manager.remember("analysis", {
            index: i,
            category: i % 3 === 0 ? "A" : i % 3 === 1 ? "B" : "C",
          }),
        );
      }
      const writeTime = Date.now() - writeStart;

      expect(writeTime).toBeLessThan(5000); // Should complete within 5 seconds
      expect(created).toHaveLength(count);

      // Searching over the batch should also stay fast.
      const searchStart = Date.now();
      const found = await manager.search("");
      const searchTime = Date.now() - searchStart;

      expect(Array.isArray(found)).toBe(true);
      expect(searchTime).toBeLessThan(1000); // Should search within 1 second
    });
  });

  describe("Error Handling", () => {
    test("should handle invalid memory types gracefully", async () => {
      // TypeScript should prevent this, but test runtime behavior
      const entry = await manager.remember("configuration", { test: true });
      const fetched = await manager.recall(entry.id);

      expect(fetched?.type).toBe("configuration");
      expect(fetched?.data.test).toBe(true);
    });

    test("should handle malformed search queries", async () => {
      // None of these edge-case queries should throw.
      for (const query of ["", "@#$%^&*()[]{}", "测试🚀"]) {
        const hits = await manager.search(query);
        expect(Array.isArray(hits)).toBe(true);
      }
    });

    test("should handle memory storage errors", async () => {
      // Exercise storage with a deliberately oversized payload.
      const bigPayload = {
        huge: "x".repeat(100000), // 100KB string
        array: Array.from({ length: 10000 }, (_, i) => ({
          id: i,
          data: `item-${i}`,
        })),
      };

      const entry = await manager.remember("analysis", bigPayload);
      expect(entry.id).toBeDefined();

      // Large data must round-trip intact.
      const fetched = await manager.recall(entry.id);
      expect(fetched?.data.huge).toHaveLength(100000);
      expect(fetched?.data.array).toHaveLength(10000);
    });

    test("should handle non-existent memory operations", async () => {
      // recall/forget on an unknown id are no-ops with sentinel returns.
      expect(await manager.recall("non-existent-id")).toBeNull();
      expect(await manager.forget("non-existent-id")).toBe(false);

      // A query with no matches yields an empty array, not an error.
      const misses = await manager.search("definitely-not-found-12345");
      expect(Array.isArray(misses)).toBe(true);
      expect(misses).toHaveLength(0);
    });
  });

  // Exercises the manager's EventEmitter surface. Kept byte-identical: the
  // listener callbacks contain expect() calls whose timing relative to the
  // emit is implementation-dependent, so a restyle could change which
  // assertions actually run.
  describe("Event System", () => {
    test("should emit events on memory operations", async () => {
      let eventCount = 0;
      const events: string[] = [];

      // Listener assertions fire only if/when the manager emits; they also
      // validate the emitted payload shape.
      manager.on("memory-created", (entry: MemoryEntry) => {
        expect(entry.type).toBe("analysis");
        expect(entry.data.eventTest).toBe(true);
        eventCount++;
        events.push("created");
      });

      manager.on("memory-deleted", (id: string) => {
        expect(typeof id).toBe("string");
        eventCount++;
        events.push("deleted");
      });

      // Trigger events
      const memoryEntry = await manager.remember("analysis", {
        eventTest: true,
      });
      await manager.forget(memoryEntry.id);

      // Give events time to fire
      await new Promise((resolve) => setTimeout(resolve, 50));

      // Verify events were triggered
      expect(eventCount).toBeGreaterThanOrEqual(1); // At least memory-created should fire
      expect(events).toContain("created");
    });

    test("should emit context change events", () => {
      let contextChanged = false;

      // NOTE(review): contextChanged is set by the listener but never
      // asserted, so this test cannot fail on a missing emit.
      manager.on("context-changed", (context: MemoryContext) => {
        expect(context.projectId).toBe("event-test");
        expect(context.user).toBe("event-user");
        contextChanged = true;
      });

      manager.setContext({
        projectId: "event-test",
        user: "event-user",
      });

      // Give event time to fire
      // NOTE(review): this test is synchronous, so the setTimeout callback
      // below runs after the test has already completed — it is effectively
      // dead code and its expect() is a no-op check.
      setTimeout(() => {
        // Event system may not be implemented, so we don't require it
        expect(true).toBe(true);
      }, 50);
    });
  });

  describe("Search with Grouping and Sorting", () => {
    test("should group results by type", async () => {
      await manager.remember("analysis", { test: 1 }, { projectId: "proj1" });
      await manager.remember("deployment", { test: 2 }, { projectId: "proj1" });
      await manager.remember("analysis", { test: 3 }, { projectId: "proj2" });

      // groupBy: "type" returns a map keyed by memory type.
      const byType: any = await manager.search("", { groupBy: "type" });

      expect(byType).toHaveProperty("analysis");
      expect(byType).toHaveProperty("deployment");
      expect(byType.analysis.length).toBe(2);
      expect(byType.deployment.length).toBe(1);
    });

    test("should group results by project", async () => {
      manager.setContext({ projectId: "proj1" });
      await manager.remember("analysis", { test: 1 });

      manager.setContext({ projectId: "proj2" });
      await manager.remember("analysis", { test: 2 });

      // groupBy: "project" keys the map by projectId.
      const byProject: any = await manager.search("", { groupBy: "project" });

      expect(byProject).toHaveProperty("proj1");
      expect(byProject).toHaveProperty("proj2");
    });

    test("should group results by date", async () => {
      await manager.remember("analysis", { test: 1 }, { projectId: "proj1" });

      const byDate: any = await manager.search("", { groupBy: "date" });

      // Entries created just now should fall under today's YYYY-MM-DD bucket.
      const todayKey = new Date().toISOString().split("T")[0];
      expect(byDate).toHaveProperty(todayKey);
    });

    test("should sort results by type", async () => {
      await manager.remember("recommendation", { test: 1 }, {});
      await manager.remember("analysis", { test: 2 }, {});

      // Alphabetical by type: "analysis" sorts before "recommendation".
      const sorted = await manager.search("", { sortBy: "type" });

      expect(sorted[0].type).toBe("analysis");
      expect(sorted[1].type).toBe("recommendation");
    });
  });
});

```

--------------------------------------------------------------------------------
/tests/memory/kg-link-validator.test.ts:
--------------------------------------------------------------------------------

```typescript
import { promises as fs } from "fs";
import path from "path";
import os from "os";
import {
  validateExternalLinks,
  validateAndStoreDocumentationLinks,
  extractLinksFromContent,
  storeLinkValidationInKG,
  getLinkValidationHistory,
} from "../../src/memory/kg-link-validator";
import { getKnowledgeGraph } from "../../src/memory/kg-integration";

describe("KG Link Validator", () => {
  let tempDir: string;
  const originalCwd = process.cwd();

  beforeEach(async () => {
    // Include a random suffix alongside the timestamp so two tests starting
    // within the same millisecond cannot collide on the same directory
    // (matches the naming pattern used by the other memory test suites).
    tempDir = path.join(
      os.tmpdir(),
      `kg-link-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`,
    );
    await fs.mkdir(tempDir, { recursive: true });
    process.chdir(tempDir);
  });

  afterEach(async () => {
    // Restore the original working directory before removing the temp dir.
    process.chdir(originalCwd);
    try {
      await fs.rm(tempDir, { recursive: true, force: true });
    } catch {
      // Ignore cleanup errors
    }
  });

  describe("validateExternalLinks", () => {
    // NOTE: these cases hit live hosts (google.com, github.com, httpstat.us),
    // so assertions stay tolerant of network variability.
    it("should validate valid URLs", async () => {
      const summary = await validateExternalLinks(
        ["https://www.google.com", "https://github.com"],
        { timeout: 5000 },
      );

      expect(summary.totalLinks).toBe(2);
      expect(summary.results).toHaveLength(2);
      // Every link lands in exactly one status bucket.
      expect(
        summary.validLinks + summary.brokenLinks + summary.unknownLinks,
      ).toBe(2);
    });

    it("should detect broken links", async () => {
      const summary = await validateExternalLinks(
        ["https://this-domain-definitely-does-not-exist-12345.com"],
        { timeout: 3000 },
      );

      expect(summary.totalLinks).toBe(1);
      expect(summary.results).toHaveLength(1);
      // DNS failure may surface as either broken or unknown.
      expect(summary.brokenLinks + summary.unknownLinks).toBeGreaterThan(0);
    });

    it("should handle empty URL list", async () => {
      const summary = await validateExternalLinks([]);

      expect(summary.totalLinks).toBe(0);
      expect(summary.results).toHaveLength(0);
    });

    it("should respect timeout option", async () => {
      const begin = Date.now();
      await validateExternalLinks(["https://www.google.com"], {
        timeout: 1000,
      });

      // Should complete reasonably quickly with a short timeout.
      expect(Date.now() - begin).toBeLessThan(10000);
    });

    it("should use default timeout when not provided", async () => {
      const summary = await validateExternalLinks(["https://www.google.com"]);

      expect(summary).toBeDefined();
      expect(summary.totalLinks).toBe(1);
    });

    it("should handle validation errors gracefully", async () => {
      // httpstat.us serves an HTTP 500 for this path.
      const summary = await validateExternalLinks(["https://httpstat.us/500"], {
        timeout: 5000,
      });

      expect(summary.totalLinks).toBe(1);
      // A 500 should be classified as broken or unknown.
      expect(summary.brokenLinks + summary.unknownLinks).toBeGreaterThan(0);
    });

    it("should count warning links correctly", async () => {
      // A 301 redirect; fetch follows redirects, so this may count as valid.
      const summary = await validateExternalLinks(["https://httpstat.us/301"], {
        timeout: 5000,
      });

      expect(summary.totalLinks).toBe(1);
      expect(
        summary.validLinks +
          summary.brokenLinks +
          summary.warningLinks +
          summary.unknownLinks,
      ).toBe(1);
    });

    it("should handle network errors in validation loop", async () => {
      // One unreachable host must not abort validation of the rest.
      const summary = await validateExternalLinks(
        ["https://invalid-url-12345.test", "https://www.google.com"],
        { timeout: 3000 },
      );

      expect(summary.totalLinks).toBe(2);
      expect(summary.results).toHaveLength(2);
    });

    it("should include response time in valid results", async () => {
      const summary = await validateExternalLinks(["https://www.google.com"], {
        timeout: 5000,
      });

      const [first] = summary.results;
      expect(first.lastChecked).toBeDefined();
      if (first.status === "valid") {
        expect(first.responseTime).toBeDefined();
        expect(first.responseTime).toBeGreaterThan(0);
      }
    });

    it("should include response time in broken results", async () => {
      const summary = await validateExternalLinks(["https://httpstat.us/404"], {
        timeout: 5000,
      });

      const [first] = summary.results;
      expect(first.lastChecked).toBeDefined();
      if (first.status === "broken" && first.statusCode) {
        expect(first.responseTime).toBeDefined();
      }
    });
  });

  describe("extractLinksFromContent", () => {
    it("should extract external links", () => {
      const content = `
        # Test
        [Google](https://www.google.com)
        [GitHub](https://github.com)
      `;

      const { externalLinks } = extractLinksFromContent(content);
      expect(externalLinks.length).toBeGreaterThan(0);
    });

    it("should extract internal links", () => {
      const content = `
        # Test
        [Page 1](./page1.md)
        [Page 2](../page2.md)
      `;

      const { internalLinks } = extractLinksFromContent(content);
      expect(internalLinks.length).toBeGreaterThan(0);
    });

    it("should handle mixed links", () => {
      const content = `
        # Test
        [External](https://example.com)
        [Internal](./page.md)
      `;

      // Both buckets should be populated from the same document.
      const { externalLinks, internalLinks } = extractLinksFromContent(content);
      expect(externalLinks.length).toBeGreaterThan(0);
      expect(internalLinks.length).toBeGreaterThan(0);
    });

    it("should extract HTTP links", () => {
      const { externalLinks } = extractLinksFromContent(
        `[Link](http://example.com)`,
      );
      expect(externalLinks).toContain("http://example.com");
    });

    it("should extract HTML anchor links", () => {
      const { externalLinks } = extractLinksFromContent(
        `<a href="https://example.com">Link</a>`,
      );
      expect(externalLinks).toContain("https://example.com");
    });

    it("should extract HTML anchor links with single quotes", () => {
      const { externalLinks } = extractLinksFromContent(
        `<a href='https://example.com'>Link</a>`,
      );
      expect(externalLinks).toContain("https://example.com");
    });

    it("should extract internal HTML links", () => {
      const { internalLinks } = extractLinksFromContent(
        `<a href="./page.md">Link</a>`,
      );
      expect(internalLinks).toContain("./page.md");
    });

    it("should remove duplicate links", () => {
      const content = `
        [Link1](https://example.com)
        [Link2](https://example.com)
        [Link3](./page.md)
        [Link4](./page.md)
      `;

      // Repeated targets collapse to one entry per bucket.
      const { externalLinks, internalLinks } = extractLinksFromContent(content);
      expect(externalLinks.length).toBe(1);
      expect(internalLinks.length).toBe(1);
    });

    it("should handle content with no links", () => {
      const { externalLinks, internalLinks } =
        extractLinksFromContent("# Test\nNo links here");

      expect(externalLinks).toEqual([]);
      expect(internalLinks).toEqual([]);
    });
  });

  describe("validateAndStoreDocumentationLinks", () => {
    it("should validate and store documentation links", async () => {
      const summary = await validateAndStoreDocumentationLinks(
        "test-project",
        "# Test\n[Link](./other.md)\n[External](https://example.com)",
      );

      expect(summary).toBeDefined();
      expect(summary.totalLinks).toBeGreaterThan(0);
    });

    it("should handle documentation without links", async () => {
      const summary = await validateAndStoreDocumentationLinks(
        "test-project",
        "# Test\nNo links here",
      );

      expect(summary).toBeDefined();
      expect(summary.totalLinks).toBe(0);
    });

    it("should handle content with only internal links", async () => {
      const summary = await validateAndStoreDocumentationLinks(
        "test-project",
        "# Test\n[Page](./page.md)",
      );

      expect(summary).toBeDefined();
      // Only external links are validated
      expect(summary.totalLinks).toBe(0);
    });
  });

  describe("storeLinkValidationInKG", () => {
    // Builds a broken-link result entry stamped with the current time.
    const brokenResult = (url: string) => ({
      url,
      status: "broken" as const,
      lastChecked: new Date().toISOString(),
    });

    // Builds a validation summary; warning/unknown counters are always zero
    // in these cases, and results default to empty.
    const makeSummary = (
      counts: { totalLinks: number; validLinks: number; brokenLinks: number },
      results: Array<ReturnType<typeof brokenResult>> = [],
    ) => ({
      ...counts,
      warningLinks: 0,
      unknownLinks: 0,
      results,
    });

    it("should store validation results with no broken links", async () => {
      await storeLinkValidationInKG(
        "doc-section-1",
        makeSummary({ totalLinks: 5, validLinks: 5, brokenLinks: 0 }),
      );

      const kg = await getKnowledgeGraph();
      const nodes = await kg.getAllNodes();

      // Locate the validation node created by this test specifically.
      const validationNode = nodes.find(
        (n) =>
          n.type === "link_validation" &&
          n.properties.totalLinks === 5 &&
          n.properties.brokenLinks === 0,
      );
      expect(validationNode).toBeDefined();
      expect(validationNode?.properties.totalLinks).toBe(5);
      expect(validationNode?.properties.healthScore).toBe(100);
    });

    it("should store validation results with broken links", async () => {
      await storeLinkValidationInKG(
        "doc-section-2",
        makeSummary({ totalLinks: 10, validLinks: 7, brokenLinks: 3 }, [
          brokenResult("https://broken1.com"),
          brokenResult("https://broken2.com"),
          brokenResult("https://broken3.com"),
        ]),
      );

      const kg = await getKnowledgeGraph();
      const edges = await kg.findEdges({
        source: "doc-section-2",
        type: "has_link_validation",
      });

      expect(edges.length).toBeGreaterThan(0);
    });

    it("should create requires_fix edge for broken links", async () => {
      await storeLinkValidationInKG(
        "doc-section-3",
        makeSummary({ totalLinks: 10, validLinks: 4, brokenLinks: 6 }, [
          brokenResult("https://broken.com"),
        ]),
      );

      const kg = await getKnowledgeGraph();
      const allNodes = await kg.getAllNodes();
      const validationNode = allNodes.find(
        (n) => n.type === "link_validation" && n.properties.brokenLinks === 6,
      );
      expect(validationNode).toBeDefined();

      const requiresFixEdges = await kg.findEdges({
        source: validationNode!.id,
        type: "requires_fix",
      });

      expect(requiresFixEdges.length).toBeGreaterThan(0);
      expect(requiresFixEdges[0].properties.severity).toBe("high"); // > 5 broken links
    });

    it("should set medium severity for few broken links", async () => {
      await storeLinkValidationInKG(
        "doc-section-4",
        makeSummary({ totalLinks: 10, validLinks: 8, brokenLinks: 2 }, [
          brokenResult("https://broken.com"),
        ]),
      );

      const kg = await getKnowledgeGraph();
      const allNodes = await kg.getAllNodes();
      const validationNode = allNodes.find(
        (n) => n.type === "link_validation" && n.properties.brokenLinks === 2,
      );

      const requiresFixEdges = await kg.findEdges({
        source: validationNode!.id,
        type: "requires_fix",
      });

      expect(requiresFixEdges[0].properties.severity).toBe("medium");
    });

    it("should calculate health score correctly", async () => {
      // 15 of 20 valid should yield a 75% health score.
      await storeLinkValidationInKG(
        "doc-section-5",
        makeSummary({ totalLinks: 20, validLinks: 15, brokenLinks: 5 }),
      );

      const kg = await getKnowledgeGraph();
      const nodes = await kg.getAllNodes();
      const validationNode = nodes.find(
        (n) => n.type === "link_validation" && n.properties.totalLinks === 20,
      );

      expect(validationNode?.properties.healthScore).toBe(75);
    });

    it("should handle zero links with 100% health score", async () => {
      // No links at all counts as perfectly healthy, not division-by-zero.
      await storeLinkValidationInKG(
        "doc-section-6",
        makeSummary({ totalLinks: 0, validLinks: 0, brokenLinks: 0 }),
      );

      const kg = await getKnowledgeGraph();
      const nodes = await kg.getAllNodes();
      const validationNode = nodes.find(
        (n) => n.type === "link_validation" && n.properties.totalLinks === 0,
      );

      expect(validationNode?.properties.healthScore).toBe(100);
    });
  });

  describe("getLinkValidationHistory", () => {
    // All-valid summary used to seed history entries.
    const cleanSummary = (totalLinks: number) => ({
      totalLinks,
      validLinks: totalLinks,
      brokenLinks: 0,
      warningLinks: 0,
      unknownLinks: 0,
      results: [],
    });

    it("should retrieve validation history", async () => {
      await storeLinkValidationInKG("doc-section-7", cleanSummary(5));

      const history = await getLinkValidationHistory("doc-section-7");

      expect(history.length).toBeGreaterThan(0);
      expect(history[0].type).toBe("link_validation");
    });

    it("should return empty array for non-existent doc section", async () => {
      expect(await getLinkValidationHistory("non-existent")).toEqual([]);
    });

    it("should sort history by newest first", async () => {
      await storeLinkValidationInKG("doc-section-8", cleanSummary(5));

      // Small delay to ensure different timestamp
      await new Promise((resolve) => setTimeout(resolve, 10));

      await storeLinkValidationInKG("doc-section-8", cleanSummary(6));

      const history = await getLinkValidationHistory("doc-section-8");
      expect(history.length).toBeGreaterThan(1);

      // Entries must come back newest-first.
      const newer = new Date(history[0].properties.lastValidated).getTime();
      const older = new Date(history[1].properties.lastValidated).getTime();
      expect(newer).toBeGreaterThanOrEqual(older);
    });
  });
});

```

--------------------------------------------------------------------------------
/tests/memory/manager-advanced.test.ts:
--------------------------------------------------------------------------------

```typescript
/**
 * Tests for uncovered branches in Memory Manager
 * Covers: getRelated (lines 171-202), export (lines 381-398), import (lines 409-415)
 */

import { promises as fs } from "fs";
import path from "path";
import os from "os";
import { MemoryManager } from "../../src/memory/manager.js";
import { MemoryEntry } from "../../src/memory/storage.js";

describe("MemoryManager - Advanced Features Coverage", () => {
  let manager: MemoryManager;
  let tempDir: string;

  beforeEach(async () => {
    tempDir = path.join(
      os.tmpdir(),
      `manager-advanced-test-${Date.now()}-${Math.random()
        .toString(36)
        .substr(2, 9)}`,
    );
    await fs.mkdir(tempDir, { recursive: true });
    manager = new MemoryManager(tempDir);
    await manager.initialize();
  });

  afterEach(async () => {
    try {
      await manager.close();
      await fs.rm(tempDir, { recursive: true, force: true });
    } catch (error) {
      // Ignore cleanup errors
    }
  });

  describe("getRelated - Tag-based Relationships (lines 189-195)", () => {
    it("should find related memories by overlapping tags", async () => {
      // Create entries with overlapping tags
      const entry1 = await manager.remember(
        "analysis",
        { name: "Project A" },
        {
          projectId: "proj-001",
          tags: ["typescript", "react", "frontend"],
        },
      );

      await manager.remember(
        "analysis",
        { name: "Project B" },
        {
          projectId: "proj-002",
          tags: ["typescript", "vue", "frontend"],
        },
      );

      await manager.remember(
        "analysis",
        { name: "Project C" },
        {
          projectId: "proj-003",
          tags: ["python", "backend"],
        },
      );

      // Get related memories for entry1 (should find project B via overlapping tags)
      const related = await manager.getRelated(entry1, 10);

      expect(related.length).toBeGreaterThan(0);

      // Should include Project B (shares typescript and frontend tags)
      const relatedNames = related.map((r) => r.data.name);
      expect(relatedNames).toContain("Project B");

      // Should not include entry1 itself
      expect(relatedNames).not.toContain("Project A");
    });

    it("should find related memories by same type (lines 182-186)", async () => {
      const entry1 = await manager.remember(
        "recommendation",
        { ssg: "jekyll" },
        { projectId: "proj-001" },
      );

      await manager.remember(
        "recommendation",
        { ssg: "hugo" },
        { projectId: "proj-002" },
      );

      await manager.remember(
        "analysis",
        { type: "different" },
        { projectId: "proj-003" },
      );

      const related = await manager.getRelated(entry1, 10);

      // Should find the other recommendation, not the analysis
      expect(related.length).toBeGreaterThan(0);
      const types = related.map((r) => r.type);
      expect(types).toContain("recommendation");
    });

    it("should find related memories by same project (lines 174-179)", async () => {
      manager.setContext({ projectId: "shared-project" });

      const entry1 = await manager.remember(
        "analysis",
        { step: "step1" },
        { projectId: "shared-project" },
      );

      await manager.remember(
        "analysis",
        { step: "step2" },
        { projectId: "shared-project" },
      );

      await manager.remember(
        "analysis",
        { step: "step3" },
        { projectId: "different-project" },
      );

      const related = await manager.getRelated(entry1, 10);

      // Should find step2 from same project
      expect(related.length).toBeGreaterThan(0);
      const projectIds = related.map((r) => r.metadata.projectId);
      expect(projectIds).toContain("shared-project");
    });

    it("should deduplicate and limit related memories (lines 198-202)", async () => {
      const entry1 = await manager.remember(
        "analysis",
        { name: "Entry 1" },
        {
          projectId: "proj-001",
          tags: ["tag1", "tag2"],
        },
      );

      // Create many related entries
      for (let i = 0; i < 20; i++) {
        await manager.remember(
          "analysis",
          { name: `Entry ${i + 2}` },
          {
            projectId: "proj-001",
            tags: i < 10 ? ["tag1"] : ["tag2"],
          },
        );
      }

      // Request limit of 5
      const related = await manager.getRelated(entry1, 5);

      // Should be limited to 5 (deduplicated)
      expect(related.length).toBeLessThanOrEqual(5);

      // Should not include entry1 itself
      const names = related.map((r) => r.data.name);
      expect(names).not.toContain("Entry 1");
    });

    it("should handle entry without tags gracefully (line 189)", async () => {
      const entryNoTags = await manager.remember(
        "analysis",
        { name: "No Tags" },
        { projectId: "proj-001" },
      );

      await manager.remember(
        "analysis",
        { name: "Also No Tags" },
        { projectId: "proj-001" },
      );

      // Should still find related by project
      const related = await manager.getRelated(entryNoTags, 10);
      expect(related.length).toBeGreaterThan(0);
    });

    it("should handle entry with empty tags array (line 189)", async () => {
      const entryEmptyTags = await manager.remember(
        "analysis",
        { name: "Empty Tags" },
        {
          projectId: "proj-001",
          tags: [],
        },
      );

      await manager.remember(
        "analysis",
        { name: "Other Entry" },
        { projectId: "proj-001" },
      );

      const related = await manager.getRelated(entryEmptyTags, 10);
      expect(related.length).toBeGreaterThan(0);
    });
  });

  describe("CSV Export (lines 381-398)", () => {
    it("should export memories as CSV format", async () => {
      manager.setContext({ projectId: "csv-proj-001" });

      await manager.remember(
        "analysis",
        { test: "data1" },
        {
          repository: "github.com/test/repo1",
          ssg: "jekyll",
        },
      );

      manager.setContext({ projectId: "csv-proj-002" });

      await manager.remember(
        "recommendation",
        { test: "data2" },
        {
          repository: "github.com/test/repo2",
          ssg: "hugo",
        },
      );

      // Export as CSV
      const csvData = await manager.export("csv");

      // Verify CSV structure
      expect(csvData).toContain("id,timestamp,type,projectId,repository,ssg");
      expect(csvData).toContain("csv-proj-001");
      expect(csvData).toContain("csv-proj-002");
      expect(csvData).toContain("github.com/test/repo1");
      expect(csvData).toContain("github.com/test/repo2");
      expect(csvData).toContain("jekyll");
      expect(csvData).toContain("hugo");

      // Verify rows are comma-separated
      const lines = csvData.split("\n").filter((l) => l.trim());
      expect(lines.length).toBeGreaterThanOrEqual(3); // header + 2 rows

      // Each line should have the same number of commas
      const headerCommas = (lines[0].match(/,/g) || []).length;
      for (let i = 1; i < lines.length; i++) {
        const rowCommas = (lines[i].match(/,/g) || []).length;
        expect(rowCommas).toBe(headerCommas);
      }
    });

    it("should export memories for specific project only", async () => {
      manager.setContext({ projectId: "project-a" });
      await manager.remember("analysis", { project: "A" }, {});

      manager.setContext({ projectId: "project-b" });
      await manager.remember("analysis", { project: "B" }, {});

      // Export only project-a
      const csvData = await manager.export("csv", "project-a");

      expect(csvData).toContain("project-a");
      expect(csvData).not.toContain("project-b");
    });

    it("should handle missing metadata fields in CSV export (lines 393-395)", async () => {
      // Create entry with minimal metadata
      await manager.remember("analysis", { test: "minimal" }, {});

      const csvData = await manager.export("csv");

      // Should have empty fields for missing metadata
      const lines = csvData.split("\n");
      expect(lines.length).toBeGreaterThan(1);

      // Verify header
      expect(lines[0]).toContain("id,timestamp,type,projectId,repository,ssg");

      // Data row should have appropriate number of commas (empty fields)
      const dataRow = lines[1];
      const headerCommas = (lines[0].match(/,/g) || []).length;
      const dataCommas = (dataRow.match(/,/g) || []).length;
      expect(dataCommas).toBe(headerCommas);
    });

    it("should export as JSON by default", async () => {
      await manager.remember(
        "analysis",
        { json: "test" },
        { projectId: "json-proj" },
      );

      const jsonData = await manager.export("json");

      const parsed = JSON.parse(jsonData);
      expect(Array.isArray(parsed)).toBe(true);
      expect(parsed.length).toBeGreaterThan(0);
      expect(parsed[0].data.json).toBe("test");
    });
  });

  describe("CSV Import (lines 409-428)", () => {
    it("should import memories from CSV format", async () => {
      // Create CSV data
      const csvData = `id,timestamp,type,projectId,repository,ssg
mem-001,2024-01-01T00:00:00.000Z,analysis,proj-csv-001,github.com/test/repo1,jekyll
mem-002,2024-01-02T00:00:00.000Z,recommendation,proj-csv-002,github.com/test/repo2,hugo
mem-003,2024-01-03T00:00:00.000Z,deployment,proj-csv-003,github.com/test/repo3,mkdocs`;

      const imported = await manager.import(csvData, "csv");

      expect(imported).toBe(3);

      // Verify entries were imported
      const recalled1 = await manager.recall("mem-001");
      expect(recalled1).not.toBeNull();
      expect(recalled1?.type).toBe("analysis");
      expect(recalled1?.metadata.projectId).toBe("proj-csv-001");
      expect(recalled1?.metadata.ssg).toBe("jekyll");

      const recalled2 = await manager.recall("mem-002");
      expect(recalled2).not.toBeNull();
      expect(recalled2?.type).toBe("recommendation");
    });

    it("should skip malformed CSV rows (line 414)", async () => {
      // CSV with mismatched column counts
      const csvData = `id,timestamp,type,projectId,repository,ssg
mem-001,2024-01-01T00:00:00.000Z,analysis,proj-001,github.com/test/repo,jekyll
mem-002,2024-01-02T00:00:00.000Z,recommendation
mem-003,2024-01-03T00:00:00.000Z,deployment,proj-003,github.com/test/repo3,mkdocs`;

      const imported = await manager.import(csvData, "csv");

      // Should import 2 (skipping the malformed row)
      expect(imported).toBe(2);

      // Verify valid entries were imported
      const recalled1 = await manager.recall("mem-001");
      expect(recalled1).not.toBeNull();

      // Malformed entry should not be imported
      const recalled2 = await manager.recall("mem-002");
      expect(recalled2).toBeNull();

      const recalled3 = await manager.recall("mem-003");
      expect(recalled3).not.toBeNull();
    });

    it("should import memories from JSON format", async () => {
      const jsonData = JSON.stringify([
        {
          id: "json-001",
          timestamp: "2024-01-01T00:00:00.000Z",
          type: "analysis",
          data: { test: "json-import" },
          metadata: { projectId: "json-proj" },
        },
      ]);

      const imported = await manager.import(jsonData, "json");

      expect(imported).toBe(1);

      const recalled = await manager.recall("json-001");
      expect(recalled).not.toBeNull();
      expect(recalled?.data.test).toBe("json-import");
    });

    it("should emit import-complete event (line 437)", async () => {
      let eventEmitted = false;
      let importedCount = 0;

      manager.on("import-complete", (count) => {
        eventEmitted = true;
        importedCount = count;
      });

      const jsonData = JSON.stringify([
        {
          id: "event-001",
          timestamp: "2024-01-01T00:00:00.000Z",
          type: "analysis",
          data: {},
          metadata: {},
        },
      ]);

      await manager.import(jsonData, "json");

      expect(eventEmitted).toBe(true);
      expect(importedCount).toBe(1);
    });

    it("should handle empty CSV import gracefully", async () => {
      const csvData = `id,timestamp,type,projectId,repository,ssg`;

      const imported = await manager.import(csvData, "csv");

      expect(imported).toBe(0);
    });

    it("should handle empty JSON import gracefully", async () => {
      const jsonData = JSON.stringify([]);

      const imported = await manager.import(jsonData, "json");

      expect(imported).toBe(0);
    });
  });

  describe("Export and Import Round-trip", () => {
    it("should maintain data integrity through CSV round-trip", async () => {
      // Create test data
      manager.setContext({
        projectId: "roundtrip-proj",
        repository: "github.com/test/roundtrip",
      });
      const originalEntry = await manager.remember(
        "analysis",
        { roundtrip: "test" },
        {
          ssg: "docusaurus",
        },
      );

      // Export as CSV
      const csvData = await manager.export("csv");

      // Create new manager and import
      const tempDir2 = path.join(
        os.tmpdir(),
        `manager-roundtrip-${Date.now()}`,
      );
      await fs.mkdir(tempDir2, { recursive: true });
      const manager2 = new MemoryManager(tempDir2);
      await manager2.initialize();

      const imported = await manager2.import(csvData, "csv");
      expect(imported).toBeGreaterThan(0);

      // Verify data matches
      const recalled = await manager2.recall(originalEntry.id);
      expect(recalled).not.toBeNull();
      expect(recalled?.type).toBe(originalEntry.type);
      expect(recalled?.metadata.projectId).toBe(
        originalEntry.metadata.projectId,
      );
      expect(recalled?.metadata.ssg).toBe(originalEntry.metadata.ssg);

      await manager2.close();
      await fs.rm(tempDir2, { recursive: true, force: true });
    });

    it("should maintain data integrity through JSON round-trip", async () => {
      // Create test data with complex structure
      manager.setContext({ projectId: "json-roundtrip" });
      const originalEntry = await manager.remember(
        "analysis",
        {
          complex: "data",
          nested: { value: 123 },
          array: [1, 2, 3],
        },
        {
          tags: ["tag1", "tag2"],
        },
      );

      // Export as JSON
      const jsonData = await manager.export("json");

      // Create new manager and import
      const tempDir2 = path.join(
        os.tmpdir(),
        `manager-json-roundtrip-${Date.now()}`,
      );
      await fs.mkdir(tempDir2, { recursive: true });
      const manager2 = new MemoryManager(tempDir2);
      await manager2.initialize();

      const imported = await manager2.import(jsonData, "json");
      expect(imported).toBeGreaterThan(0);

      // Verify complex data maintained
      const recalled = await manager2.recall(originalEntry.id);
      expect(recalled).not.toBeNull();
      expect(recalled?.data).toEqual(originalEntry.data);
      expect(recalled?.metadata.tags).toEqual(originalEntry.metadata.tags);

      await manager2.close();
      await fs.rm(tempDir2, { recursive: true, force: true });
    });
  });
});

```

--------------------------------------------------------------------------------
/src/tools/evaluate-readme-health.ts:
--------------------------------------------------------------------------------

```typescript
import { z } from "zod";
import { promises as fs } from "fs";
import path from "path";
import { formatMCPResponse } from "../types/api.js";

// Input validation schema
const EvaluateReadmeHealthSchema = z.object({
  readme_path: z.string().min(1, "README path is required"),
  project_type: z
    .enum([
      "community_library",
      "enterprise_tool",
      "personal_project",
      "documentation",
    ])
    .optional()
    .default("community_library"),
  repository_path: z.string().optional(),
});

// Input type that matches what users actually pass (project_type is optional)
export interface EvaluateReadmeHealthInput {
  readme_path: string;
  project_type?:
    | "community_library"
    | "enterprise_tool"
    | "personal_project"
    | "documentation";
  repository_path?: string;
}

// Health score interfaces
interface HealthScoreComponent {
  name: string;
  score: number;
  maxScore: number;
  details: HealthCheckDetail[];
}

interface HealthCheckDetail {
  check: string;
  passed: boolean;
  points: number;
  maxPoints: number;
  recommendation?: string;
}

interface ReadmeHealthReport {
  overallScore: number;
  maxScore: number;
  grade: "A" | "B" | "C" | "D" | "F";
  components: {
    communityHealth: HealthScoreComponent;
    accessibility: HealthScoreComponent;
    onboarding: HealthScoreComponent;
    contentQuality: HealthScoreComponent;
  };
  recommendations: string[];
  strengths: string[];
  criticalIssues: string[];
  estimatedImprovementTime: string;
}

export async function evaluateReadmeHealth(input: EvaluateReadmeHealthInput) {
  const startTime = Date.now();
  try {
    // Validate input
    const validatedInput = EvaluateReadmeHealthSchema.parse(input);

    // Read README file
    const readmePath = path.resolve(validatedInput.readme_path);
    const readmeContent = await fs.readFile(readmePath, "utf-8");

    // Get repository context if available
    let repoContext: any = null;
    if (validatedInput.repository_path) {
      repoContext = await analyzeRepositoryContext(
        validatedInput.repository_path,
      );
    }

    // Evaluate all health components
    const communityHealth = evaluateCommunityHealth(readmeContent, repoContext);
    const accessibility = evaluateAccessibility(readmeContent);
    const onboarding = evaluateOnboarding(
      readmeContent,
      validatedInput.project_type,
    );
    const contentQuality = evaluateContentQuality(readmeContent);

    // Calculate overall score
    const totalScore =
      communityHealth.score +
      accessibility.score +
      onboarding.score +
      contentQuality.score;
    const maxTotalScore =
      communityHealth.maxScore +
      accessibility.maxScore +
      onboarding.maxScore +
      contentQuality.maxScore;
    const percentage = (totalScore / maxTotalScore) * 100;

    // Generate grade
    const grade = getGrade(percentage);

    // Generate recommendations and insights
    const recommendations = generateHealthRecommendations(
      [communityHealth, accessibility, onboarding, contentQuality],
      "general",
    );
    const strengths = identifyStrengths([
      communityHealth,
      accessibility,
      onboarding,
      contentQuality,
    ]);
    const criticalIssues = identifyCriticalIssues([
      communityHealth,
      accessibility,
      onboarding,
      contentQuality,
    ]);

    const report: ReadmeHealthReport = {
      overallScore: Math.round(percentage),
      maxScore: 100,
      grade,
      components: {
        communityHealth,
        accessibility,
        onboarding,
        contentQuality,
      },
      recommendations,
      strengths,
      criticalIssues,
      estimatedImprovementTime: estimateImprovementTime(
        recommendations.length,
        criticalIssues.length,
      ),
    };

    const response = {
      readmePath: validatedInput.readme_path,
      projectType: validatedInput.project_type,
      healthReport: report,
      summary: generateSummary(report),
      nextSteps: generateNextSteps(report),
    };

    return formatMCPResponse({
      success: true,
      data: response,
      metadata: {
        toolVersion: "1.0.0",
        executionTime: Date.now() - startTime,
        timestamp: new Date().toISOString(),
      },
    });
  } catch (error) {
    return formatMCPResponse({
      success: false,
      error: {
        code: "README_HEALTH_EVALUATION_FAILED",
        message: `Failed to evaluate README health: ${error}`,
        resolution: "Ensure README path is valid and file is readable",
      },
      metadata: {
        toolVersion: "1.0.0",
        executionTime: Date.now() - startTime,
        timestamp: new Date().toISOString(),
      },
    });
  }
}

function evaluateCommunityHealth(
  content: string,
  _repoContext: any,
): HealthScoreComponent {
  const checks: HealthCheckDetail[] = [
    {
      check: "Code of Conduct linked",
      passed: /code.of.conduct|conduct\.md|\.github\/code_of_conduct/i.test(
        content,
      ),
      points: 0,
      maxPoints: 5,
      recommendation:
        "Add a link to your Code of Conduct to establish community standards",
    },
    {
      check: "Contributing guidelines visible",
      passed: /contributing|contribute\.md|\.github\/contributing/i.test(
        content,
      ),
      points: 0,
      maxPoints: 5,
      recommendation:
        "Include contributing guidelines to help new contributors get started",
    },
    {
      check: "Issue/PR templates mentioned",
      passed:
        /issue.template|pull.request.template|\.github\/issue_template|\.github\/pull_request_template/i.test(
          content,
        ),
      points: 0,
      maxPoints: 5,
      recommendation:
        "Reference issue and PR templates to streamline contributions",
    },
    {
      check: "Security policy linked",
      passed: /security\.md|security.policy|\.github\/security/i.test(content),
      points: 0,
      maxPoints: 5,
      recommendation:
        "Add a security policy to handle vulnerability reports responsibly",
    },
    {
      check: "Support channels provided",
      passed: /support|help|discord|slack|discussions|forum|community/i.test(
        content,
      ),
      points: 0,
      maxPoints: 5,
      recommendation: "Provide clear support channels for users seeking help",
    },
  ];

  // Award points for passed checks
  checks.forEach((check) => {
    if (check.passed) {
      check.points = check.maxPoints;
    }
  });

  const totalScore = checks.reduce((sum, check) => sum + check.points, 0);
  const maxScore = checks.reduce((sum, check) => sum + check.maxPoints, 0);

  return {
    name: "Community Health",
    score: totalScore,
    maxScore,
    details: checks,
  };
}

function evaluateAccessibility(content: string): HealthScoreComponent {
  const lines = content.split("\n");
  const headings = lines.filter((line) => line.trim().startsWith("#"));
  const images = content.match(/!\[.*?\]\(.*?\)/g) || [];

  const checks: HealthCheckDetail[] = [
    {
      check: "Scannable structure with proper spacing",
      passed: content.includes("\n\n") && lines.length > 10,
      points: 0,
      maxPoints: 5,
      recommendation: "Use proper spacing and breaks to make content scannable",
    },
    {
      check: "Clear heading hierarchy",
      passed: headings.length >= 3 && headings.some((h) => h.startsWith("##")),
      points: 0,
      maxPoints: 5,
      recommendation:
        "Use proper heading hierarchy (H1, H2, H3) to structure content",
    },
    {
      check: "Alt text for images",
      passed:
        images.length === 0 || images.every((img) => !img.includes("![](")),
      points: 0,
      maxPoints: 5,
      recommendation:
        "Add descriptive alt text for all images for screen readers",
    },
    {
      check: "Inclusive language",
      passed: !/\b(guys|blacklist|whitelist|master|slave)\b/i.test(content),
      points: 0,
      maxPoints: 5,
      recommendation:
        'Use inclusive language (e.g., "team" instead of "guys", "allowlist/blocklist")',
    },
  ];

  // Award points for passed checks
  checks.forEach((check) => {
    if (check.passed) {
      check.points = check.maxPoints;
    }
  });

  const totalScore = checks.reduce((sum, check) => sum + check.points, 0);
  const maxScore = checks.reduce((sum, check) => sum + check.maxPoints, 0);

  return {
    name: "Accessibility",
    score: totalScore,
    maxScore,
    details: checks,
  };
}

function evaluateOnboarding(
  content: string,
  _projectType: string,
): HealthScoreComponent {
  const checks: HealthCheckDetail[] = [
    {
      check: "Quick start section",
      passed: /quick.start|getting.started|installation|setup/i.test(content),
      points: 0,
      maxPoints: 5,
      recommendation:
        "Add a quick start section to help users get up and running fast",
    },
    {
      check: "Prerequisites clearly listed",
      passed: /prerequisites|requirements|dependencies|before.you.begin/i.test(
        content,
      ),
      points: 0,
      maxPoints: 5,
      recommendation: "Clearly list all prerequisites and system requirements",
    },
    {
      check: "First contribution guide",
      passed: /first.contribution|new.contributor|beginner|newcomer/i.test(
        content,
      ),
      points: 0,
      maxPoints: 5,
      recommendation:
        "Include guidance specifically for first-time contributors",
    },
    {
      check: "Good first issues mentioned",
      passed: /good.first.issue|beginner.friendly|easy.pick|help.wanted/i.test(
        content,
      ),
      points: 0,
      maxPoints: 5,
      recommendation: "Mention good first issues or beginner-friendly tasks",
    },
  ];

  // Award points for passed checks
  checks.forEach((check) => {
    if (check.passed) {
      check.points = check.maxPoints;
    }
  });

  const totalScore = checks.reduce((sum, check) => sum + check.points, 0);
  const maxScore = checks.reduce((sum, check) => sum + check.maxPoints, 0);

  return {
    name: "Onboarding",
    score: totalScore,
    maxScore,
    details: checks,
  };
}

function evaluateContentQuality(content: string): HealthScoreComponent {
  const wordCount = content.split(/\s+/).length;
  const codeBlocks = (content.match(/```/g) || []).length / 2;
  const links = (content.match(/\[.*?\]\(.*?\)/g) || []).length;

  const checks: HealthCheckDetail[] = [
    {
      check: "Adequate content length",
      passed: wordCount >= 50 && wordCount <= 2000,
      points: 0,
      maxPoints: 5,
      recommendation:
        "Maintain optimal README length (50-2000 words) for readability",
    },
    {
      check: "Code examples provided",
      passed: codeBlocks >= 2,
      points: 0,
      maxPoints: 5,
      recommendation: "Include practical code examples to demonstrate usage",
    },
    {
      check: "External links present",
      passed: links >= 3,
      points: 0,
      maxPoints: 5,
      recommendation:
        "Add relevant external links (docs, demos, related projects)",
    },
    {
      check: "Project description clarity",
      passed: /## |### /.test(content) && content.length > 500,
      points: 0,
      maxPoints: 5,
      recommendation:
        "Provide clear, detailed project description with proper structure",
    },
  ];

  // Award points for passed checks
  checks.forEach((check) => {
    if (check.passed) {
      check.points = check.maxPoints;
    }
  });

  const totalScore = checks.reduce((sum, check) => sum + check.points, 0);
  const maxScore = checks.reduce((sum, check) => sum + check.maxPoints, 0);

  return {
    name: "Content Quality",
    score: totalScore,
    maxScore,
    details: checks,
  };
}

async function analyzeRepositoryContext(repoPath: string): Promise<any> {
  try {
    const repoDir = path.resolve(repoPath);
    const files = await fs.readdir(repoDir);

    return {
      hasCodeOfConduct: files.includes("CODE_OF_CONDUCT.md"),
      hasContributing: files.includes("CONTRIBUTING.md"),
      hasSecurityPolicy: files.includes("SECURITY.md"),
      hasGithubDir: files.includes(".github"),
      packageJson: files.includes("package.json"),
    };
  } catch (error) {
    return null;
  }
}

function getGrade(percentage: number): "A" | "B" | "C" | "D" | "F" {
  if (percentage >= 90) return "A";
  if (percentage >= 80) return "B";
  if (percentage >= 70) return "C";
  if (percentage >= 60) return "D";
  return "F";
}

function generateHealthRecommendations(
  analysis: any[],
  _projectType: string,
): string[] {
  const recommendations: string[] = [];

  analysis.forEach((component: any) => {
    component.details.forEach((detail: any) => {
      if (detail.points < detail.maxPoints) {
        recommendations.push(`${component.name}: ${detail.recommendation}`);
      }
    });
  });

  return recommendations.slice(0, 10); // Top 10 recommendations
}

function identifyStrengths(components: HealthScoreComponent[]): string[] {
  const strengths: string[] = [];

  components.forEach((component) => {
    const passedChecks = component.details.filter((detail) => detail.passed);
    if (passedChecks.length > component.details.length / 2) {
      strengths.push(
        `Strong ${component.name.toLowerCase()}: ${passedChecks
          .map((c) => c.check.toLowerCase())
          .join(", ")}`,
      );
    }
  });

  return strengths;
}

function identifyCriticalIssues(components: HealthScoreComponent[]): string[] {
  const critical: string[] = [];

  components.forEach((component) => {
    if (component.score < component.maxScore * 0.3) {
      // Less than 30% score
      critical.push(
        `Critical: Poor ${component.name.toLowerCase()} (${component.score}/${
          component.maxScore
        } points)`,
      );
    }
  });

  return critical;
}

function estimateImprovementTime(
  recommendationCount: number,
  criticalCount: number,
): string {
  const baseTime = recommendationCount * 15; // 15 minutes per recommendation
  const criticalTime = criticalCount * 30; // 30 minutes per critical issue
  const totalMinutes = baseTime + criticalTime;

  if (totalMinutes < 60) return `${totalMinutes} minutes`;
  if (totalMinutes < 480) return `${Math.round(totalMinutes / 60)} hours`;
  return `${Math.round(totalMinutes / 480)} days`;
}

function generateSummary(report: ReadmeHealthReport): string {
  const { overallScore, grade, components } = report;

  const componentScores = Object.values(components)
    .map((c) => `${c.name}: ${c.score}/${c.maxScore}`)
    .join(", ");

  return `README Health Score: ${overallScore}/100 (Grade ${grade}). Component breakdown: ${componentScores}. ${report.criticalIssues.length} critical issues identified.`;
}

function generateNextSteps(report: ReadmeHealthReport): string[] {
  const steps: string[] = [];

  if (report.criticalIssues.length > 0) {
    steps.push(
      "Address critical issues first to establish baseline community health",
    );
  }

  if (report.recommendations.length > 0) {
    steps.push(
      `Implement top ${Math.min(
        3,
        report.recommendations.length,
      )} recommendations for quick wins`,
    );
  }

  if (report.overallScore < 85) {
    steps.push("Target 85+ health score for optimal community engagement");
  }

  steps.push("Re-evaluate after improvements to track progress");

  return steps;
}

```
Page 6/20FirstPrevNextLast