This is page 5 of 33. Use http://codebase.md/tosin2013/documcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│   ├── agents
│   │   ├── documcp-ast.md
│   │   ├── documcp-deploy.md
│   │   ├── documcp-memory.md
│   │   ├── documcp-test.md
│   │   └── documcp-tool.md
│   ├── copilot-instructions.md
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── automated-changelog.md
│   │   ├── bug_report.md
│   │   ├── bug_report.yml
│   │   ├── documentation_issue.md
│   │   ├── feature_request.md
│   │   ├── feature_request.yml
│   │   ├── npm-publishing-fix.md
│   │   └── release_improvements.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── release-drafter.yml
│   └── workflows
│       ├── auto-merge.yml
│       ├── ci.yml
│       ├── codeql.yml
│       ├── dependency-review.yml
│       ├── deploy-docs.yml
│       ├── README.md
│       ├── release-drafter.yml
│       └── release.yml
├── .gitignore
├── .husky
│   ├── commit-msg
│   └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── ARCHITECTURAL_CHANGES_SUMMARY.md
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│   ├── .docusaurus
│   │   ├── docusaurus-plugin-content-docs
│   │   │   └── default
│   │   │       └── __mdx-loader-dependency.json
│   │   └── docusaurus-plugin-content-pages
│   │       └── default
│   │           └── __plugin.json
│   ├── adrs
│   │   ├── adr-0001-mcp-server-architecture.md
│   │   ├── adr-0002-repository-analysis-engine.md
│   │   ├── adr-0003-static-site-generator-recommendation-engine.md
│   │   ├── adr-0004-diataxis-framework-integration.md
│   │   ├── adr-0005-github-pages-deployment-automation.md
│   │   ├── adr-0006-mcp-tools-api-design.md
│   │   ├── adr-0007-mcp-prompts-and-resources-integration.md
│   │   ├── adr-0008-intelligent-content-population-engine.md
│   │   ├── adr-0009-content-accuracy-validation-framework.md
│   │   ├── adr-0010-mcp-resource-pattern-redesign.md
│   │   ├── adr-0011-ce-mcp-compatibility.md
│   │   ├── adr-0012-priority-scoring-system-for-documentation-drift.md
│   │   ├── adr-0013-release-pipeline-and-package-distribution.md
│   │   └── README.md
│   ├── api
│   │   ├── .nojekyll
│   │   ├── assets
│   │   │   ├── hierarchy.js
│   │   │   ├── highlight.css
│   │   │   ├── icons.js
│   │   │   ├── icons.svg
│   │   │   ├── main.js
│   │   │   ├── navigation.js
│   │   │   ├── search.js
│   │   │   └── style.css
│   │   ├── hierarchy.html
│   │   ├── index.html
│   │   ├── modules.html
│   │   └── variables
│   │       └── TOOLS.html
│   ├── assets
│   │   └── logo.svg
│   ├── CE-MCP-FINDINGS.md
│   ├── development
│   │   └── MCP_INSPECTOR_TESTING.md
│   ├── docusaurus.config.js
│   ├── explanation
│   │   ├── architecture.md
│   │   └── index.md
│   ├── guides
│   │   ├── link-validation.md
│   │   ├── playwright-integration.md
│   │   └── playwright-testing-workflow.md
│   ├── how-to
│   │   ├── analytics-setup.md
│   │   ├── change-watcher.md
│   │   ├── custom-domains.md
│   │   ├── documentation-freshness-tracking.md
│   │   ├── drift-priority-scoring.md
│   │   ├── github-pages-deployment.md
│   │   ├── index.md
│   │   ├── llm-integration.md
│   │   ├── local-testing.md
│   │   ├── performance-optimization.md
│   │   ├── prompting-guide.md
│   │   ├── repository-analysis.md
│   │   ├── seo-optimization.md
│   │   ├── site-monitoring.md
│   │   ├── troubleshooting.md
│   │   └── usage-examples.md
│   ├── index.md
│   ├── knowledge-graph.md
│   ├── package-lock.json
│   ├── package.json
│   ├── phase-2-intelligence.md
│   ├── reference
│   │   ├── api-overview.md
│   │   ├── cli.md
│   │   ├── configuration.md
│   │   ├── deploy-pages.md
│   │   ├── index.md
│   │   ├── mcp-tools.md
│   │   └── prompt-templates.md
│   ├── research
│   │   ├── cross-domain-integration
│   │   │   └── README.md
│   │   ├── domain-1-mcp-architecture
│   │   │   ├── index.md
│   │   │   └── mcp-performance-research.md
│   │   ├── domain-2-repository-analysis
│   │   │   └── README.md
│   │   ├── domain-3-ssg-recommendation
│   │   │   ├── index.md
│   │   │   └── ssg-performance-analysis.md
│   │   ├── domain-4-diataxis-integration
│   │   │   └── README.md
│   │   ├── domain-5-github-deployment
│   │   │   ├── github-pages-security-analysis.md
│   │   │   └── index.md
│   │   ├── domain-6-api-design
│   │   │   └── README.md
│   │   ├── README.md
│   │   ├── research-integration-summary-2025-01-14.md
│   │   ├── research-progress-template.md
│   │   └── research-questions-2025-01-14.md
│   ├── robots.txt
│   ├── sidebars.js
│   ├── sitemap.xml
│   ├── src
│   │   └── css
│   │       └── custom.css
│   └── tutorials
│       ├── development-setup.md
│       ├── environment-setup.md
│       ├── first-deployment.md
│       ├── getting-started.md
│       ├── index.md
│       ├── memory-workflows.md
│       └── user-onboarding.md
├── ISSUE_IMPLEMENTATION_SUMMARY.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│   └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│   ├── benchmarks
│   │   └── performance.ts
│   ├── index.ts
│   ├── memory
│   │   ├── contextual-retrieval.ts
│   │   ├── deployment-analytics.ts
│   │   ├── enhanced-manager.ts
│   │   ├── export-import.ts
│   │   ├── freshness-kg-integration.ts
│   │   ├── index.ts
│   │   ├── integration.ts
│   │   ├── kg-code-integration.ts
│   │   ├── kg-health.ts
│   │   ├── kg-integration.ts
│   │   ├── kg-link-validator.ts
│   │   ├── kg-storage.ts
│   │   ├── knowledge-graph.ts
│   │   ├── learning.ts
│   │   ├── manager.ts
│   │   ├── multi-agent-sharing.ts
│   │   ├── pruning.ts
│   │   ├── schemas.ts
│   │   ├── storage.ts
│   │   ├── temporal-analysis.ts
│   │   ├── user-preferences.ts
│   │   └── visualization.ts
│   ├── prompts
│   │   └── technical-writer-prompts.ts
│   ├── scripts
│   │   └── benchmark.ts
│   ├── templates
│   │   └── playwright
│   │       ├── accessibility.spec.template.ts
│   │       ├── Dockerfile.template
│   │       ├── docs-e2e.workflow.template.yml
│   │       ├── link-validation.spec.template.ts
│   │       └── playwright.config.template.ts
│   ├── tools
│   │   ├── analyze-deployments.ts
│   │   ├── analyze-readme.ts
│   │   ├── analyze-repository.ts
│   │   ├── change-watcher.ts
│   │   ├── check-documentation-links.ts
│   │   ├── cleanup-agent-artifacts.ts
│   │   ├── deploy-pages.ts
│   │   ├── detect-gaps.ts
│   │   ├── evaluate-readme-health.ts
│   │   ├── generate-config.ts
│   │   ├── generate-contextual-content.ts
│   │   ├── generate-llm-context.ts
│   │   ├── generate-readme-template.ts
│   │   ├── generate-technical-writer-prompts.ts
│   │   ├── kg-health-check.ts
│   │   ├── manage-preferences.ts
│   │   ├── manage-sitemap.ts
│   │   ├── optimize-readme.ts
│   │   ├── populate-content.ts
│   │   ├── readme-best-practices.ts
│   │   ├── recommend-ssg.ts
│   │   ├── setup-playwright-tests.ts
│   │   ├── setup-structure.ts
│   │   ├── simulate-execution.ts
│   │   ├── sync-code-to-docs.ts
│   │   ├── test-local-deployment.ts
│   │   ├── track-documentation-freshness.ts
│   │   ├── update-existing-documentation.ts
│   │   ├── validate-content.ts
│   │   ├── validate-documentation-freshness.ts
│   │   ├── validate-readme-checklist.ts
│   │   └── verify-deployment.ts
│   ├── types
│   │   └── api.ts
│   ├── utils
│   │   ├── artifact-detector.ts
│   │   ├── ast-analyzer.ts
│   │   ├── change-watcher.ts
│   │   ├── code-scanner.ts
│   │   ├── content-extractor.ts
│   │   ├── drift-detector.ts
│   │   ├── execution-simulator.ts
│   │   ├── freshness-tracker.ts
│   │   ├── language-parsers-simple.ts
│   │   ├── llm-client.ts
│   │   ├── permission-checker.ts
│   │   ├── semantic-analyzer.ts
│   │   ├── sitemap-generator.ts
│   │   ├── usage-metadata.ts
│   │   └── user-feedback-integration.ts
│   └── workflows
│       └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│   ├── api
│   │   └── mcp-responses.test.ts
│   ├── benchmarks
│   │   └── performance.test.ts
│   ├── call-graph-builder.test.ts
│   ├── change-watcher-priority.integration.test.ts
│   ├── change-watcher.test.ts
│   ├── edge-cases
│   │   └── error-handling.test.ts
│   ├── execution-simulator.test.ts
│   ├── functional
│   │   └── tools.test.ts
│   ├── integration
│   │   ├── kg-documentation-workflow.test.ts
│   │   ├── knowledge-graph-workflow.test.ts
│   │   ├── mcp-readme-tools.test.ts
│   │   ├── memory-mcp-tools.test.ts
│   │   ├── readme-technical-writer.test.ts
│   │   └── workflow.test.ts
│   ├── memory
│   │   ├── contextual-retrieval.test.ts
│   │   ├── enhanced-manager.test.ts
│   │   ├── export-import.test.ts
│   │   ├── freshness-kg-integration.test.ts
│   │   ├── kg-code-integration.test.ts
│   │   ├── kg-health.test.ts
│   │   ├── kg-link-validator.test.ts
│   │   ├── kg-storage-validation.test.ts
│   │   ├── kg-storage.test.ts
│   │   ├── knowledge-graph-documentation-examples.test.ts
│   │   ├── knowledge-graph-enhanced.test.ts
│   │   ├── knowledge-graph.test.ts
│   │   ├── learning.test.ts
│   │   ├── manager-advanced.test.ts
│   │   ├── manager.test.ts
│   │   ├── mcp-resource-integration.test.ts
│   │   ├── mcp-tool-persistence.test.ts
│   │   ├── schemas-documentation-examples.test.ts
│   │   ├── schemas.test.ts
│   │   ├── storage.test.ts
│   │   ├── temporal-analysis.test.ts
│   │   └── user-preferences.test.ts
│   ├── performance
│   │   ├── memory-load-testing.test.ts
│   │   └── memory-stress-testing.test.ts
│   ├── prompts
│   │   ├── guided-workflow-prompts.test.ts
│   │   └── technical-writer-prompts.test.ts
│   ├── server.test.ts
│   ├── setup.ts
│   ├── tools
│   │   ├── all-tools.test.ts
│   │   ├── analyze-coverage.test.ts
│   │   ├── analyze-deployments.test.ts
│   │   ├── analyze-readme.test.ts
│   │   ├── analyze-repository.test.ts
│   │   ├── check-documentation-links.test.ts
│   │   ├── cleanup-agent-artifacts.test.ts
│   │   ├── deploy-pages-kg-retrieval.test.ts
│   │   ├── deploy-pages-tracking.test.ts
│   │   ├── deploy-pages.test.ts
│   │   ├── detect-gaps.test.ts
│   │   ├── evaluate-readme-health.test.ts
│   │   ├── generate-contextual-content.test.ts
│   │   ├── generate-llm-context.test.ts
│   │   ├── generate-readme-template.test.ts
│   │   ├── generate-technical-writer-prompts.test.ts
│   │   ├── kg-health-check.test.ts
│   │   ├── manage-sitemap.test.ts
│   │   ├── optimize-readme.test.ts
│   │   ├── readme-best-practices.test.ts
│   │   ├── recommend-ssg-historical.test.ts
│   │   ├── recommend-ssg-preferences.test.ts
│   │   ├── recommend-ssg.test.ts
│   │   ├── simple-coverage.test.ts
│   │   ├── sync-code-to-docs.test.ts
│   │   ├── test-local-deployment.test.ts
│   │   ├── tool-error-handling.test.ts
│   │   ├── track-documentation-freshness.test.ts
│   │   ├── validate-content.test.ts
│   │   ├── validate-documentation-freshness.test.ts
│   │   └── validate-readme-checklist.test.ts
│   ├── types
│   │   └── type-safety.test.ts
│   └── utils
│       ├── artifact-detector.test.ts
│       ├── ast-analyzer.test.ts
│       ├── content-extractor.test.ts
│       ├── drift-detector-diataxis.test.ts
│       ├── drift-detector-priority.test.ts
│       ├── drift-detector.test.ts
│       ├── freshness-tracker.test.ts
│       ├── llm-client.test.ts
│       ├── semantic-analyzer.test.ts
│       ├── sitemap-generator.test.ts
│       ├── usage-metadata.test.ts
│       └── user-feedback-integration.test.ts
├── tsconfig.json
└── typedoc.json
```

# Files

--------------------------------------------------------------------------------
/docs/how-to/drift-priority-scoring.md:
--------------------------------------------------------------------------------

```markdown
  1 | # Prioritizing Documentation Drift Updates
  2 | 
  3 | This guide explains how to use the priority scoring system to identify and triage documentation drift based on importance and urgency.
  4 | 
  5 | ## Overview
  6 | 
  7 | The priority scoring system ranks documentation drift issues by importance, helping teams focus on the most critical documentation updates first. Each drift detection result receives a comprehensive priority score based on multiple factors.
  8 | 
  9 | ## Priority Levels
 10 | 
 11 | | Level        | Score Range | SLA                    | Description                                             |
 12 | | ------------ | ----------- | ---------------------- | ------------------------------------------------------- |
 13 | | **Critical** | 80-100      | Update immediately     | Breaking changes in heavily-used APIs with complex code |
 14 | | **High**     | 60-79       | Update within 1 day    | Major changes affecting documented features             |
 15 | | **Medium**   | 40-59       | Update within 1 week   | Multiple minor changes or stale documentation           |
 16 | | **Low**      | 0-39        | Update when convenient | Patch-level changes or well-documented code             |
 17 | 
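    | To make the thresholds concrete, here is a minimal sketch of the score-to-level mapping, assuming the ranges in the table above (illustrative only; the actual `DriftDetector` exposes the level via `priorityScore.recommendation`):
    | 
    | ```typescript
    | type PriorityLevel = "critical" | "high" | "medium" | "low";
    | 
    | // Hypothetical helper mirroring the table's score ranges.
    | function toPriorityLevel(score: number): PriorityLevel {
    |   if (score >= 80) return "critical";
    |   if (score >= 60) return "high";
    |   if (score >= 40) return "medium";
    |   return "low";
    | }
    | ```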
 18 | ## Scoring Factors
 19 | 
 20 | The priority score (0-100) is calculated as a weighted sum of six factors; a sketch of the calculation follows the factor descriptions below:
 21 | 
 22 | ### 1. Code Complexity (20% weight)
 23 | 
 24 | Measures how complex the changed code is:
 25 | 
 26 | - Uses AST analysis complexity metrics
 27 | - Higher complexity = higher priority
 28 | - Adjusted by drift severity (critical/high/medium/low)
 29 | 
 30 | **Example**: A complex algorithm change scores higher than a simple utility function change.
 31 | 
 32 | ### 2. Usage Frequency (25% weight)
 33 | 
 34 | Estimates how often the code is used:
 35 | 
 36 | - Based on export count (public APIs)
 37 | - Documentation references
 38 | - Optional: Actual usage metrics (function calls, imports)
 39 | 
 40 | **Example**: A widely-imported authentication function scores higher than an internal helper.
 41 | 
 42 | ### 3. Change Magnitude (25% weight)
 43 | 
 44 | Evaluates the size and impact of changes:
 45 | 
 46 | - Breaking changes: 100 (maximum priority)
 47 | - Major changes: 20 points each
 48 | - Minor changes: 8 points each
 49 | 
 50 | **Example**: Changing a function signature (breaking) scores 100, while adding a parameter with default (major) scores 20.
 51 | 
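    | A minimal sketch of this rule, assuming the point values above (capping the additive score at 100 is an assumption for illustration):
    | 
    | ```typescript
    | // Hypothetical helper: any breaking change saturates the score.
    | function changeMagnitudeScore(
    |   breaking: number,
    |   major: number,
    |   minor: number,
    | ): number {
    |   if (breaking > 0) return 100;
    |   return Math.min(100, major * 20 + minor * 8);
    | }
    | ```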
 52 | ### 4. Documentation Coverage (15% weight)
 53 | 
 54 | Scoring is inverted (lower coverage means higher priority):
 55 | 
 56 | - Missing documentation: 90
 57 | - Partially documented: 40-80 (based on coverage ratio)
 58 | - Well documented: 0-40
 59 | 
 60 | **Example**: Undocumented new features score higher than changes to well-documented APIs.
 61 | 
 62 | ### 5. Staleness (10% weight)
 63 | 
 64 | Based on how long since documentation was last updated:
 65 | 
 66 | - 90+ days old: 100
 67 | - 30-90 days: 80
 68 | - 14-30 days: 60
 69 | - 7-14 days: 40
 70 | - Less than 7 days: 20
 71 | 
 72 | **Example**: Documentation untouched for 3 months scores higher than recently updated docs.
 73 | 
 74 | ### 6. User Feedback (5% weight)
 75 | 
 76 | Future integration with issue tracking:
 77 | 
 78 | - Currently returns 0 (placeholder)
 79 | - Will incorporate reported documentation issues
 80 | - User complaints increase priority
 81 | 
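    | As promised above, here is a minimal sketch of the weighted-sum calculation, assuming the default weights listed for each factor (illustrative only; see the `DriftDetector` source for the actual implementation):
    | 
    | ```typescript
    | // Default weights as documented above; they sum to 1.0.
    | const DEFAULT_WEIGHTS = {
    |   codeComplexity: 0.2,
    |   usageFrequency: 0.25,
    |   changeMagnitude: 0.25,
    |   documentationCoverage: 0.15,
    |   staleness: 0.1,
    |   userFeedback: 0.05,
    | } as const;
    | 
    | type FactorScores = Record<keyof typeof DEFAULT_WEIGHTS, number>;
    | 
    | // Each factor is a 0-100 score; the overall score is their weighted sum.
    | function computeOverallScore(factors: FactorScores): number {
    |   let sum = 0;
    |   for (const [name, weight] of Object.entries(DEFAULT_WEIGHTS)) {
    |     sum += factors[name as keyof typeof DEFAULT_WEIGHTS] * weight;
    |   }
    |   return Math.min(100, Math.max(0, sum));
    | }
    | ```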
 82 | ## Basic Usage
 83 | 
 84 | ### Detect Drift with Priority Scores
 85 | 
 86 | ```typescript
 87 | import { DriftDetector } from "./utils/drift-detector.js";
 88 | 
 89 | const detector = new DriftDetector(projectPath);
 90 | await detector.initialize();
 91 | 
 92 | // Create snapshots
 93 | const oldSnapshot = await detector.loadLatestSnapshot();
 94 | const newSnapshot = await detector.createSnapshot(projectPath, docsPath);
 95 | 
 96 | // Detect drift with priority scoring
 97 | const results = await detector.detectDriftWithPriority(
 98 |   oldSnapshot,
 99 |   newSnapshot,
100 | );
101 | 
102 | for (const result of results) {
103 |   console.log(`File: ${result.filePath}`);
104 |   console.log(`Priority: ${result.priorityScore.recommendation}`);
105 |   console.log(`Score: ${result.priorityScore.overall}/100`);
106 |   console.log(`Action: ${result.priorityScore.suggestedAction}`);
107 | }
108 | ```
109 | 
110 | ### Get Prioritized Results
111 | 
112 | Results sorted by priority (highest first):
113 | 
114 | ```typescript
115 | const prioritizedResults = await detector.getPrioritizedDriftResults(
116 |   oldSnapshot,
117 |   newSnapshot,
118 | );
119 | 
120 | // Handle critical issues first
121 | const critical = prioritizedResults.filter(
122 |   (r) => r.priorityScore?.recommendation === "critical",
123 | );
124 | 
125 | for (const result of critical) {
126 |   console.log(`URGENT: ${result.filePath}`);
127 |   console.log(`Breaking changes: ${result.impactAnalysis.breakingChanges}`);
128 | }
129 | ```
130 | 
131 | ## Advanced Configuration
132 | 
133 | ### Custom Weights
134 | 
135 | Adjust scoring weights based on your team's priorities:
136 | 
137 | ```typescript
138 | const detector = new DriftDetector(projectPath);
139 | 
140 | // Emphasize change magnitude and usage frequency
141 | detector.setCustomWeights({
142 |   changeMagnitude: 0.35, // Increased from 0.25
143 |   usageFrequency: 0.3, // Increased from 0.25
144 |   codeComplexity: 0.15, // Decreased from 0.20
145 |   documentationCoverage: 0.1,
146 |   staleness: 0.08,
147 |   userFeedback: 0.02,
148 | });
149 | ```
150 | 
151 | **Note**: Weights are applied as-is in the weighted sum calculation. The default weights sum to 1.0, but custom weights don't need to. Partial updates merge with defaults.
152 | 
153 | ### Usage Metadata
154 | 
155 | Provide actual usage metrics for more accurate scoring:
156 | 
157 | ```typescript
158 | import { UsageMetadata } from "./utils/drift-detector.js";
159 | 
160 | const usageMetadata: UsageMetadata = {
161 |   filePath: "/src/api/auth.ts",
162 |   functionCalls: new Map([
163 |     ["authenticate", 1500], // Called 1500 times
164 |     ["validateToken", 800],
165 |   ]),
166 |   classInstantiations: new Map([["AuthManager", 50]]),
167 |   imports: new Map([
168 |     ["authenticate", 25], // Imported by 25 files
169 |   ]),
170 | };
171 | 
172 | const results = await detector.detectDriftWithPriority(
173 |   oldSnapshot,
174 |   newSnapshot,
175 |   usageMetadata,
176 | );
177 | ```
178 | 
179 | ## Integration Examples
180 | 
181 | ### CI/CD Pipeline
182 | 
183 | Fail builds for critical drift:
184 | 
185 | ```typescript
186 | const results = await detector.getPrioritizedDriftResults(
187 |   oldSnapshot,
188 |   newSnapshot,
189 | );
190 | 
191 | const criticalCount = results.filter(
192 |   (r) => r.priorityScore?.recommendation === "critical",
193 | ).length;
194 | 
195 | if (criticalCount > 0) {
196 |   console.error(`❌ ${criticalCount} critical documentation drift issues`);
197 |   process.exit(1);
198 | }
199 | ```
200 | 
201 | ### Task Management Integration
202 | 
203 | Export to GitHub Issues or Jira:
204 | 
205 | ```typescript
206 | for (const result of prioritizedResults) {
207 |   const score = result.priorityScore;
208 | 
209 |   await createIssue({
210 |     title: `[${score.recommendation.toUpperCase()}] Update docs for ${
211 |       result.filePath
212 |     }`,
213 |     body: `
214 | ## Priority Score: ${score.overall}/100
215 | 
216 | ${score.suggestedAction}
217 | 
218 | ### Factors:
219 | - Code Complexity: ${score.factors.codeComplexity}
220 | - Usage Frequency: ${score.factors.usageFrequency}
221 | - Change Magnitude: ${score.factors.changeMagnitude}
222 | - Coverage: ${score.factors.documentationCoverage}
223 | - Staleness: ${score.factors.staleness}
224 | 
225 | ### Impact:
226 | - Breaking: ${result.impactAnalysis.breakingChanges}
227 | - Major: ${result.impactAnalysis.majorChanges}
228 | - Minor: ${result.impactAnalysis.minorChanges}
229 |     `.trim(),
230 |     labels: [score.recommendation, "documentation", "drift"],
231 |     priority: score.recommendation,
232 |   });
233 | }
234 | ```
235 | 
236 | ### Dashboard Visualization
237 | 
238 | Group by priority level:
239 | 
240 | ```typescript
241 | const byPriority = {
242 |   critical: results.filter(
243 |     (r) => r.priorityScore?.recommendation === "critical",
244 |   ),
245 |   high: results.filter((r) => r.priorityScore?.recommendation === "high"),
246 |   medium: results.filter((r) => r.priorityScore?.recommendation === "medium"),
247 |   low: results.filter((r) => r.priorityScore?.recommendation === "low"),
248 | };
249 | 
250 | console.log(`
251 | 📊 Documentation Drift Summary
252 | ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
253 | 🔴 Critical: ${byPriority.critical.length} (update immediately)
254 | 🟠 High:     ${byPriority.high.length} (update within 1 day)
255 | 🟡 Medium:   ${byPriority.medium.length} (update within 1 week)
256 | 🟢 Low:      ${byPriority.low.length} (update when convenient)
257 | `);
258 | ```
259 | 
260 | ## Best Practices
261 | 
262 | ### 1. Regular Scanning
263 | 
264 | Run drift detection regularly to catch issues early:
265 | 
266 | ```bash
267 | # Daily CI job
268 | npm run drift:detect -- --priority-threshold=high
269 | ```
270 | 
271 | ### 2. Triage Workflow
272 | 
273 | 1. **Critical (immediate)**: Assign to on-call developer
274 | 2. **High (1 day)**: Add to current sprint
275 | 3. **Medium (1 week)**: Add to backlog
276 | 4. **Low (when convenient)**: Batch with other low-priority updates
277 | 
278 | ### 3. Custom Weights by Project
279 | 
280 | Different projects have different priorities (a sketch of both profiles follows):
281 | 
282 | **API Library**:
283 | 
284 | - High weight on usage frequency (30%)
285 | - High weight on breaking changes (30%)
286 | 
287 | **Internal Tool**:
288 | 
289 | - Lower weight on usage frequency (15%)
290 | - Higher weight on complexity (25%)
291 | 
292 | ### 4. Monitor Trends
293 | 
294 | Track priority scores over time:
295 | 
296 | ```typescript
297 | // Store scores in time series database
298 | const metrics = {
299 |   timestamp: new Date(),
300 |   criticalCount: byPriority.critical.length,
301 |   averageScore:
302 |     results.reduce((sum, r) => sum + r.priorityScore.overall, 0) /
303 |     results.length,
304 |   totalDrift: results.length,
305 | };
306 | 
307 | await metricsDB.insert(metrics);
308 | ```
309 | 
310 | ## Troubleshooting
311 | 
312 | ### Scores Seem Too High/Low
313 | 
314 | **Problem**: All drift results score as high priority, or all score as low priority.
315 | 
316 | **Solutions**:
317 | 
318 | 1. Adjust custom weights for your context
319 | 2. Verify snapshot data is accurate
320 | 3. Check if usage metadata is available
321 | 4. Review complexity calculations
322 | 
323 | ### Missing Documentation Dominates
324 | 
325 | **Problem**: Missing docs always score 90, drowning out other issues.
326 | 
327 | **Solutions**:
328 | 
329 | 1. Lower documentationCoverage weight
330 | 2. Focus on documented code first
331 | 3. Use filters: `results.filter(r => r.impactAnalysis.affectedDocFiles.length > 0)`
332 | 
333 | ### Breaking Changes Not Prioritized
334 | 
335 | **Problem**: Breaking changes should be critical but aren't.
336 | 
337 | **Solutions**:
338 | 
339 | 1. Increase changeMagnitude weight
340 | 2. Verify impact analysis is detecting breaking changes correctly
341 | 3. Check if other factors (low complexity, no docs) are pulling down score
342 | 
343 | ## Related Documentation
344 | 
345 | - [Drift Detection System](./repository-analysis.md#drift-detection)
346 | - [AST-Based Analysis](../explanation/ast-analysis.md)
347 | - [CI/CD Integration](./github-pages-deployment.md)
348 | 
349 | ## API Reference
350 | 
351 | See [DriftDetector API Reference](../reference/drift-detector.md) for complete method documentation.
352 | 
```

--------------------------------------------------------------------------------
/docs/adrs/adr-0001-mcp-server-architecture.md:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | id: adr-1-mcp-server-architecture
  3 | title: "ADR-001: MCP Server Architecture using TypeScript SDK"
  4 | sidebar_label: "ADR-001: MCP Server Architecture"
  5 | sidebar_position: 1
  6 | documcp:
  7 |   last_updated: "2025-11-20T00:46:21.934Z"
  8 |   last_validated: "2025-12-09T19:41:38.566Z"
  9 |   auto_updated: false
 10 |   update_frequency: monthly
 11 |   validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
 12 | ---
 13 | 
 14 | # ADR-001: MCP Server Architecture using TypeScript SDK
 15 | 
 16 | ## Status
 17 | 
 18 | Accepted
 19 | 
 20 | ## Context
 21 | 
 22 | DocuMCP requires a robust server architecture that can integrate seamlessly with development environments like GitHub Copilot, Claude Desktop, and other MCP-enabled tools. The server needs to provide intelligent repository analysis, static site generator recommendations, and automated documentation deployment workflows.
 23 | 
 24 | Key requirements:
 25 | 
 26 | - Standards-compliant MCP protocol implementation
 27 | - Stateless operation for consistency and reliability
 28 | - Modular design separating concerns
 29 | - Integration with existing developer workflows
 30 | - Scalable architecture supporting complex multi-step operations
 31 | 
 32 | ## Decision
 33 | 
 34 | We will implement the DocuMCP server using the TypeScript Model Context Protocol SDK, following a modular, stateless architecture pattern.
 35 | 
 36 | ### Core Architectural Components:
 37 | 
 38 | 1. **MCP Server Foundation**: TypeScript-based implementation using official MCP SDK
 39 | 2. **Repository Analysis Engine**: Multi-layered analysis of project characteristics
 40 | 3. **Static Site Generator Recommendation Engine**: Algorithmic decision framework
 41 | 4. **File Generation and Template System**: Template-based configuration generation
 42 | 5. **GitHub Integration Layer**: Automated deployment orchestration
 43 | 
 44 | ### Design Principles:
 45 | 
 46 | - **Stateless Operation**: Each invocation analyzes current repository state
 47 | - **Modular Design**: Clear separation between analysis, recommendation, generation, and deployment
 48 | - **Standards Compliance**: Full adherence to MCP specification requirements
 49 | - **Session Context**: Temporary context preservation within single sessions for complex workflows
 50 | 
 51 | ## Alternatives Considered
 52 | 
 53 | ### Python-based Implementation
 54 | 
 55 | - **Pros**: Rich ecosystem for NLP and analysis, familiar to many developers
 56 | - **Cons**: Less mature MCP SDK, deployment complexity, slower startup times
 57 | - **Decision**: Rejected because the TypeScript MCP ecosystem is more mature
 58 | 
 59 | ### Go-based Implementation
 60 | 
 61 | - **Pros**: High performance, excellent concurrency, small binary size
 62 | - **Cons**: Limited MCP SDK support, smaller ecosystem for documentation tools
 63 | - **Decision**: Rejected due to limited MCP tooling and development velocity concerns
 64 | 
 65 | ### Stateful Server with Database
 66 | 
 67 | - **Pros**: Could cache analysis results, maintain user preferences
 68 | - **Cons**: Deployment complexity, synchronization issues, potential staleness
 69 | - **Decision**: Rejected to maintain simplicity and ensure consistency
 70 | 
 71 | ## Consequences
 72 | 
 73 | ### Positive
 74 | 
 75 | - **Developer Familiarity**: TypeScript is widely known in the target developer community
 76 | - **MCP Ecosystem**: Mature tooling and extensive documentation available
 77 | - **Rapid Development**: Rich ecosystem accelerates feature development
 78 | - **Integration**: Seamless integration with existing JavaScript/TypeScript tooling
 79 | - **Consistency**: Stateless design eliminates synchronization issues
 80 | - **Reliability**: Reduces complexity and potential failure modes
 81 | 
 82 | ### Negative
 83 | 
 84 | - **Runtime Overhead**: Node.js runtime may have higher memory usage than compiled alternatives
 85 | - **Startup Time**: Node.js startup may be slower than Go or Rust alternatives
 86 | - **Dependency Management**: npm ecosystem can introduce supply chain complexity
 87 | 
 88 | ### Risks and Mitigations
 89 | 
 90 | - **Supply Chain Security**: Use npm audit and dependency scanning in CI/CD
 91 | - **Performance**: Implement intelligent caching and optimize hot paths
 92 | - **Memory Usage**: Monitor and optimize memory allocation patterns
 93 | 
 94 | ## Implementation Details
 95 | 
 96 | ### Project Structure
 97 | 
 98 | ```
 99 | src/
100 | ├── server/           # MCP server implementation
101 | ├── analysis/         # Repository analysis engine
102 | ├── recommendation/   # SSG recommendation logic
103 | ├── generation/       # File and template generation
104 | ├── deployment/       # GitHub integration
105 | └── types/           # TypeScript type definitions
106 | ```
107 | 
108 | ### Key Dependencies
109 | 
110 | - `@modelcontextprotocol/typescript-sdk`: MCP protocol implementation
111 | - `typescript`: Type safety and development experience
112 | - `zod`: Runtime type validation for MCP tools
113 | - `yaml`: Configuration file parsing and generation
114 | - `mustache`: Template rendering engine
115 | - `simple-git`: Git repository interaction
116 | 
117 | ### Error Handling Strategy
118 | 
119 | - Comprehensive input validation using Zod schemas
120 | - Structured error responses with actionable guidance
121 | - Graceful degradation for partial analysis failures
122 | - Detailed logging for debugging and monitoring
123 | 
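    | As a sketch of this strategy (illustrative, not the actual server code), a tool handler might validate input with Zod and map failures to the structured `code`/`message`/`resolution` error shape used by the real tools:
    | 
    | ```typescript
    | import { z } from "zod";
    | 
    | // Hypothetical tool input schema; each real tool defines its own.
    | const AnalyzeRepositorySchema = z.object({
    |   path: z.string(),
    |   depth: z.enum(["quick", "standard", "deep"]).default("standard"),
    | });
    | 
    | function parseToolInput(raw: unknown) {
    |   const parsed = AnalyzeRepositorySchema.safeParse(raw);
    |   if (!parsed.success) {
    |     // Structured, actionable error instead of a bare exception.
    |     return {
    |       success: false as const,
    |       error: {
    |         code: "INVALID_INPUT",
    |         message: parsed.error.issues.map((i) => i.message).join("; "),
    |         resolution: "Check the tool parameters against the schema",
    |       },
    |     };
    |   }
    |   return { success: true as const, data: parsed.data };
    | }
    | ```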
124 | ## Compliance and Standards
125 | 
126 | - Full MCP specification compliance for protocol interactions
127 | - JSON-RPC message handling with proper error codes
128 | - Standardized tool parameter validation and responses
129 | - Security best practices for file system access and Git operations
130 | 
131 | ## Research Integration (2025-01-14)
132 | 
133 | ### Performance Validation
134 | 
135 | **Research Findings Incorporated**: Comprehensive analysis validates our architectural decisions:
136 | 
137 | 1. **TypeScript MCP SDK Performance**:
138 | 
139 |    - ✅ JSON-RPC 2.0 protocol provides minimal communication overhead
140 |    - ✅ Native WebSocket/stdio transport layers optimize performance
141 |    - ✅ Type safety adds compile-time benefits without runtime performance cost
142 | 
143 | 2. **Node.js Memory Optimization** (Critical for Repository Analysis):
144 |    - **Streaming Implementation**: 10x memory reduction for files >100MB
145 |    - **Worker Thread Pool**: 3-4x performance improvement for parallel processing
146 |    - **Memory-Mapped Files**: 5x speed improvement for large directory traversal
147 | 
148 | ### Updated Implementation Strategy
149 | 
150 | Based on research validation, the architecture will implement:
151 | 
152 | ```typescript
153 | // Enhanced streaming approach for large repositories
154 | class RepositoryAnalyzer {
155 |   private workerPool: WorkerPool;
156 |   private streamThreshold = 10 * 1024 * 1024; // 10MB
157 | 
158 |   async analyzeRepository(repoPath: string): Promise<AnalysisResult> {
159 |     try {
160 |       const files = await this.scanDirectory(repoPath);
161 | 
162 |       // Parallel processing with worker threads
163 |       const chunks = this.chunkFiles(files, this.workerPool.size);
164 |       const results = await Promise.allSettled(
165 |         chunks.map((chunk) => this.workerPool.execute("analyzeChunk", chunk)),
166 |       );
167 | 
168 |       // Handle partial failures gracefully
169 |       const successfulResults = results
170 |         .filter(
171 |           (result): result is PromiseFulfilledResult<any> =>
172 |             result.status === "fulfilled",
173 |         )
174 |         .map((result) => result.value);
175 | 
176 |       if (successfulResults.length === 0) {
177 |         throw new Error("All analysis chunks failed");
178 |       }
179 | 
180 |       return this.aggregateResults(successfulResults);
181 |     } catch (error) {
182 |       throw new Error(
    |         `Repository analysis failed: ${
    |           error instanceof Error ? error.message : String(error)
    |         }`,
    |       );
183 |     }
184 |   }
185 | 
186 |   private async analyzeFile(filePath: string): Promise<FileAnalysis> {
187 |     try {
188 |       const stats = await fs.stat(filePath);
189 | 
190 |       // Use streaming for large files
191 |       if (stats.size > this.streamThreshold) {
192 |         return await this.analyzeFileStream(filePath);
193 |       }
194 | 
195 |       return await this.analyzeFileStandard(filePath);
196 |     } catch (error) {
197 |       throw new Error(
    |         `File analysis failed for ${filePath}: ${
    |           error instanceof Error ? error.message : String(error)
    |         }`,
    |       );
198 |     }
199 |   }
200 | }
201 | ```
202 | 
203 | ### Performance Benchmarks
204 | 
205 | Research-validated performance targets:
206 | 
207 | - **Small Repositories** (&lt;100 files): &lt;1 second analysis time
208 | - **Medium Repositories** (100-1000 files): &lt;10 seconds analysis time
209 | - **Large Repositories** (1000+ files): &lt;60 seconds analysis time
210 | - **Memory Usage**: Constant memory profile regardless of repository size
211 | 
212 | ## Code Execution with MCP (CE-MCP) Compatibility (2025-12-09)
213 | 
214 | ### Validation of Architectural Decisions
215 | 
216 | **Research Findings**: The emergence of Code Execution with MCP (CE-MCP) / Code Mode validates our architectural decisions:
217 | 
218 | 1. **Stateless Design is Optimal**: Our stateless operation model (see Design Principles above) is perfect for Code Mode workflows where clients orchestrate tools through generated code
219 | 2. **Tool-Based Architecture**: The modular tool design enables seamless code generation and orchestration by CE-MCP clients
220 | 3. **Zero Migration Required**: documcp is already fully compatible with Code Mode clients (Claude Code, pctx, Cloudflare Workers AI)
221 | 
222 | ### CE-MCP Performance Benefits
223 | 
224 | When used with Code Mode clients, documcp workflows achieve:
225 | 
226 | - **98.7% token reduction** through dynamic tool discovery
227 | - **75x cost reduction** via summary-only results
228 | - **60% faster execution** through parallel tool orchestration
229 | - **19.2% fewer API calls** via direct code-based coordination
230 | 
231 | ### Server vs Client Responsibilities
232 | 
233 | **documcp (Server) provides**:
234 | 
235 | - Standard MCP protocol tools (already implemented)
236 | - Zod-validated schemas for type-safe code generation
237 | - JSON-RPC interface for universal client compatibility
238 | 
239 | **Code Mode Clients handle**:
240 | 
241 | - Code generation (TypeScript/Python orchestration)
242 | - Sandboxed execution (Docker, isolates)
243 | - Tool discovery and filesystem navigation
244 | - Security enforcement (AgentBound-style frameworks)
245 | 
246 | ### Implementation Status
247 | 
248 | ✅ **Full CE-MCP compatibility validated** (2025-12-09)
249 | 
250 | - MCP SDK upgraded to v1.24.0 (PR #69)
251 | - All tests passing (91.67% coverage)
252 | - No architectural changes required
253 | 
254 | For detailed analysis, see [ADR-011: CE-MCP Compatibility](adr-0011-ce-mcp-compatibility.md).
255 | 
256 | ## Future Considerations
257 | 
258 | - Potential migration to WebAssembly for performance-critical components
259 | - Plugin architecture for extensible SSG support
260 | - Distributed analysis for large repository handling (validated by research)
261 | - Machine learning integration for improved recommendations
262 | - MCP Tasks API integration for long-running operations (SDK 1.24.0)
263 | 
264 | ## References
265 | 
266 | - [MCP TypeScript SDK Documentation](https://github.com/modelcontextprotocol/typescript-sdk)
267 | - [Model Context Protocol Specification](https://spec.modelcontextprotocol.io/)
268 | - [TypeScript Performance Best Practices](https://github.com/microsoft/TypeScript/wiki/Performance)
269 | 
```

--------------------------------------------------------------------------------
/src/tools/validate-documentation-freshness.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Validate Documentation Freshness Tool
  3 |  *
  4 |  * Validates documentation freshness, initializes metadata for files without it,
  5 |  * and updates timestamps based on code changes.
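    |  *
    |  * Example (hypothetical invocation; see the Zod schema below for all options):
    |  *   await validateDocumentationFreshness({
    |  *     docsPath: "./docs",
    |  *     projectPath: ".",
    |  *   });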
  6 |  */
  7 | 
  8 | import { z } from "zod";
  9 | import path from "path";
 10 | import { simpleGit } from "simple-git";
 11 | import {
 12 |   findMarkdownFiles,
 13 |   parseDocFrontmatter,
 14 |   updateDocFrontmatter,
 15 |   initializeFreshnessMetadata,
 16 |   STALENESS_PRESETS,
 17 |   type DocFreshnessMetadata,
 18 |   scanDocumentationFreshness,
 19 | } from "../utils/freshness-tracker.js";
 20 | import { type MCPToolResponse } from "../types/api.js";
 21 | import {
 22 |   storeFreshnessEvent,
 23 |   updateFreshnessEvent,
 24 | } from "../memory/freshness-kg-integration.js";
 25 | 
 26 | /**
 27 |  * Input schema for validate_documentation_freshness tool
 28 |  */
 29 | export const ValidateDocumentationFreshnessSchema = z.object({
 30 |   docsPath: z.string().describe("Path to documentation directory"),
 31 |   projectPath: z
 32 |     .string()
 33 |     .describe("Path to project root (for git integration)"),
 34 |   initializeMissing: z
 35 |     .boolean()
 36 |     .optional()
 37 |     .default(true)
 38 |     .describe("Initialize metadata for files without it"),
 39 |   updateExisting: z
 40 |     .boolean()
 41 |     .optional()
 42 |     .default(false)
 43 |     .describe("Update last_validated timestamp for all files"),
 44 |   updateFrequency: z
 45 |     .enum(["realtime", "active", "recent", "weekly", "monthly", "quarterly"])
 46 |     .optional()
 47 |     .default("monthly")
 48 |     .describe("Default update frequency for new metadata"),
 49 |   validateAgainstGit: z
 50 |     .boolean()
 51 |     .optional()
 52 |     .default(true)
 53 |     .describe("Validate against current git commit"),
 54 | });
 55 | 
 56 | export type ValidateDocumentationFreshnessInput = z.input<
 57 |   typeof ValidateDocumentationFreshnessSchema
 58 | >;
 59 | 
 60 | /**
 61 |  * Validation result for a single file
 62 |  */
 63 | interface FileValidationResult {
 64 |   filePath: string;
 65 |   relativePath: string;
 66 |   action: "initialized" | "updated" | "skipped" | "error";
 67 |   metadata?: DocFreshnessMetadata;
 68 |   error?: string;
 69 | }
 70 | 
 71 | /**
 72 |  * Validation report
 73 |  */
 74 | interface ValidationReport {
 75 |   validatedAt: string;
 76 |   docsPath: string;
 77 |   projectPath: string;
 78 |   totalFiles: number;
 79 |   initialized: number;
 80 |   updated: number;
 81 |   skipped: number;
 82 |   errors: number;
 83 |   currentCommit?: string;
 84 |   files: FileValidationResult[];
 85 | }
 86 | 
 87 | /**
 88 |  * Format validation report for display
 89 |  */
 90 | function formatValidationReport(report: ValidationReport): string {
 91 |   let output = "# Documentation Freshness Validation Report\n\n";
 92 |   output += `**Validated at**: ${new Date(
 93 |     report.validatedAt,
 94 |   ).toLocaleString()}\n`;
 95 |   output += `**Documentation path**: ${report.docsPath}\n`;
 96 | 
 97 |   if (report.currentCommit) {
 98 |     output += `**Current commit**: ${report.currentCommit.substring(0, 7)}\n`;
 99 |   }
100 | 
101 |   output += "\n## Summary\n\n";
102 |   output += `- **Total files**: ${report.totalFiles}\n`;
103 |   output += `- **Initialized**: ${report.initialized} files\n`;
104 |   output += `- **Updated**: ${report.updated} files\n`;
105 |   output += `- **Skipped**: ${report.skipped} files\n`;
106 | 
107 |   if (report.errors > 0) {
108 |     output += `- **Errors**: ${report.errors} files\n`;
109 |   }
110 | 
111 |   output += "\n## Actions Performed\n\n";
112 | 
113 |   // Group by action
114 |   const grouped = {
115 |     initialized: report.files.filter((f) => f.action === "initialized"),
116 |     updated: report.files.filter((f) => f.action === "updated"),
117 |     error: report.files.filter((f) => f.action === "error"),
118 |   };
119 | 
120 |   if (grouped.initialized.length > 0) {
121 |     output += `### ✨ Initialized (${grouped.initialized.length})\n\n`;
122 |     for (const file of grouped.initialized) {
123 |       output += `- ${file.relativePath}\n`;
124 |     }
125 |     output += "\n";
126 |   }
127 | 
128 |   if (grouped.updated.length > 0) {
129 |     output += `### 🔄 Updated (${grouped.updated.length})\n\n`;
130 |     for (const file of grouped.updated) {
131 |       output += `- ${file.relativePath}\n`;
132 |     }
133 |     output += "\n";
134 |   }
135 | 
136 |   if (grouped.error.length > 0) {
137 |     output += `### ❌ Errors (${grouped.error.length})\n\n`;
138 |     for (const file of grouped.error) {
139 |       output += `- ${file.relativePath}: ${file.error}\n`;
140 |     }
141 |     output += "\n";
142 |   }
143 | 
144 |   // Recommendations
145 |   output += "## Next Steps\n\n";
146 | 
147 |   if (report.initialized > 0) {
148 |     output += `→ ${report.initialized} files now have freshness tracking enabled\n`;
149 |   }
150 | 
151 |   if (report.updated > 0) {
152 |     output += `→ ${report.updated} files have been marked as validated\n`;
153 |   }
154 | 
155 |   output += `→ Run \`track_documentation_freshness\` to view current freshness status\n`;
156 | 
157 |   return output;
158 | }
159 | 
160 | /**
161 |  * Validate documentation freshness
162 |  */
163 | export async function validateDocumentationFreshness(
164 |   input: ValidateDocumentationFreshnessInput,
165 | ): Promise<MCPToolResponse> {
166 |   const startTime = Date.now();
167 | 
168 |   try {
169 |     const {
170 |       docsPath,
171 |       projectPath,
172 |       initializeMissing,
173 |       updateExisting,
174 |       updateFrequency,
175 |       validateAgainstGit,
176 |     } = input;
177 | 
178 |     // Get current git commit if requested
179 |     let currentCommit: string | undefined;
180 |     if (validateAgainstGit) {
181 |       try {
182 |         const git = simpleGit(projectPath);
183 |         const isRepo = await git.checkIsRepo();
184 | 
185 |         if (isRepo) {
186 |           const log = await git.log({ maxCount: 1 });
187 |           currentCommit = log.latest?.hash;
188 |         }
189 |       } catch (error) {
190 |         // Git not available, continue without it
191 |       }
192 |     }
193 | 
194 |     // Find all markdown files
195 |     const markdownFiles = await findMarkdownFiles(docsPath);
196 |     const results: FileValidationResult[] = [];
197 | 
198 |     for (const filePath of markdownFiles) {
199 |       const relativePath = path.relative(docsPath, filePath);
200 | 
201 |       try {
202 |         const frontmatter = await parseDocFrontmatter(filePath);
203 |         const hasMetadata = !!frontmatter.documcp?.last_updated;
204 | 
205 |         if (!hasMetadata && initializeMissing) {
206 |           // Initialize metadata
207 |           await initializeFreshnessMetadata(filePath, {
208 |             updateFrequency,
209 |             autoUpdated: false,
210 |           });
211 | 
212 |           // If git is available, set validated_against_commit
213 |           if (currentCommit) {
214 |             await updateDocFrontmatter(filePath, {
215 |               validated_against_commit: currentCommit,
216 |             });
217 |           }
218 | 
219 |           const updatedFrontmatter = await parseDocFrontmatter(filePath);
220 |           results.push({
221 |             filePath,
222 |             relativePath,
223 |             action: "initialized",
224 |             metadata: updatedFrontmatter.documcp,
225 |           });
226 |         } else if (hasMetadata && updateExisting) {
227 |           // Update existing metadata
228 |           const updateData: Partial<DocFreshnessMetadata> = {
229 |             last_validated: new Date().toISOString(),
230 |           };
231 | 
232 |           if (currentCommit) {
233 |             updateData.validated_against_commit = currentCommit;
234 |           }
235 | 
236 |           await updateDocFrontmatter(filePath, updateData);
237 | 
238 |           const updatedFrontmatter = await parseDocFrontmatter(filePath);
239 |           results.push({
240 |             filePath,
241 |             relativePath,
242 |             action: "updated",
243 |             metadata: updatedFrontmatter.documcp,
244 |           });
245 |         } else {
246 |           results.push({
247 |             filePath,
248 |             relativePath,
249 |             action: "skipped",
250 |             metadata: frontmatter.documcp,
251 |           });
252 |         }
253 |       } catch (error) {
254 |         results.push({
255 |           filePath,
256 |           relativePath,
257 |           action: "error",
258 |           error: error instanceof Error ? error.message : "Unknown error",
259 |         });
260 |       }
261 |     }
262 | 
263 |     // Generate report
264 |     const report: ValidationReport = {
265 |       validatedAt: new Date().toISOString(),
266 |       docsPath,
267 |       projectPath,
268 |       totalFiles: markdownFiles.length,
269 |       initialized: results.filter((r) => r.action === "initialized").length,
270 |       updated: results.filter((r) => r.action === "updated").length,
271 |       skipped: results.filter((r) => r.action === "skipped").length,
272 |       errors: results.filter((r) => r.action === "error").length,
273 |       currentCommit,
274 |       files: results,
275 |     };
276 | 
277 |     const formattedReport = formatValidationReport(report);
278 | 
279 |     // Store validation event in knowledge graph
280 |     let eventId: string | undefined;
281 |     if (report.initialized > 0 || report.updated > 0) {
282 |       try {
283 |         // Scan current state to get freshness metrics
284 |         const scanReport = await scanDocumentationFreshness(docsPath, {
285 |           warning: STALENESS_PRESETS.monthly,
286 |           stale: {
287 |             value: STALENESS_PRESETS.monthly.value * 2,
288 |             unit: STALENESS_PRESETS.monthly.unit,
289 |           },
290 |           critical: {
291 |             value: STALENESS_PRESETS.monthly.value * 3,
292 |             unit: STALENESS_PRESETS.monthly.unit,
293 |           },
294 |         });
295 | 
296 |         // Determine event type
297 |         const eventType = report.initialized > 0 ? "initialization" : "update";
298 | 
299 |         // Store in KG
300 |         eventId = await storeFreshnessEvent(
301 |           projectPath,
302 |           docsPath,
303 |           scanReport,
304 |           eventType,
305 |         );
306 | 
307 |         // Update event with validation details
308 |         await updateFreshnessEvent(eventId, {
309 |           filesInitialized: report.initialized,
310 |           filesUpdated: report.updated,
311 |           eventType,
312 |         });
313 |       } catch (error) {
314 |         // KG storage failed, but continue with the response
315 |         console.warn(
316 |           "Failed to store validation event in knowledge graph:",
317 |           error,
318 |         );
319 |       }
320 |     }
321 | 
322 |     const response: MCPToolResponse = {
323 |       success: true,
324 |       data: {
325 |         summary: `Validated ${report.totalFiles} files: ${report.initialized} initialized, ${report.updated} updated`,
326 |         report,
327 |         formattedReport,
328 |         kgEventId: eventId,
329 |       },
330 |       metadata: {
331 |         toolVersion: "1.0.0",
332 |         executionTime: Date.now() - startTime,
333 |         timestamp: new Date().toISOString(),
334 |       },
335 |       recommendations: [],
336 |     };
337 | 
338 |     return response;
339 |   } catch (error) {
340 |     return {
341 |       success: false,
342 |       error: {
343 |         code: "FRESHNESS_VALIDATION_FAILED",
344 |         message:
345 |           error instanceof Error
346 |             ? error.message
347 |             : "Unknown error validating documentation freshness",
348 |         resolution:
349 |           "Check that the documentation and project paths exist and are readable",
350 |       },
351 |       metadata: {
352 |         toolVersion: "1.0.0",
353 |         executionTime: Date.now() - startTime,
354 |         timestamp: new Date().toISOString(),
355 |       },
356 |     };
357 |   }
358 | }
359 | 
```

--------------------------------------------------------------------------------
/tests/tools/generate-llm-context.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
  2 | import { promises as fs } from "fs";
  3 | import path from "path";
  4 | import os from "os";
  5 | import {
  6 |   generateLLMContext,
  7 |   setToolDefinitions,
  8 |   GenerateLLMContextInputSchema,
  9 | } from "../../src/tools/generate-llm-context.js";
 10 | import { z } from "zod";
 11 | 
 12 | describe("generate_llm_context", () => {
 13 |   let tmpDir: string;
 14 | 
 15 |   beforeEach(async () => {
 16 |     tmpDir = await fs.mkdtemp(path.join(os.tmpdir(), "generate-llm-context-"));
 17 | 
 18 |     // Set up mock tool definitions
 19 |     const mockTools = [
 20 |       {
 21 |         name: "analyze_repository",
 22 |         description: "Analyze repository structure and dependencies",
 23 |         inputSchema: z.object({
 24 |           path: z.string(),
 25 |           depth: z.enum(["quick", "standard", "deep"]).optional(),
 26 |         }),
 27 |       },
 28 |       {
 29 |         name: "recommend_ssg",
 30 |         description: "Recommend static site generator",
 31 |         inputSchema: z.object({
 32 |           analysisId: z.string(),
 33 |           userId: z.string().optional(),
 34 |         }),
 35 |       },
 36 |       {
 37 |         name: "sync_code_to_docs",
 38 |         description: "Synchronize code with documentation",
 39 |         inputSchema: z.object({
 40 |           projectPath: z.string(),
 41 |           docsPath: z.string(),
 42 |           mode: z.enum(["detect", "preview", "apply", "auto"]).optional(),
 43 |         }),
 44 |       },
 45 |     ];
 46 |     setToolDefinitions(mockTools);
 47 |   });
 48 | 
 49 |   afterEach(async () => {
 50 |     await fs.rm(tmpDir, { recursive: true, force: true });
 51 |   });
 52 | 
 53 |   describe("Basic Generation", () => {
 54 |     it("should generate LLM context file with default options", async () => {
 55 |       const result = await generateLLMContext({
 56 |         projectPath: tmpDir,
 57 |       });
 58 | 
 59 |       // Check result structure
 60 |       expect(result.content).toBeDefined();
 61 |       expect(result.content[0].text).toContain("path");
 62 | 
 63 |       // Check file exists
 64 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
 65 |       const fileExists = await fs
 66 |         .access(outputPath)
 67 |         .then(() => true)
 68 |         .catch(() => false);
 69 |       expect(fileExists).toBe(true);
 70 | 
 71 |       // Check file content
 72 |       const content = await fs.readFile(outputPath, "utf-8");
 73 |       expect(content).toContain("# DocuMCP LLM Context Reference");
 74 |       expect(content).toContain("analyze_repository");
 75 |       expect(content).toContain("recommend_ssg");
 76 |       expect(content).toContain("sync_code_to_docs");
 77 |     });
 78 | 
 79 |     it("should include examples when requested", async () => {
 80 |       await generateLLMContext({
 81 |         projectPath: tmpDir,
 82 |         includeExamples: true,
 83 |       });
 84 | 
 85 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
 86 |       const content = await fs.readFile(outputPath, "utf-8");
 87 |       expect(content).toContain("**Example**:");
 88 |       expect(content).toContain("```typescript");
 89 |     });
 90 | 
 91 |     it("should generate concise format", async () => {
 92 |       await generateLLMContext({
 93 |         projectPath: tmpDir,
 94 |         format: "concise",
 95 |         includeExamples: false,
 96 |       });
 97 | 
 98 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
 99 |       const content = await fs.readFile(outputPath, "utf-8");
100 |       expect(content).toContain("# DocuMCP LLM Context Reference");
101 |       expect(content).not.toContain("**Parameters**:");
102 |     });
103 | 
104 |     it("should generate detailed format with parameters", async () => {
105 |       await generateLLMContext({
106 |         projectPath: tmpDir,
107 |         format: "detailed",
108 |       });
109 | 
110 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
111 |       const content = await fs.readFile(outputPath, "utf-8");
112 |       expect(content).toContain("# DocuMCP LLM Context Reference");
113 |       expect(content).toContain("**Parameters**:");
114 |     });
115 |   });
116 | 
117 |   describe("Content Sections", () => {
118 |     it("should include overview section", async () => {
119 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
120 |       await generateLLMContext({ projectPath: tmpDir });
121 | 
122 |       const content = await fs.readFile(outputPath, "utf-8");
123 |       expect(content).toContain("## Overview");
124 |       expect(content).toContain("DocuMCP is an intelligent MCP server");
125 |     });
126 | 
127 |     it("should include core tools section", async () => {
128 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
129 |       await generateLLMContext({ projectPath: tmpDir });
130 | 
131 |       const content = await fs.readFile(outputPath, "utf-8");
132 |       expect(content).toContain("## Core Documentation Tools");
133 |     });
134 | 
135 |     it("should include Phase 3 tools section", async () => {
136 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
137 |       await generateLLMContext({ projectPath: tmpDir });
138 | 
139 |       const content = await fs.readFile(outputPath, "utf-8");
140 |       expect(content).toContain(
141 |         "## Phase 3: Code-to-Docs Synchronization Tools",
142 |       );
143 |       expect(content).toContain("sync_code_to_docs");
144 |     });
145 | 
146 |     it("should include memory system section", async () => {
147 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
148 |       await generateLLMContext({ projectPath: tmpDir });
149 | 
150 |       const content = await fs.readFile(outputPath, "utf-8");
151 |       expect(content).toContain("## Memory Knowledge Graph System");
152 |       expect(content).toContain("### Entity Types");
153 |       expect(content).toContain("### Relationship Types");
154 |     });
155 | 
156 |     it("should include workflows section", async () => {
157 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
158 |       await generateLLMContext({ projectPath: tmpDir });
159 | 
160 |       const content = await fs.readFile(outputPath, "utf-8");
161 |       expect(content).toContain("## Common Workflows");
162 |       expect(content).toContain("### 1. New Documentation Site Setup");
163 |     });
164 | 
165 |     it("should include quick reference table", async () => {
166 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
167 |       await generateLLMContext({ projectPath: tmpDir });
168 | 
169 |       const content = await fs.readFile(outputPath, "utf-8");
170 |       expect(content).toContain("## Quick Reference Table");
171 |       expect(content).toContain("| Tool | Primary Use |");
172 |     });
173 |   });
174 | 
175 |   describe("Input Validation", () => {
176 |     it("should validate input schema", () => {
177 |       expect(() => {
178 |         GenerateLLMContextInputSchema.parse({
179 |           projectPath: "/test/path",
180 |           includeExamples: true,
181 |           format: "detailed",
182 |         });
183 |       }).not.toThrow();
184 |     });
185 | 
186 |     it("should use default values for optional fields", () => {
187 |       const result = GenerateLLMContextInputSchema.parse({
188 |         projectPath: "/test/path",
189 |       });
190 |       expect(result.projectPath).toBe("/test/path");
191 |       expect(result.includeExamples).toBe(true);
192 |       expect(result.format).toBe("detailed");
193 |     });
194 | 
195 |     it("should require projectPath", () => {
196 |       expect(() => {
197 |         GenerateLLMContextInputSchema.parse({});
198 |       }).toThrow();
199 |     });
200 | 
201 |     it("should reject invalid format", () => {
202 |       expect(() => {
203 |         GenerateLLMContextInputSchema.parse({
204 |           projectPath: "/test/path",
205 |           format: "invalid",
206 |         });
207 |       }).toThrow();
208 |     });
209 |   });
210 | 
211 |   describe("Error Handling", () => {
212 |     it("should handle write errors gracefully", async () => {
213 |       const invalidPath = "/invalid/path/that/does/not/exist";
214 |       const result = await generateLLMContext({
215 |         projectPath: invalidPath,
216 |       });
217 | 
218 |       expect(result.content[0].text).toContain("GENERATION_ERROR");
219 |       expect(result.isError).toBe(true);
220 |     });
221 |   });
222 | 
223 |   describe("File Output", () => {
224 |     it("should create LLM_CONTEXT.md in project root", async () => {
225 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
226 |       await generateLLMContext({ projectPath: tmpDir });
227 | 
228 |       const fileExists = await fs
229 |         .access(outputPath)
230 |         .then(() => true)
231 |         .catch(() => false);
232 |       expect(fileExists).toBe(true);
233 |     });
234 | 
235 |     it("should overwrite existing file", async () => {
236 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
237 | 
238 |       // Write first time
239 |       await generateLLMContext({ projectPath: tmpDir });
240 |       const firstContent = await fs.readFile(outputPath, "utf-8");
241 | 
242 |       // Wait a moment to ensure timestamp changes
243 |       await new Promise((resolve) => setTimeout(resolve, 10));
244 | 
245 |       // Write second time
246 |       await generateLLMContext({ projectPath: tmpDir });
247 |       const secondContent = await fs.readFile(outputPath, "utf-8");
248 | 
249 |       // Content should be different (timestamp changed)
250 |       expect(firstContent).not.toEqual(secondContent);
251 |     });
252 | 
253 |     it("should report correct file stats", async () => {
254 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
255 |       const result = await generateLLMContext({ projectPath: tmpDir });
256 | 
257 |       const data = JSON.parse(result.content[0].text);
258 |       expect(data.stats).toBeDefined();
259 |       expect(data.stats.totalTools).toBe(3);
260 |       expect(data.stats.fileSize).toBeGreaterThan(0);
261 |       expect(data.stats.sections).toBeInstanceOf(Array);
262 |     });
263 |   });
264 | 
265 |   describe("Tool Extraction", () => {
266 |     it("should extract tool names correctly", async () => {
267 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
268 |       await generateLLMContext({ projectPath: tmpDir });
269 | 
270 |       const content = await fs.readFile(outputPath, "utf-8");
271 |       expect(content).toContain("`analyze_repository`");
272 |       expect(content).toContain("`recommend_ssg`");
273 |       expect(content).toContain("`sync_code_to_docs`");
274 |     });
275 | 
276 |     it("should extract tool descriptions", async () => {
277 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
278 |       await generateLLMContext({ projectPath: tmpDir });
279 | 
280 |       const content = await fs.readFile(outputPath, "utf-8");
281 |       expect(content).toContain(
282 |         "Analyze repository structure and dependencies",
283 |       );
284 |       expect(content).toContain("Recommend static site generator");
285 |     });
286 | 
287 |     it("should handle tools with no examples", async () => {
288 |       const outputPath = path.join(tmpDir, "LLM_CONTEXT.md");
289 |       await generateLLMContext({ projectPath: tmpDir, includeExamples: true });
290 | 
291 |       const content = await fs.readFile(outputPath, "utf-8");
292 |       // recommend_ssg doesn't have an example defined
293 |       const ssgSection = content.match(
294 |         /### `recommend_ssg`[\s\S]*?(?=###|$)/,
295 |       )?.[0];
296 |       expect(ssgSection).toBeDefined();
297 |     });
298 |   });
299 | });
300 | 
```
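
The File Output tests above repeat the same `fs.access().then().catch()` idiom to probe for a file. A minimal standalone sketch of that pattern as a reusable helper (the `pathExists` name is hypothetical, not part of the test suite):

```typescript
import { promises as fs } from "fs";

// fs.access rejects when the path is missing, so map resolve/reject to a boolean.
// Hypothetical helper name; the tests above inline this pattern instead.
async function pathExists(filePath: string): Promise<boolean> {
  return fs
    .access(filePath)
    .then(() => true)
    .catch(() => false);
}

// Usage: assert that a generated file landed where expected.
pathExists("LLM_CONTEXT.md").then((exists) => console.log(exists));
```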

--------------------------------------------------------------------------------
/docs/how-to/llm-integration.md:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | id: llm-integration
  3 | title: LLM Integration for Semantic Code Analysis
  4 | sidebar_label: LLM Integration
  5 | sidebar_position: 10
  6 | ---
  7 | 
  8 | # LLM Integration for Semantic Code Analysis
  9 | 
 10 | DocuMCP now includes an optional LLM integration layer that enables semantic analysis of code changes beyond AST-based syntax comparison. This feature supports the DocuMCP Orchestrator's requirements for intelligent documentation synchronization.
 11 | 
 12 | ## Overview
 13 | 
 14 | The LLM integration provides:
 15 | 
 16 | - **Semantic code change analysis**: Detect behavioral changes within the same function signature
 17 | - **Code execution simulation**: Validate documentation examples without running code
 18 | - **Intelligent documentation suggestions**: Generate context-aware update recommendations
 19 | - **Multi-provider support**: DeepSeek, OpenAI, Anthropic, and Ollama
 20 | - **Graceful fallback**: Automatic fallback to AST-only analysis when LLM is unavailable
 21 | 
 22 | ## Configuration
 23 | 
 24 | ### Environment Variables
 25 | 
 26 | Configure the LLM integration using environment variables:
 27 | 
 28 | ```bash
 29 | # Required: API key for your chosen provider
 30 | export DOCUMCP_LLM_API_KEY="your-api-key-here"
 31 | 
 32 | # Optional: Provider selection (default: deepseek)
 33 | export DOCUMCP_LLM_PROVIDER="deepseek"  # or "openai", "anthropic", "ollama"
 34 | 
 35 | # Optional: Model name (default: deepseek-chat)
 36 | export DOCUMCP_LLM_MODEL="deepseek-chat"
 37 | 
 38 | # Optional: Custom base URL (for self-hosted or alternative endpoints)
 39 | export DOCUMCP_LLM_BASE_URL="https://api.deepseek.com/v1"
 40 | ```
 41 | 
 42 | ### Supported Providers
 43 | 
 44 | #### DeepSeek (Default)
 45 | 
 46 | ```bash
 47 | export DOCUMCP_LLM_PROVIDER="deepseek"
 48 | export DOCUMCP_LLM_API_KEY="sk-..."
 49 | export DOCUMCP_LLM_MODEL="deepseek-chat"
 50 | ```
 51 | 
 52 | #### OpenAI
 53 | 
 54 | ```bash
 55 | export DOCUMCP_LLM_PROVIDER="openai"
 56 | export DOCUMCP_LLM_API_KEY="sk-..."
 57 | export DOCUMCP_LLM_MODEL="gpt-4"
 58 | ```
 59 | 
 60 | #### Anthropic
 61 | 
 62 | ```bash
 63 | export DOCUMCP_LLM_PROVIDER="anthropic"
 64 | export DOCUMCP_LLM_API_KEY="sk-ant-..."
 65 | export DOCUMCP_LLM_MODEL="claude-3-opus-20240229"
 66 | ```
 67 | 
 68 | #### Ollama (Local)
 69 | 
 70 | ```bash
 71 | export DOCUMCP_LLM_PROVIDER="ollama"
 72 | export DOCUMCP_LLM_BASE_URL="http://localhost:11434/v1"
 73 | export DOCUMCP_LLM_MODEL="codellama"
 74 | # No API key needed for local Ollama
 75 | ```
 76 | 
 77 | ## Usage
 78 | 
 79 | ### Semantic Code Analysis
 80 | 
 81 | ```typescript
 82 | import { SemanticAnalyzer } from "./utils/semantic-analyzer.js";
 83 | 
 84 | // Create analyzer with default configuration
 85 | const analyzer = new SemanticAnalyzer();
 86 | await analyzer.initialize();
 87 | 
 88 | // Analyze semantic impact of code changes
 89 | const codeBefore = `
 90 | function multiply(a: number, b: number): number {
 91 |   return a * b;
 92 | }
 93 | `;
 94 | 
 95 | const codeAfter = `
 96 | function multiply(a: number, b: number): number {
 97 |   return a + b;  // Bug: changed to addition!
 98 | }
 99 | `;
100 | 
101 | const analysis = await analyzer.analyzeSemanticImpact(
102 |   codeBefore,
103 |   codeAfter,
104 |   "multiply",
105 | );
106 | 
107 | console.log("Analysis mode:", analysis.analysisMode); // 'llm', 'ast', or 'hybrid'
108 | console.log("Behavioral change:", analysis.hasBehavioralChange); // true
109 | console.log("Breaking for examples:", analysis.breakingForExamples); // true
110 | console.log("Description:", analysis.changeDescription);
111 | console.log("Confidence:", analysis.confidence);
112 | console.log("Affected sections:", analysis.affectedDocSections);
113 | ```
114 | 
115 | ### Validating Documentation Examples
116 | 
117 | ```typescript
118 | // Validate that documentation examples work with current implementation
119 | const examples = [
120 |   "const result = multiply(6, 7);  // Should return 42",
121 |   "const doubled = multiply(21, 2);  // Should return 42",
122 | ];
123 | 
124 | const implementation = `
125 | function multiply(a: number, b: number): number {
126 |   return a * b;
127 | }
128 | `;
129 | 
130 | const validation = await analyzer.validateExamples(examples, implementation);
131 | 
132 | console.log("Valid:", validation.isValid);
133 | console.log("Confidence:", validation.overallConfidence);
134 | console.log("Manual review needed:", validation.requiresManualReview);
135 | 
136 | // Check individual examples
137 | validation.examples.forEach((ex, i) => {
138 |   console.log(`Example ${i + 1}:`);
139 |   console.log("  Valid:", ex.isValid);
140 |   console.log("  Issues:", ex.issues);
141 | });
142 | ```
143 | 
144 | ### Batch Analysis
145 | 
146 | ```typescript
147 | const changes = [
148 |   {
149 |     before: "function add(x: number, y: number) { return x + y; }",
150 |     after: "function add(x: number, y: number) { return x - y; }",
151 |     name: "add",
152 |   },
153 |   {
154 |     before: "function greet(name: string) { return `Hello ${name}`; }",
155 |     after: "function greet(name: string) { return `Hi ${name}!`; }",
156 |     name: "greet",
157 |   },
158 | ];
159 | 
160 | const results = await analyzer.analyzeBatch(changes);
161 | results.forEach((result, i) => {
162 |   console.log(`Change ${i + 1}:`, result.changeDescription);
163 | });
164 | ```
165 | 
166 | ### Custom Configuration
167 | 
168 | ```typescript
169 | import {
170 |   SemanticAnalyzer,
171 |   createSemanticAnalyzer,
172 | } from "./utils/semantic-analyzer.js";
173 | 
174 | // Disable LLM (AST-only mode)
175 | const astOnlyAnalyzer = createSemanticAnalyzer({
176 |   useLLM: false,
177 | });
178 | 
179 | // Custom confidence threshold for hybrid mode
180 | const strictAnalyzer = createSemanticAnalyzer({
181 |   confidenceThreshold: 0.9, // Higher threshold = more likely to use hybrid mode
182 | });
183 | 
184 | // Custom LLM configuration
185 | const customAnalyzer = createSemanticAnalyzer({
186 |   llmConfig: {
187 |     provider: "openai",
188 |     apiKey: "custom-key",
189 |     model: "gpt-4",
190 |   },
191 | });
192 | ```
193 | 
194 | ## Analysis Modes
195 | 
196 | The semantic analyzer operates in three modes:
197 | 
198 | ### LLM Mode
199 | 
200 | - **When**: LLM is available and confidence is above threshold (default: 0.7)
201 | - **Advantages**: Deep semantic understanding, detects behavioral changes
202 | - **Use case**: Critical code changes affecting public APIs
203 | 
204 | ### AST Mode
205 | 
206 | - **When**: LLM is unavailable or disabled
207 | - **Advantages**: Fast, reliable, no external dependencies
208 | - **Use case**: Quick syntax checks, CI/CD environments without LLM access
209 | 
210 | ### Hybrid Mode
211 | 
212 | - **When**: LLM confidence is below threshold
213 | - **Advantages**: Combines LLM insights with AST verification
214 | - **Use case**: Complex changes requiring both semantic and structural analysis
215 | 
216 | ## Rate Limiting
217 | 
218 | The LLM client includes built-in rate limiting to prevent API quota exhaustion:
219 | 
220 | - Default: 10 requests per minute
221 | - Automatic backoff when limit is reached
222 | - Configurable per-instance
223 | 
224 | ## Error Handling
225 | 
226 | The integration is designed to fail gracefully:
227 | 
228 | ```typescript
229 | // If LLM fails, analyzer falls back to AST mode
230 | const analyzer = new SemanticAnalyzer();
231 | const result = await analyzer.analyzeSemanticImpact(before, after);
232 | 
233 | // Check which mode was used
234 | if (result.analysisMode === "ast" && !result.llmAvailable) {
235 |   console.warn("LLM unavailable, using AST analysis only");
236 | }
237 | 
238 | // Low confidence analysis
239 | if (result.confidence < 0.5) {
240 |   console.warn("Low confidence analysis - manual review recommended");
241 | }
242 | ```
243 | 
244 | ## Best Practices
245 | 
246 | ### 1. Set Appropriate Thresholds
247 | 
248 | ```typescript
249 | // For critical code paths
250 | const criticalAnalyzer = createSemanticAnalyzer({
251 |   confidenceThreshold: 0.9, // High threshold
252 | });
253 | 
254 | // For routine changes
255 | const routineAnalyzer = createSemanticAnalyzer({
256 |   confidenceThreshold: 0.6, // Lower threshold
257 | });
258 | ```
259 | 
260 | ### 2. Check Availability Before Relying on LLM
261 | 
262 | ```typescript
263 | if (!analyzer.isLLMAvailable()) {
264 |   console.warn("LLM not configured - using AST analysis only");
265 | }
266 | ```
267 | 
268 | ### 3. Handle Low Confidence Results
269 | 
270 | ```typescript
271 | const result = await analyzer.analyzeSemanticImpact(before, after);
272 | 
273 | if (result.confidence < 0.7) {
274 |   // Trigger manual review workflow
275 |   console.log("Manual review required for:", result.changeDescription);
276 | }
277 | ```
278 | 
279 | ### 4. Use Batch Analysis for Multiple Changes
280 | 
281 | ```typescript
282 | // More efficient than individual calls
283 | const results = await analyzer.analyzeBatch(changes);
284 | ```
285 | 
286 | ### 5. Validate Examples Before Publishing
287 | 
288 | ```typescript
289 | const validation = await analyzer.validateExamples(examples, implementation);
290 | 
291 | if (!validation.isValid) {
292 |   console.error("Some examples may be invalid:");
293 |   validation.suggestions.forEach((s) => console.error("  -", s));
294 | 
295 |   // Don't publish until examples are fixed
296 |   throw new Error("Invalid documentation examples detected");
297 | }
298 | ```
299 | 
300 | ## Integration with DocuMCP Orchestrator
301 | 
302 | This LLM integration layer is designed to support the [DocuMCP Orchestrator](https://github.com/tosin2013/documcp-orchestrator) requirements:
303 | 
304 | - **ADR-009**: Content Accuracy Validation Framework
305 | - **ADR-010**: LLM-Validated Documentation Examples
306 | 
307 | The orchestrator uses these capabilities to:
308 | 
309 | 1. Detect when code changes require documentation updates
310 | 2. Validate that documentation examples match code behavior
311 | 3. Generate intelligent update suggestions
312 | 4. Maintain documentation accuracy over time
313 | 
314 | ## Troubleshooting
315 | 
316 | ### LLM Not Available
317 | 
318 | **Symptom**: `analyzer.isLLMAvailable()` returns `false`
319 | 
320 | **Solutions**:
321 | 
322 | - Check that `DOCUMCP_LLM_API_KEY` is set
323 | - Verify the API key is valid
324 | - For Ollama: ensure the server is running at the specified base URL
325 | 
326 | ### Low Confidence Results
327 | 
328 | **Symptom**: `result.confidence < 0.7`
329 | 
330 | **Solutions**:
331 | 
332 | - Review the change manually
333 | - Raise the confidence threshold so hybrid mode (LLM plus AST verification) is used
334 | - Check whether the code change is unusually complex
335 | 
336 | ### Rate Limit Errors
337 | 
338 | **Symptom**: Requests are delayed or time out once the per-minute limit is reached
339 | 
340 | **Solutions**:
341 | 
342 | - Reduce the number of concurrent requests
343 | - Increase the rate-limit window
344 | - Use batch analysis for multiple changes
345 | 
346 | ### Timeout Errors
347 | 
348 | **Symptom**: "LLM request timed out"
349 | 
350 | **Solutions**:
351 | 
352 | - Increase the `timeout` value in the client configuration
353 | - Check network connectivity to LLM provider
354 | - Consider using a faster model
355 | 
356 | ## Security Considerations
357 | 
358 | 1. **API Keys**: Never commit API keys to version control
359 | 2. **Code Privacy**: Be aware that code is sent to external LLM providers
360 | 3. **Rate Limits**: Monitor API usage to avoid unexpected costs
361 | 4. **Fallback**: System works without LLM for sensitive environments
362 | 
363 | ## Performance
364 | 
365 | - **LLM Analysis**: ~2-5 seconds per code change
366 | - **AST Analysis**: ~50-100ms per code change
367 | - **Hybrid Analysis**: ~2-5 seconds (LLM) + ~100ms (AST)
368 | - **Rate Limit**: 10 requests/minute (default)
369 | 
370 | ## Future Enhancements
371 | 
372 | Planned improvements:
373 | 
374 | - Caching of LLM responses for identical code changes
375 | - Support for additional LLM providers
376 | - Fine-tuned models for specific languages
377 | - Streaming responses for large code bases
378 | - Confidence calibration based on historical accuracy
379 | 
380 | ## Related Documentation
381 | 
382 | - [AST-based Code Analysis](../reference/ast-analyzer.md)
383 | - [Drift Detection](../reference/drift-detector.md)
384 | - [DocuMCP Orchestrator](https://github.com/tosin2013/documcp-orchestrator)
385 | - [ADR-009: Content Accuracy Validation](../adrs/adr-0009-content-accuracy-validation-framework.md)
386 | 
```
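
The Rate Limiting and Troubleshooting sections above mention per-instance limits and timeouts without showing the call site. A minimal sketch, assuming the `DeepSeekClient` and `LLMConfig` exports from `src/utils/llm-client.ts` (shown later in this dump), of constructing a client with a longer timeout and a smaller completion budget:

```typescript
import { DeepSeekClient, type LLMConfig } from "./utils/llm-client.js";

async function main(): Promise<void> {
  // maxTokens and timeout are optional LLMConfig fields; the client falls
  // back to 4000 tokens / 30000 ms when they are omitted.
  const config: LLMConfig = {
    provider: "deepseek",
    apiKey: process.env.DOCUMCP_LLM_API_KEY,
    model: "deepseek-chat",
    maxTokens: 2000, // smaller completion budget for short JSON analyses
    timeout: 60_000, // give a slower provider a full minute before aborting
  };

  const client = new DeepSeekClient(config);
  if (!client.isAvailable()) {
    console.warn("No API key configured - AST-only analysis will be used");
    return;
  }
  console.log(await client.complete("Reply with OK"));
}

main().catch(console.error);
```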

--------------------------------------------------------------------------------
/src/utils/llm-client.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * LLM Client for Semantic Code Analysis
  3 |  *
  4 |  * Provides a unified interface for multiple LLM providers (DeepSeek, OpenAI, Anthropic, Ollama)
  5 |  * with rate limiting, error handling, and fallback mechanisms.
  6 |  */
  7 | 
  8 | export interface LLMConfig {
  9 |   provider: "deepseek" | "openai" | "anthropic" | "ollama";
 10 |   apiKey?: string;
 11 |   baseUrl?: string;
 12 |   model: string;
 13 |   maxTokens?: number;
 14 |   timeout?: number;
 15 | }
 16 | 
 17 | export interface SemanticAnalysis {
 18 |   hasBehavioralChange: boolean;
 19 |   breakingForExamples: boolean;
 20 |   changeDescription: string;
 21 |   affectedDocSections: string[];
 22 |   confidence: number;
 23 | }
 24 | 
 25 | export interface SimulationResult {
 26 |   success: boolean;
 27 |   expectedOutput: string;
 28 |   actualOutput: string;
 29 |   matches: boolean;
 30 |   differences: string[];
 31 |   confidence: number;
 32 | }
 33 | 
 34 | export interface LLMResponse {
 35 |   content: string;
 36 |   usage?: {
 37 |     promptTokens: number;
 38 |     completionTokens: number;
 39 |     totalTokens: number;
 40 |   };
 41 | }
 42 | 
 43 | export interface LLMClient {
 44 |   complete(prompt: string): Promise<string>;
 45 |   analyzeCodeChange(before: string, after: string): Promise<SemanticAnalysis>;
 46 |   simulateExecution(
 47 |     example: string,
 48 |     implementation: string,
 49 |   ): Promise<SimulationResult>;
 50 | }
 51 | 
 52 | /**
 53 |  * Rate limiter for API requests
 54 |  */
 55 | class RateLimiter {
 56 |   private requests: number[] = [];
 57 |   private readonly maxRequests: number;
 58 |   private readonly windowMs: number;
 59 | 
 60 |   constructor(maxRequests: number = 10, windowMs: number = 60000) {
 61 |     this.maxRequests = maxRequests;
 62 |     this.windowMs = windowMs;
 63 |   }
 64 | 
 65 |   async acquire(): Promise<void> {
 66 |     const now = Date.now();
 67 |     this.requests = this.requests.filter((time) => now - time < this.windowMs);
 68 | 
 69 |     if (this.requests.length >= this.maxRequests) {
 70 |       const oldestRequest = this.requests[0];
 71 |       const waitTime = this.windowMs - (now - oldestRequest);
 72 |       await new Promise((resolve) => setTimeout(resolve, waitTime));
 73 |       return this.acquire();
 74 |     }
 75 | 
 76 |     this.requests.push(now);
 77 |   }
 78 | }
 79 | 
 80 | /**
 81 |  * DeepSeek LLM Client (OpenAI-compatible API)
 82 |  */
 83 | export class DeepSeekClient implements LLMClient {
 84 |   private config: LLMConfig;
 85 |   private rateLimiter: RateLimiter;
 86 |   private available: boolean = true;
 87 | 
 88 |   constructor(config: LLMConfig) {
 89 |     this.config = {
 90 |       ...config, // spread first so explicitly-undefined fields can't clobber the defaults below
 91 |       baseUrl: config.baseUrl || "https://api.deepseek.com/v1",
 92 |       maxTokens: config.maxTokens || 4000,
 93 |       timeout: config.timeout || 30000,
 94 |     };
 95 |     this.rateLimiter = new RateLimiter(10, 60000);
 96 |   }
 97 | 
 98 |   /**
 99 |    * Check if the LLM service is available
100 |    */
101 |   isAvailable(): boolean {
102 |     return this.available && !!this.config.apiKey;
103 |   }
104 | 
105 |   /**
106 |    * Generic completion method
107 |    */
108 |   async complete(prompt: string): Promise<string> {
109 |     if (!this.isAvailable()) {
110 |       throw new Error(
111 |         "LLM client is not available. Check API key configuration.",
112 |       );
113 |     }
114 | 
115 |     await this.rateLimiter.acquire();
116 | 
117 |     const requestBody = {
118 |       model: this.config.model,
119 |       messages: [
120 |         {
121 |           role: "user",
122 |           content: prompt,
123 |         },
124 |       ],
125 |       max_tokens: this.config.maxTokens,
126 |       temperature: 0.7,
127 |     };
128 | 
129 |     try {
130 |       const controller = new AbortController();
131 |       const timeoutId = setTimeout(
132 |         () => controller.abort(),
133 |         this.config.timeout,
134 |       );
135 | 
136 |       const response = await fetch(`${this.config.baseUrl}/chat/completions`, {
137 |         method: "POST",
138 |         headers: {
139 |           "Content-Type": "application/json",
140 |           Authorization: `Bearer ${this.config.apiKey}`,
141 |         },
142 |         body: JSON.stringify(requestBody),
143 |         signal: controller.signal,
144 |       });
145 | 
146 |       clearTimeout(timeoutId);
147 | 
148 |       if (!response.ok) {
149 |         const errorText = await response.text();
150 |         throw new Error(`LLM API error: ${response.status} - ${errorText}`);
151 |       }
152 | 
153 |       const data = (await response.json()) as any;
154 |       return data.choices[0]?.message?.content || "";
155 |     } catch (error) {
156 |       if (error instanceof Error && error.name === "AbortError") {
157 |         throw new Error("LLM request timed out");
158 |       }
159 |       throw error;
160 |     }
161 |   }
162 | 
163 |   /**
164 |    * Analyze semantic impact of code changes
165 |    */
166 |   async analyzeCodeChange(
167 |     before: string,
168 |     after: string,
169 |   ): Promise<SemanticAnalysis> {
170 |     const prompt = `You are a code analysis expert. Compare these two code versions and analyze the semantic differences.
171 | 
172 | **Before:**
173 | \`\`\`
174 | ${before}
175 | \`\`\`
176 | 
177 | **After:**
178 | \`\`\`
179 | ${after}
180 | \`\`\`
181 | 
182 | Analyze and respond in JSON format with the following structure:
183 | {
184 |   "hasBehavioralChange": boolean (true if behavior changed, not just syntax),
185 |   "breakingForExamples": boolean (true if existing examples would break),
186 |   "changeDescription": string (brief description of the change),
187 |   "affectedDocSections": string[] (list of documentation sections that need updates),
188 |   "confidence": number (0-1 score indicating analysis confidence)
189 | }
190 | 
191 | Focus on:
192 | 1. Changes in function behavior (not just signature)
193 | 2. Changes in return values or side effects
194 | 3. Changes that would break existing usage examples
195 | 4. Changes that affect API contracts
196 | 5. Changes in error handling or edge cases`;
197 | 
198 |     try {
199 |       const response = await this.complete(prompt);
200 | 
201 |       // Extract JSON from response (handle markdown code blocks)
202 |       let jsonStr = response.trim();
203 |       if (jsonStr.startsWith("```json")) {
204 |         jsonStr = jsonStr.replace(/```json\n?/g, "").replace(/```\n?$/g, "");
205 |       } else if (jsonStr.startsWith("```")) {
206 |         jsonStr = jsonStr.replace(/```\n?/g, "");
207 |       }
208 | 
209 |       const analysis = JSON.parse(jsonStr) as SemanticAnalysis;
210 | 
211 |       // Validate and normalize the response
212 |       return {
213 |         hasBehavioralChange: Boolean(analysis.hasBehavioralChange),
214 |         breakingForExamples: Boolean(analysis.breakingForExamples),
215 |         changeDescription: analysis.changeDescription || "Code change detected",
216 |         affectedDocSections: Array.isArray(analysis.affectedDocSections)
217 |           ? analysis.affectedDocSections
218 |           : [],
219 |         confidence: this.normalizeConfidence(analysis.confidence),
220 |       };
221 |     } catch (error) {
222 |       // Return low-confidence fallback result on error
223 |       return {
224 |         hasBehavioralChange: false,
225 |         breakingForExamples: false,
226 |         changeDescription: `Analysis failed: ${
227 |           error instanceof Error ? error.message : "Unknown error"
228 |         }`,
229 |         affectedDocSections: [],
230 |         confidence: 0,
231 |       };
232 |     }
233 |   }
234 | 
235 |   /**
236 |    * Simulate execution of code to validate examples
237 |    */
238 |   async simulateExecution(
239 |     example: string,
240 |     implementation: string,
241 |   ): Promise<SimulationResult> {
242 |     const prompt = `You are a code execution simulator. Given a code example and its implementation, predict the execution result.
243 | 
244 | **Example Usage:**
245 | \`\`\`
246 | ${example}
247 | \`\`\`
248 | 
249 | **Implementation:**
250 | \`\`\`
251 | ${implementation}
252 | \`\`\`
253 | 
254 | Analyze the code flow without actually executing it. Respond in JSON format:
255 | {
256 |   "success": boolean (would the example execute successfully?),
257 |   "expectedOutput": string (what the example expects),
258 |   "actualOutput": string (what the implementation would produce),
259 |   "matches": boolean (do they match?),
260 |   "differences": string[] (list of differences if any),
261 |   "confidence": number (0-1 score for prediction confidence)
262 | }
263 | 
264 | Consider:
265 | 1. Function signatures and parameters
266 | 2. Return types and values
267 | 3. Error handling
268 | 4. Side effects
269 | 5. Dependencies and imports`;
270 | 
271 |     try {
272 |       const response = await this.complete(prompt);
273 | 
274 |       // Extract JSON from response
275 |       let jsonStr = response.trim();
276 |       if (jsonStr.startsWith("```json")) {
277 |         jsonStr = jsonStr.replace(/```json\n?/g, "").replace(/```\n?$/g, "");
278 |       } else if (jsonStr.startsWith("```")) {
279 |         jsonStr = jsonStr.replace(/```\n?/g, "");
280 |       }
281 | 
282 |       const result = JSON.parse(jsonStr) as SimulationResult;
283 | 
284 |       // Validate and normalize
285 |       return {
286 |         success: Boolean(result.success),
287 |         expectedOutput: result.expectedOutput || "",
288 |         actualOutput: result.actualOutput || "",
289 |         matches: Boolean(result.matches),
290 |         differences: Array.isArray(result.differences)
291 |           ? result.differences
292 |           : [],
293 |         confidence: this.normalizeConfidence(result.confidence),
294 |       };
295 |     } catch (error) {
296 |       // Return low-confidence failure result on error
297 |       return {
298 |         success: false,
299 |         expectedOutput: "Unable to determine",
300 |         actualOutput: "Unable to determine",
301 |         matches: false,
302 |         differences: [
303 |           `Simulation failed: ${
304 |             error instanceof Error ? error.message : "Unknown error"
305 |           }`,
306 |         ],
307 |         confidence: 0,
308 |       };
309 |     }
310 |   }
311 | 
312 |   /**
313 |    * Normalize confidence score to 0-1 range
314 |    */
315 |   private normalizeConfidence(confidence: unknown): number {
316 |     if (typeof confidence === "number") {
317 |       return Math.max(0, Math.min(1, confidence));
318 |     }
319 |     return 0.5; // Default confidence for invalid values
320 |   }
321 | }
322 | 
323 | /**
324 |  * Factory function to create LLM client based on configuration
325 |  */
326 | export function createLLMClient(config?: Partial<LLMConfig>): LLMClient | null {
327 |   // Check environment variables for configuration
328 |   const provider = (config?.provider ||
329 |     process.env.DOCUMCP_LLM_PROVIDER ||
330 |     "deepseek") as LLMConfig["provider"];
331 |   const apiKey = config?.apiKey || process.env.DOCUMCP_LLM_API_KEY;
332 |   const baseUrl = config?.baseUrl || process.env.DOCUMCP_LLM_BASE_URL;
333 |   const model =
334 |     config?.model || process.env.DOCUMCP_LLM_MODEL || "deepseek-chat";
335 | 
336 |   // If no API key, return null to indicate LLM is unavailable
337 |   if (!apiKey) {
338 |     return null;
339 |   }
340 | 
341 |   const fullConfig: LLMConfig = {
342 |     provider,
343 |     apiKey,
344 |     baseUrl,
345 |     model,
346 |     maxTokens: config?.maxTokens,
347 |     timeout: config?.timeout,
348 |   };
349 | 
350 |   switch (provider) {
351 |     case "deepseek":
352 |     case "openai":
353 |     case "anthropic":
354 |     case "ollama":
355 |       // For now, all use OpenAI-compatible API (DeepSeekClient)
356 |       // Future: implement provider-specific clients
357 |       return new DeepSeekClient(fullConfig);
358 |     default:
359 |       throw new Error(`Unsupported LLM provider: ${provider}`);
360 |   }
361 | }
362 | 
363 | /**
364 |  * Check if LLM is available based on environment configuration
365 |  * Note: OPENAI_API_KEY is accepted here for backward compatibility, but createLLMClient() itself only reads DOCUMCP_LLM_API_KEY
366 |  */
367 | export function isLLMAvailable(): boolean {
368 |   return !!(process.env.DOCUMCP_LLM_API_KEY || process.env.OPENAI_API_KEY);
369 | }
370 | 
```
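
A short usage sketch of the factory above: `createLLMClient()` returns `null` when no API key is configured, which is the caller's cue to fall back to AST-only analysis (the wrapper function and its name are illustrative):

```typescript
import { createLLMClient } from "./llm-client.js";

async function describeChange(before: string, after: string): Promise<string> {
  // null means DOCUMCP_LLM_API_KEY is absent - fall back to AST comparison.
  const client = createLLMClient();
  if (!client) {
    return "LLM unavailable - use AST-based comparison instead";
  }

  const analysis = await client.analyzeCodeChange(before, after);
  // A confidence of 0 typically means the request or JSON parse failed
  // inside the client (see the catch blocks above).
  if (analysis.confidence === 0) {
    return `Analysis inconclusive: ${analysis.changeDescription}`;
  }
  return `${analysis.changeDescription} (confidence ${analysis.confidence})`;
}
```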

--------------------------------------------------------------------------------
/tests/tools/analyze-coverage.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | // Additional tests to improve analyze-repository coverage
  2 | import { promises as fs } from "fs";
  3 | import path from "path";
  4 | import os from "os";
  5 | import { analyzeRepository } from "../../src/tools/analyze-repository";
  6 | 
  7 | describe("Analyze Repository Additional Coverage", () => {
  8 |   let tempDir: string;
  9 | 
 10 |   beforeAll(async () => {
 11 |     tempDir = path.join(os.tmpdir(), "analyze-coverage");
 12 |     await fs.mkdir(tempDir, { recursive: true });
 13 |   });
 14 | 
 15 |   afterAll(async () => {
 16 |     try {
 17 |       await fs.rm(tempDir, { recursive: true, force: true });
 18 |     } catch (error) {
 19 |       // Cleanup errors are okay
 20 |     }
 21 |   });
 22 | 
 23 |   describe("Different Repository Types", () => {
 24 |     it("should analyze Ruby project", async () => {
 25 |       const rubyDir = path.join(tempDir, "ruby-project");
 26 |       await fs.mkdir(rubyDir, { recursive: true });
 27 | 
 28 |       await fs.writeFile(
 29 |         path.join(rubyDir, "Gemfile"),
 30 |         `
 31 | source 'https://rubygems.org'
 32 | gem 'rails', '~> 7.0'
 33 | gem 'puma'
 34 | gem 'redis'
 35 |       `,
 36 |       );
 37 | 
 38 |       await fs.writeFile(path.join(rubyDir, "app.rb"), 'puts "Hello Ruby"');
 39 |       await fs.writeFile(path.join(rubyDir, "README.md"), "# Ruby Project");
 40 | 
 41 |       const result = await analyzeRepository({
 42 |         path: rubyDir,
 43 |         depth: "standard",
 44 |       });
 45 |       expect(result.content).toBeDefined();
 46 |       const analysis = JSON.parse(result.content[0].text);
 47 |       expect(analysis.dependencies.ecosystem).toBe("ruby");
 48 |     });
 49 | 
 50 |     it("should analyze Go project", async () => {
 51 |       const goDir = path.join(tempDir, "go-project");
 52 |       await fs.mkdir(goDir, { recursive: true });
 53 | 
 54 |       await fs.writeFile(
 55 |         path.join(goDir, "go.mod"),
 56 |         `
 57 | module example.com/myapp
 58 | go 1.21
 59 | require (
 60 |   github.com/gin-gonic/gin v1.9.0
 61 |   github.com/stretchr/testify v1.8.0
 62 | )
 63 |       `,
 64 |       );
 65 | 
 66 |       await fs.writeFile(path.join(goDir, "main.go"), "package main");
 67 |       await fs.writeFile(path.join(goDir, "README.md"), "# Go Project");
 68 | 
 69 |       const result = await analyzeRepository({
 70 |         path: goDir,
 71 |         depth: "standard",
 72 |       });
 73 |       expect(result.content).toBeDefined();
 74 |       const analysis = JSON.parse(result.content[0].text);
 75 |       expect(analysis.dependencies.ecosystem).toBe("go");
 76 |     });
 77 | 
 78 |     it("should analyze Java project", async () => {
 79 |       const javaDir = path.join(tempDir, "java-project");
 80 |       await fs.mkdir(javaDir, { recursive: true });
 81 | 
 82 |       await fs.writeFile(
 83 |         path.join(javaDir, "pom.xml"),
 84 |         `
 85 | <?xml version="1.0" encoding="UTF-8"?>
 86 | <project>
 87 |   <modelVersion>4.0.0</modelVersion>
 88 |   <groupId>com.example</groupId>
 89 |   <artifactId>myapp</artifactId>
 90 |   <version>1.0.0</version>
 91 |   <dependencies>
 92 |     <dependency>
 93 |       <groupId>org.springframework.boot</groupId>
 94 |       <artifactId>spring-boot-starter</artifactId>
 95 |     </dependency>
 96 |   </dependencies>
 97 | </project>
 98 |       `,
 99 |       );
100 | 
101 |       await fs.writeFile(path.join(javaDir, "App.java"), "public class App {}");
102 | 
103 |       const result = await analyzeRepository({
104 |         path: javaDir,
105 |         depth: "standard",
106 |       });
107 |       expect(result.content).toBeDefined();
108 |       const analysis = JSON.parse(result.content[0].text);
109 |       expect(analysis.dependencies.ecosystem).toBeDefined(); // May be 'java' or 'unknown' depending on detection
110 |     });
111 | 
112 |     it("should analyze project with Docker", async () => {
113 |       const dockerDir = path.join(tempDir, "docker-project");
114 |       await fs.mkdir(dockerDir, { recursive: true });
115 | 
116 |       await fs.writeFile(
117 |         path.join(dockerDir, "Dockerfile"),
118 |         `
119 | FROM node:20
120 | WORKDIR /app
121 | COPY . .
122 | RUN npm install
123 | CMD ["npm", "start"]
124 |       `,
125 |       );
126 | 
127 |       await fs.writeFile(
128 |         path.join(dockerDir, "docker-compose.yml"),
129 |         `
130 | version: '3'
131 | services:
132 |   app:
133 |     build: .
134 |     ports:
135 |       - "3000:3000"
136 |       `,
137 |       );
138 | 
139 |       await fs.writeFile(
140 |         path.join(dockerDir, "package.json"),
141 |         '{"name": "docker-app"}',
142 |       );
143 | 
144 |       const result = await analyzeRepository({
145 |         path: dockerDir,
146 |         depth: "standard",
147 |       });
148 |       expect(result.content).toBeDefined();
149 |       const analysis = JSON.parse(result.content[0].text);
150 | 
151 |       // Verify basic analysis works - Docker detection not implemented
152 |       expect(analysis.structure).toBeDefined();
153 |       expect(analysis.structure.totalFiles).toBe(3);
154 |       expect(analysis.dependencies.ecosystem).toBe("javascript");
155 |     });
156 | 
157 |     it("should analyze project with existing docs", async () => {
158 |       const docsDir = path.join(tempDir, "docs-project");
159 |       await fs.mkdir(path.join(docsDir, "docs"), { recursive: true });
160 |       await fs.mkdir(path.join(docsDir, "documentation"), { recursive: true });
161 | 
162 |       await fs.writeFile(
163 |         path.join(docsDir, "docs", "index.md"),
164 |         "# Documentation",
165 |       );
166 |       await fs.writeFile(
167 |         path.join(docsDir, "docs", "api.md"),
168 |         "# API Reference",
169 |       );
170 |       await fs.writeFile(
171 |         path.join(docsDir, "documentation", "guide.md"),
172 |         "# User Guide",
173 |       );
174 |       await fs.writeFile(
175 |         path.join(docsDir, "README.md"),
176 |         "# Project with Docs",
177 |       );
178 | 
179 |       const result = await analyzeRepository({
180 |         path: docsDir,
181 |         depth: "standard",
182 |       });
183 |       expect(result.content).toBeDefined();
184 |       const analysis = JSON.parse(result.content[0].text);
185 |       expect(analysis.structure.hasDocs).toBe(true);
186 |     });
187 |   });
188 | 
189 |   describe("Edge Cases and Error Handling", () => {
190 |     it("should handle empty repository", async () => {
191 |       const emptyDir = path.join(tempDir, "empty-repo");
192 |       await fs.mkdir(emptyDir, { recursive: true });
193 | 
194 |       const result = await analyzeRepository({
195 |         path: emptyDir,
196 |         depth: "quick",
197 |       });
198 |       expect(result.content).toBeDefined();
199 |       const analysis = JSON.parse(result.content[0].text);
200 |       expect(analysis.dependencies.ecosystem).toBe("unknown");
201 |     });
202 | 
203 |     it("should handle repository with only config files", async () => {
204 |       const configDir = path.join(tempDir, "config-only");
205 |       await fs.mkdir(configDir, { recursive: true });
206 | 
207 |       await fs.writeFile(path.join(configDir, ".gitignore"), "node_modules/");
208 |       await fs.writeFile(
209 |         path.join(configDir, ".editorconfig"),
210 |         "indent_style = space",
211 |       );
212 |       await fs.writeFile(path.join(configDir, "LICENSE"), "MIT License");
213 | 
214 |       const result = await analyzeRepository({
215 |         path: configDir,
216 |         depth: "standard",
217 |       });
218 |       expect(result.content).toBeDefined();
219 |       expect(result.content.length).toBeGreaterThan(0);
220 |     });
221 | 
222 |     it("should handle deep analysis depth", async () => {
223 |       const deepDir = path.join(tempDir, "deep-analysis");
224 |       await fs.mkdir(deepDir, { recursive: true });
225 | 
226 |       // Create nested structure
227 |       await fs.mkdir(path.join(deepDir, "src", "components", "ui"), {
228 |         recursive: true,
229 |       });
230 |       await fs.mkdir(path.join(deepDir, "src", "utils", "helpers"), {
231 |         recursive: true,
232 |       });
233 |       await fs.mkdir(path.join(deepDir, "tests", "unit"), { recursive: true });
234 | 
235 |       await fs.writeFile(
236 |         path.join(deepDir, "package.json"),
237 |         JSON.stringify({
238 |           name: "deep-project",
239 |           scripts: {
240 |             test: "jest",
241 |             build: "webpack",
242 |             lint: "eslint .",
243 |           },
244 |         }),
245 |       );
246 | 
247 |       await fs.writeFile(
248 |         path.join(deepDir, "src", "index.js"),
249 |         'console.log("app");',
250 |       );
251 |       await fs.writeFile(
252 |         path.join(deepDir, "src", "components", "ui", "Button.js"),
253 |         "export default Button;",
254 |       );
255 |       await fs.writeFile(
256 |         path.join(deepDir, "tests", "unit", "test.js"),
257 |         'test("sample", () => {});',
258 |       );
259 | 
260 |       const result = await analyzeRepository({ path: deepDir, depth: "deep" });
261 |       expect(result.content).toBeDefined();
262 |       const analysis = JSON.parse(result.content[0].text);
263 |       expect(analysis.structure.hasTests).toBe(true);
264 |     });
265 | 
266 |     it("should analyze repository with multiple ecosystems", async () => {
267 |       const multiDir = path.join(tempDir, "multi-ecosystem");
268 |       await fs.mkdir(multiDir, { recursive: true });
269 | 
270 |       // JavaScript
271 |       await fs.writeFile(
272 |         path.join(multiDir, "package.json"),
273 |         '{"name": "frontend"}',
274 |       );
275 | 
276 |       // Python
277 |       await fs.writeFile(
278 |         path.join(multiDir, "requirements.txt"),
279 |         "flask==2.0.0",
280 |       );
281 | 
282 |       // Ruby
283 |       await fs.writeFile(path.join(multiDir, "Gemfile"), 'gem "rails"');
284 | 
285 |       const result = await analyzeRepository({
286 |         path: multiDir,
287 |         depth: "standard",
288 |       });
289 |       expect(result.content).toBeDefined();
290 |       // Should detect the primary ecosystem (usually the one with most files/config)
291 |       const analysis = JSON.parse(result.content[0].text);
292 |       expect(["javascript", "python", "ruby"]).toContain(
293 |         analysis.dependencies.ecosystem,
294 |       );
295 |     });
296 |   });
297 | 
298 |   describe("Repository Complexity Analysis", () => {
299 |     it("should calculate complexity metrics", async () => {
300 |       const complexDir = path.join(tempDir, "complex-repo");
301 |       await fs.mkdir(path.join(complexDir, ".github", "workflows"), {
302 |         recursive: true,
303 |       });
304 | 
305 |       // Create various files to test complexity
306 |       await fs.writeFile(
307 |         path.join(complexDir, "package.json"),
308 |         JSON.stringify({
309 |           name: "complex-app",
310 |           dependencies: {
311 |             react: "^18.0.0",
312 |             express: "^4.0.0",
313 |             webpack: "^5.0.0",
314 |           },
315 |           devDependencies: {
316 |             jest: "^29.0.0",
317 |             eslint: "^8.0.0",
318 |           },
319 |         }),
320 |       );
321 | 
322 |       await fs.writeFile(
323 |         path.join(complexDir, ".github", "workflows", "ci.yml"),
324 |         `
325 | name: CI
326 | on: push
327 | jobs:
328 |   test:
329 |     runs-on: ubuntu-latest
330 |       `,
331 |       );
332 | 
333 |       await fs.writeFile(
334 |         path.join(complexDir, "README.md"),
335 |         "# Complex Project\n\nWith detailed documentation",
336 |       );
337 |       await fs.writeFile(
338 |         path.join(complexDir, "CONTRIBUTING.md"),
339 |         "# Contributing Guide",
340 |       );
341 | 
342 |       const result = await analyzeRepository({
343 |         path: complexDir,
344 |         depth: "deep",
345 |       });
346 |       expect(result.content).toBeDefined();
347 |       const analysis = JSON.parse(result.content[0].text);
348 |       expect(analysis.structure.hasCI).toBe(true);
349 |       expect(analysis.documentation.hasReadme).toBe(true);
350 |     });
351 |   });
352 | });
353 | 
```
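
The assertions above imply a minimal shape for the parsed analysis payload. A hedged sketch of that shape (field names inferred from the test expectations, not taken from the `analyze-repository` source, which may carry more fields):

```typescript
// Inferred from the test expectations above; illustrative only.
interface InferredRepositoryAnalysis {
  structure: {
    totalFiles: number;
    hasDocs: boolean;
    hasTests: boolean;
    hasCI: boolean;
  };
  dependencies: {
    ecosystem: string; // e.g. "javascript", "python", "ruby", "go", "unknown"
  };
  documentation: {
    hasReadme: boolean;
  };
}

// The tool returns MCP content blocks whose first entry is a JSON string,
// which is why every test parses result.content[0].text.
function parseAnalysis(result: {
  content: Array<{ text: string }>;
}): InferredRepositoryAnalysis {
  return JSON.parse(result.content[0].text);
}
```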

--------------------------------------------------------------------------------
/src/utils/user-feedback-integration.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * User Feedback Integration for Priority Scoring (ADR-012 Phase 3)
  3 |  *
  4 |  * Integrates with issue tracking systems (GitHub Issues, GitLab Issues, etc.)
  5 |  * to incorporate user-reported documentation issues into priority scoring.
  6 |  */
  7 | 
  8 | import { DriftDetectionResult } from "./drift-detector.js";
  9 | 
 10 | export interface IssueTrackerConfig {
 11 |   provider: "github" | "gitlab" | "jira" | "linear";
 12 |   apiToken?: string;
 13 |   baseUrl?: string;
 14 |   owner?: string;
 15 |   repo?: string;
 16 |   project?: string;
 17 | }
 18 | 
 19 | export interface DocumentationIssue {
 20 |   id: string;
 21 |   title: string;
 22 |   body: string;
 23 |   state: "open" | "closed";
 24 |   labels: string[];
 25 |   createdAt: string;
 26 |   updatedAt: string;
 27 |   affectedFiles?: string[];
 28 |   affectedSymbols?: string[];
 29 |   severity?: "low" | "medium" | "high" | "critical";
 30 | }
 31 | 
 32 | export interface UserFeedbackScore {
 33 |   totalIssues: number;
 34 |   openIssues: number;
 35 |   criticalIssues: number;
 36 |   recentIssues: number; // Issues updated in last 30 days
 37 |   score: number; // 0-100
 38 | }
 39 | 
 40 | /**
 41 |  * User Feedback Integration for ADR-012
 42 |  *
 43 |  * Fetches documentation-related issues from issue trackers
 44 |  * and calculates user feedback scores for priority scoring.
 45 |  */
 46 | export class UserFeedbackIntegration {
 47 |   private config: IssueTrackerConfig | null = null;
 48 |   private cache: Map<
 49 |     string,
 50 |     { issues: DocumentationIssue[]; timestamp: number }
 51 |   > = new Map();
 52 |   private cacheTTL = 5 * 60 * 1000; // 5 minutes
 53 | 
 54 |   constructor(config?: IssueTrackerConfig) {
 55 |     this.config = config || null;
 56 |   }
 57 | 
 58 |   /**
 59 |    * Configure issue tracker connection
 60 |    */
 61 |   configure(config: IssueTrackerConfig): void {
 62 |     this.config = config;
 63 |     this.cache.clear(); // Clear cache when config changes
 64 |   }
 65 | 
 66 |   /**
 67 |    * Calculate user feedback score for a drift detection result
 68 |    */
 69 |   async calculateFeedbackScore(result: DriftDetectionResult): Promise<number> {
 70 |     if (!this.config) {
 71 |       return 0; // No feedback integration configured
 72 |     }
 73 | 
 74 |     try {
 75 |       const issues = await this.getDocumentationIssues(result.filePath);
 76 |       const feedback = this.analyzeIssues(issues, result);
 77 | 
 78 |       return feedback.score;
 79 |     } catch (error) {
 80 |       console.warn(
 81 |         `Failed to fetch user feedback for ${result.filePath}:`,
 82 |         error,
 83 |       );
 84 |       return 0; // Graceful degradation
 85 |     }
 86 |   }
 87 | 
 88 |   /**
 89 |    * Get documentation-related issues for a file
 90 |    */
 91 |   private async getDocumentationIssues(
 92 |     filePath: string,
 93 |   ): Promise<DocumentationIssue[]> {
 94 |     const cacheKey = `issues:${filePath}`;
 95 |     const cached = this.cache.get(cacheKey);
 96 | 
 97 |     // Return cached data if still valid
 98 |     if (cached && Date.now() - cached.timestamp < this.cacheTTL) {
 99 |       return cached.issues;
100 |     }
101 | 
102 |     if (!this.config) {
103 |       return [];
104 |     }
105 | 
106 |     let issues: DocumentationIssue[] = [];
107 | 
108 |     try {
109 |       switch (this.config.provider) {
110 |         case "github":
111 |           issues = await this.fetchGitHubIssues(filePath);
112 |           break;
113 |         case "gitlab":
114 |           issues = await this.fetchGitLabIssues(filePath);
115 |           break;
116 |         case "jira":
117 |           issues = await this.fetchJiraIssues(filePath);
118 |           break;
119 |         case "linear":
120 |           issues = await this.fetchLinearIssues(filePath);
121 |           break;
122 |       }
123 | 
124 |       // Cache the results
125 |       this.cache.set(cacheKey, {
126 |         issues,
127 |         timestamp: Date.now(),
128 |       });
129 |     } catch (error) {
130 |       console.warn(
131 |         `Failed to fetch issues from ${this.config.provider}:`,
132 |         error,
133 |       );
134 |     }
135 | 
136 |     return issues;
137 |   }
138 | 
139 |   /**
140 |    * Fetch GitHub Issues related to documentation
141 |    */
142 |   private async fetchGitHubIssues(
143 |     filePath: string,
144 |   ): Promise<DocumentationIssue[]> {
145 |     if (!this.config?.apiToken || !this.config.owner || !this.config.repo) {
146 |       return [];
147 |     }
148 | 
149 |     const url = `https://api.github.com/repos/${this.config.owner}/${this.config.repo}/issues?state=all&labels=documentation,docs`;
150 |     const headers: Record<string, string> = {
151 |       Accept: "application/vnd.github.v3+json",
152 |       "User-Agent": "DocuMCP/1.0",
153 |     };
154 | 
155 |     if (this.config.apiToken) {
156 |       headers.Authorization = `token ${this.config.apiToken}`;
157 |     }
158 | 
159 |     try {
160 |       const response = await fetch(url, { headers });
161 |       if (!response.ok) {
162 |         throw new Error(`GitHub API error: ${response.status}`);
163 |       }
164 | 
165 |       const data = (await response.json()) as any[];
166 |       return this.parseGitHubIssues(data, filePath);
167 |     } catch (error) {
168 |       console.warn("GitHub API fetch failed:", error);
169 |       return [];
170 |     }
171 |   }
172 | 
173 |   /**
174 |    * Parse GitHub Issues API response
175 |    */
176 |   private parseGitHubIssues(
177 |     data: any[],
178 |     filePath: string,
179 |   ): DocumentationIssue[] {
180 |     return data
181 |       .filter((issue) => !issue.pull_request) // Exclude PRs
182 |       .map((issue) => {
183 |         // Extract affected files/symbols from issue body
184 |         const affectedFiles = this.extractFileReferences(issue.body || "");
185 |         const affectedSymbols = this.extractSymbolReferences(issue.body || "");
186 | 
187 |         // Determine severity from labels
188 |         const severity = this.determineSeverityFromLabels(issue.labels || []);
189 | 
190 |         return {
191 |           id: issue.number.toString(),
192 |           title: issue.title,
193 |           body: issue.body || "",
194 |           state: (issue.state === "open" ? "open" : "closed") as
195 |             | "open"
196 |             | "closed",
197 |           labels: (issue.labels || []).map((l: any) => l.name || l),
198 |           createdAt: issue.created_at,
199 |           updatedAt: issue.updated_at,
200 |           affectedFiles,
201 |           affectedSymbols,
202 |           severity,
203 |         };
204 |       })
205 |       .filter((issue) => {
206 |         // Filter to issues that mention the file or its symbols
207 |         const fileMatches = issue.affectedFiles?.some(
208 |           (f) => filePath.includes(f) || f.includes(filePath),
209 |         );
210 |         const isDocumentationRelated = issue.labels.some((l: string) =>
211 |           ["documentation", "docs", "doc"].includes(l.toLowerCase()),
212 |         );
213 |         return fileMatches || isDocumentationRelated;
214 |       });
215 |   }
216 | 
217 |   /**
218 |    * Fetch GitLab Issues (placeholder)
219 |    */
220 |   private async fetchGitLabIssues(
221 |     _filePath: string,
222 |   ): Promise<DocumentationIssue[]> {
223 |     // TODO: Implement GitLab API integration
224 |     return [];
225 |   }
226 | 
227 |   /**
228 |    * Fetch Jira Issues (placeholder)
229 |    */
230 |   private async fetchJiraIssues(
231 |     _filePath: string,
232 |   ): Promise<DocumentationIssue[]> {
233 |     // TODO: Implement Jira API integration
234 |     return [];
235 |   }
236 | 
237 |   /**
238 |    * Fetch Linear Issues (placeholder)
239 |    */
240 |   private async fetchLinearIssues(
241 |     _filePath: string,
242 |   ): Promise<DocumentationIssue[]> {
243 |     // TODO: Implement Linear API integration
244 |     return [];
245 |   }
246 | 
247 |   /**
248 |    * Extract file references from issue body
249 |    */
250 |   private extractFileReferences(body: string): string[] {
251 |     const files: string[] = [];
252 |     // Match file paths in markdown code blocks or inline code
253 |     const filePatterns = [
254 |       /`([^`]+\.(ts|js|tsx|jsx|md|mdx))`/g,
255 |       /\[([^\]]+\.(ts|js|tsx|jsx|md|mdx))\]/g,
256 |       /(?:file|path|location):\s*([^\s]+\.(ts|js|tsx|jsx|md|mdx))/gi,
257 |     ];
258 | 
259 |     for (const pattern of filePatterns) {
260 |       const matches = body.matchAll(pattern);
261 |       for (const match of matches) {
262 |         if (match[1]) {
263 |           files.push(match[1]);
264 |         }
265 |       }
266 |     }
267 | 
268 |     return [...new Set(files)];
269 |   }
270 | 
271 |   /**
272 |    * Extract symbol references from issue body
273 |    */
274 |   private extractSymbolReferences(body: string): string[] {
275 |     const symbols: string[] = [];
276 |     // Match function/class names in code blocks
277 |     const symbolPatterns = [
278 |       /`([A-Za-z_][A-Za-z0-9_]*)\(\)?`/g, // capture the bare name (no trailing parens) so it matches diff.name
279 |       /(?:function|class|method|API):\s*`?([A-Za-z_][A-Za-z0-9_]*)`?/gi,
280 |     ];
281 | 
282 |     for (const pattern of symbolPatterns) {
283 |       const matches = body.matchAll(pattern);
284 |       for (const match of matches) {
285 |         if (match[1]) {
286 |           symbols.push(match[1]);
287 |         }
288 |       }
289 |     }
290 | 
291 |     return [...new Set(symbols)];
292 |   }
293 | 
294 |   /**
295 |    * Determine severity from issue labels
296 |    */
297 |   private determineSeverityFromLabels(
298 |     labels: Array<{ name?: string } | string>,
299 |   ): "low" | "medium" | "high" | "critical" {
300 |     const labelNames = labels.map((l) =>
301 |       typeof l === "string" ? l : l.name || "",
302 |     );
303 |     const lowerLabels = labelNames.map((l) => l.toLowerCase());
304 | 
305 |     if (
306 |       lowerLabels.some((l) =>
307 |         ["critical", "p0", "severity: critical", "priority: critical"].includes(
308 |           l,
309 |         ),
310 |       )
311 |     ) {
312 |       return "critical";
313 |     }
314 |     if (
315 |       lowerLabels.some((l) =>
316 |         ["high", "p1", "severity: high", "priority: high"].includes(l),
317 |       )
318 |     ) {
319 |       return "high";
320 |     }
321 |     if (
322 |       lowerLabels.some((l) =>
323 |         ["medium", "p2", "severity: medium", "priority: medium"].includes(l),
324 |       )
325 |     ) {
326 |       return "medium";
327 |     }
328 |     return "low";
329 |   }
330 | 
331 |   /**
332 |    * Analyze issues and calculate feedback score
333 |    */
334 |   private analyzeIssues(
335 |     issues: DocumentationIssue[],
336 |     result: DriftDetectionResult,
337 |   ): UserFeedbackScore {
338 |     const now = Date.now();
339 |     const thirtyDaysAgo = now - 30 * 24 * 60 * 60 * 1000;
340 | 
341 |     const openIssues = issues.filter((i) => i.state === "open");
342 |     const criticalIssues = issues.filter(
343 |       (i) => i.severity === "critical" && i.state === "open",
344 |     );
345 |     const recentIssues = issues.filter(
346 |       (i) => new Date(i.updatedAt).getTime() > thirtyDaysAgo,
347 |     );
348 | 
349 |     // Calculate score based on issue metrics
350 |     let score = 0;
351 | 
352 |     // Critical open issues contribute heavily
353 |     score += criticalIssues.length * 30;
354 |     score = Math.min(score, 100);
355 | 
356 |     // Open issues contribute moderately
357 |     score += openIssues.length * 10;
358 |     score = Math.min(score, 100);
359 | 
360 |     // Recent activity indicates ongoing concern
361 |     if (recentIssues.length > 0) {
362 |       score += Math.min(recentIssues.length * 5, 20);
363 |       score = Math.min(score, 100);
364 |     }
365 | 
366 |     // Match issues to affected symbols for higher relevance
367 |     const affectedSymbols = new Set<string>();
368 |     for (const drift of result.drifts) {
369 |       for (const diff of drift.codeChanges) {
370 |         affectedSymbols.add(diff.name);
371 |       }
372 |     }
373 | 
374 |     const relevantIssues = issues.filter((issue) => {
375 |       if (!issue.affectedSymbols) return false;
376 |       return issue.affectedSymbols.some((symbol) =>
377 |         affectedSymbols.has(symbol),
378 |       );
379 |     });
380 | 
381 |     if (relevantIssues.length > 0) {
382 |       score += Math.min(relevantIssues.length * 15, 30);
383 |       score = Math.min(score, 100);
384 |     }
385 | 
386 |     return {
387 |       totalIssues: issues.length,
388 |       openIssues: openIssues.length,
389 |       criticalIssues: criticalIssues.length,
390 |       recentIssues: recentIssues.length,
391 |       score: Math.round(score),
392 |     };
393 |   }
394 | 
395 |   /**
396 |    * Clear cache (useful for testing or forced refresh)
397 |    */
398 |   clearCache(): void {
399 |     this.cache.clear();
400 |   }
401 | }
402 | 
```
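
A minimal usage sketch of the class above. The drift-result literal is trimmed to the two fields this module actually reads (`filePath` and `drifts[].codeChanges[].name`); the full `DriftDetectionResult` interface lives in `drift-detector.ts`, hence the cast:

```typescript
import { UserFeedbackIntegration } from "./user-feedback-integration.js";

const feedback = new UserFeedbackIntegration({
  provider: "github",
  apiToken: process.env.GITHUB_TOKEN,
  owner: "tosin2013",
  repo: "documcp",
});

// Trimmed stand-in for a real DriftDetectionResult; only the fields read by
// calculateFeedbackScore are populated, so the cast is for illustration only.
const driftResult = {
  filePath: "docs/reference/api-overview.md",
  drifts: [{ codeChanges: [{ name: "analyzeRepository" }] }],
} as any;

feedback.calculateFeedbackScore(driftResult).then((score) => {
  // 0 when unconfigured or on API failure; otherwise 0-100, weighted by
  // open, critical, recent, and symbol-relevant documentation issues.
  console.log(`User feedback score: ${score}`);
});
```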

--------------------------------------------------------------------------------
/docs/reference/api-overview.md:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | sidebar_position: 1
  3 | documcp:
  4 |   last_updated: "2025-11-20T00:46:21.959Z"
  5 |   last_validated: "2025-12-09T19:41:38.589Z"
  6 |   auto_updated: false
  7 |   update_frequency: monthly
  8 |   validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
  9 | ---
 10 | 
 11 | # API Overview
 12 | 
 13 | DocuMCP provides **45 specialized tools** organized into functional categories for intelligent documentation deployment via the Model Context Protocol (MCP).
 14 | 
 15 | ## 🎯 Quick Reference: LLM_CONTEXT.md
 16 | 
 17 | For AI assistants and LLMs, reference the **comprehensive context file**:
 18 | 
 19 | **File**: `/LLM_CONTEXT.md` (in project root)
 20 | 
 21 | This auto-generated file provides:
 22 | 
 23 | - All 45 tool descriptions with parameters
 24 | - Usage examples and code snippets
 25 | - Common workflow patterns
 26 | - Memory system documentation
 27 | - Phase 3 code-to-docs sync features
 28 | 
 29 | **Usage in AI assistants**:
 30 | 
 31 | ```
 32 | @LLM_CONTEXT.md help me deploy documentation to GitHub Pages
 33 | ```
 34 | 
 35 | ## 📚 Tool Categories
 36 | 
 37 | ### Core Documentation Tools (9 tools)
 38 | 
 39 | Essential tools for repository analysis, recommendations, and deployment:
 40 | 
 41 | | Tool                            | Purpose                                  | Key Parameters                     |
 42 | | ------------------------------- | ---------------------------------------- | ---------------------------------- |
 43 | | `analyze_repository`            | Analyze project structure & dependencies | `path`, `depth`                    |
 44 | | `recommend_ssg`                 | Recommend static site generator          | `analysisId`, `preferences`        |
 45 | | `generate_config`               | Generate SSG configuration files         | `ssg`, `projectName`, `outputPath` |
 46 | | `setup_structure`               | Create Diataxis documentation structure  | `path`, `ssg`                      |
 47 | | `deploy_pages`                  | Deploy to GitHub Pages with tracking     | `repository`, `ssg`, `userId`      |
 48 | | `verify_deployment`             | Verify deployment status                 | `repository`, `url`                |
 49 | | `populate_diataxis_content`     | Generate project-specific content        | `analysisId`, `docsPath`           |
 50 | | `update_existing_documentation` | Update existing docs intelligently       | `analysisId`, `docsPath`           |
 51 | | `validate_diataxis_content`     | Validate documentation quality           | `contentPath`, `validationType`    |
 52 | 
 53 | ### README Analysis & Generation (6 tools)
 54 | 
 55 | Specialized tools for README creation and optimization:
 56 | 
 57 | | Tool                        | Purpose                                   | Key Parameters                               |
 58 | | --------------------------- | ----------------------------------------- | -------------------------------------------- |
 59 | | `evaluate_readme_health`    | Assess README quality & onboarding        | `readme_path`, `project_type`                |
 60 | | `readme_best_practices`     | Analyze against best practices            | `readme_path`, `generate_template`           |
 61 | | `generate_readme_template`  | Create standardized README                | `projectName`, `description`, `templateType` |
 62 | | `validate_readme_checklist` | Validate against community standards      | `readmePath`, `strict`                       |
 63 | | `analyze_readme`            | Comprehensive length & structure analysis | `project_path`, `optimization_level`         |
 64 | | `optimize_readme`           | Restructure and condense content          | `readme_path`, `strategy`, `max_length`      |
 65 | 
 66 | ### Phase 3: Code-to-Docs Synchronization (2 tools)
 67 | 
 68 | Advanced AST-based code analysis and drift detection:
 69 | 
 70 | | Tool                          | Purpose                            | Key Parameters                    |
 71 | | ----------------------------- | ---------------------------------- | --------------------------------- |
 72 | | `sync_code_to_docs`           | Detect and fix documentation drift | `projectPath`, `docsPath`, `mode` |
 73 | | `generate_contextual_content` | Generate docs from code analysis   | `filePath`, `documentationType`   |
 74 | 
 75 | **Supported Languages**: TypeScript, JavaScript, Python, Go, Rust, Java, Ruby, Bash
 76 | 
 77 | **Drift Types Detected**: Outdated, Incorrect, Missing, Breaking
 78 | 
 79 | ### Memory & Analytics Tools (2 tools)
 80 | 
 81 | User preferences and deployment pattern analysis:
 82 | 
 83 | | Tool                  | Purpose                                | Key Parameters                      |
 84 | | --------------------- | -------------------------------------- | ----------------------------------- |
 85 | | `manage_preferences`  | Manage user preferences & SSG history  | `action`, `userId`, `preferences`   |
 86 | | `analyze_deployments` | Analyze deployment patterns & insights | `analysisType`, `ssg`, `periodDays` |
 87 | 
 88 | ### Validation & Testing Tools (4 tools)
 89 | 
 90 | Quality assurance and deployment testing:
 91 | 
 92 | | Tool                        | Purpose                              | Key Parameters                               |
 93 | | --------------------------- | ------------------------------------ | -------------------------------------------- |
 94 | | `validate_content`          | Validate links, code, and references | `contentPath`, `validationType`              |
 95 | | `check_documentation_links` | Comprehensive link validation        | `documentation_path`, `check_external_links` |
 96 | | `test_local_deployment`     | Test build and local server          | `repositoryPath`, `ssg`, `port`              |
 97 | | `setup_playwright_tests`    | Generate E2E test infrastructure     | `repositoryPath`, `ssg`, `projectName`       |
 98 | 
 99 | ### Utility Tools (3 tools)
100 | 
101 | Additional functionality and management:
102 | 
103 | | Tool                        | Purpose                           | Key Parameters                        |
104 | | --------------------------- | --------------------------------- | ------------------------------------- |
105 | | `detect_documentation_gaps` | Identify missing content          | `repositoryPath`, `documentationPath` |
106 | | `manage_sitemap`            | Generate and validate sitemap.xml | `action`, `docsPath`, `baseUrl`       |
107 | | `read_directory`            | List files within allowed roots   | `path`                                |
108 | 
109 | ### Advanced Memory Tools (19 tools)
110 | 
111 | Sophisticated memory, learning, and knowledge graph operations:
112 | 
113 | | Tool Category       | Tools                                                                  | Purpose                       |
114 | | ------------------- | ---------------------------------------------------------------------- | ----------------------------- |
115 | | **Memory Recall**   | `memory_recall`, `memory_contextual_search`                            | Retrieve and search memories  |
116 | | **Intelligence**    | `memory_intelligent_analysis`, `memory_enhanced_recommendation`        | AI-powered insights           |
117 | | **Knowledge Graph** | `memory_knowledge_graph`, `memory_learning_stats`                      | Graph queries and statistics  |
118 | | **Collaboration**   | `memory_agent_network`                                                 | Multi-agent memory sharing    |
119 | | **Insights**        | `memory_insights`, `memory_similar`, `memory_temporal_analysis`        | Pattern analysis              |
120 | | **Data Management** | `memory_export`, `memory_cleanup`, `memory_pruning`                    | Export, cleanup, optimization |
121 | | **Visualization**   | `memory_visualization`                                                 | Visual representations        |
122 | | **Advanced I/O**    | `memory_export_advanced`, `memory_import_advanced`, `memory_migration` | Complex data operations       |
123 | | **Metrics**         | `memory_optimization_metrics`                                          | Performance analysis          |
124 | 
125 | ## 🔗 Detailed Documentation
126 | 
127 | ### Full API Reference
128 | 
129 | - **[MCP Tools API](./mcp-tools.md)** - Complete tool descriptions with examples
130 | - **[TypeDoc API](../api/)** - Auto-generated API documentation for all classes, interfaces, and functions
131 | - **[LLM Context Reference](../../LLM_CONTEXT.md)** - Comprehensive tool reference for AI assistants
132 | 
133 | ### Configuration & Usage
134 | 
135 | - **[Configuration Options](./configuration.md)** - All configuration settings
136 | - **[CLI Commands](./cli.md)** - Command-line interface reference
137 | - **[Prompt Templates](./prompt-templates.md)** - Pre-built prompt examples
138 | 
139 | ## 🚀 Common Workflows
140 | 
141 | ### 1. New Documentation Site
142 | 
143 | ```
144 | analyze_repository → recommend_ssg → generate_config →
145 | setup_structure → populate_diataxis_content → deploy_pages
146 | ```
147 | 
148 | ### 2. Documentation Sync (Phase 3)
149 | 
150 | ```
151 | sync_code_to_docs (detect) → review drift →
152 | sync_code_to_docs (apply) → manual review
153 | ```
154 | 
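    | In an MCP client this chain is two calls to the same tool. A minimal sketch, assuming the `callTool` helper used in examples elsewhere in this reference (the `mode` values mirror the workflow above):
    | 
    | ```typescript
    | declare function callTool(
    |   name: string,
    |   args: Record<string, unknown>,
    | ): Promise<unknown>;
    | 
    | // Pass 1: detect drift only, then review the reported changes.
    | const detection = await callTool("sync_code_to_docs", {
    |   projectPath: "/path/to/project",
    |   docsPath: "./docs",
    |   mode: "detect",
    | });
    | 
    | // Pass 2: apply suggested updates, followed by manual review.
    | const applied = await callTool("sync_code_to_docs", {
    |   projectPath: "/path/to/project",
    |   docsPath: "./docs",
    |   mode: "apply",
    | });
    | ```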
155 | ### 3. Existing Docs Improvement
156 | 
157 | ```
158 | analyze_repository → update_existing_documentation →
159 | validate_diataxis_content → check_documentation_links
160 | ```
161 | 
162 | ### 4. README Enhancement
163 | 
164 | ```
165 | analyze_readme → evaluate_readme_health →
166 | readme_best_practices → optimize_readme
167 | ```
168 | 
169 | ## 📦 Memory Knowledge Graph
170 | 
171 | DocuMCP includes a persistent memory system that learns from every analysis:
172 | 
173 | ### Entity Types
174 | 
175 | - **Project**: Software projects with analysis history
176 | - **User**: User preferences and SSG patterns
177 | - **Configuration**: SSG deployment configs with success rates
178 | - **Documentation**: Documentation structures and patterns
179 | - **CodeFile**: Source code files with change tracking
180 | - **DocumentationSection**: Docs sections linked to code
181 | - **Technology**: Languages, frameworks, and tools
182 | 
183 | ### Relationship Types
184 | 
185 | - `project_uses_technology`: Links projects to tech stack
186 | - `user_prefers_ssg`: Tracks user SSG preferences
187 | - `project_deployed_with`: Records deployment outcomes
188 | - `similar_to`: Identifies similar projects
189 | - `documents`: Links code files to documentation
190 | - `outdated_for`: Flags out-of-sync documentation
191 | - `depends_on`: Tracks technology dependencies
192 | 
193 | ### Storage Location
194 | 
195 | - **Default**: `.documcp/memory/`
196 | - **Entities**: `.documcp/memory/knowledge-graph-entities.jsonl` (one JSON record per line; see the sketch below)
197 | - **Relationships**: `.documcp/memory/knowledge-graph-relationships.jsonl`
198 | - **Backups**: `.documcp/memory/backups/`
199 | - **Snapshots**: `.documcp/snapshots/` (for drift detection)
200 | 
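    | For orientation, a record in these JSONL files might look like the sketch below. The field names are hypothetical and illustrative only, not the exact on-disk schema:
    | 
    | ```typescript
    | // Hypothetical shapes for one entity line and one relationship line.
    | interface KGEntityLine {
    |   id: string;
    |   type: "Project" | "User" | "Configuration" | "Technology";
    |   properties: Record<string, unknown>;
    | }
    | 
    | interface KGRelationshipLine {
    |   from: string;
    |   to: string;
    |   type: "project_uses_technology" | "user_prefers_ssg" | "documents";
    | }
    | 
    | const entity: KGEntityLine = {
    |   id: "project:my-docs",
    |   type: "Project",
    |   properties: { primaryLanguage: "typescript" },
    | };
    | 
    | // Each record is serialized as a single line in the .jsonl file.
    | console.log(JSON.stringify(entity));
    | ```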
201 | ## 🎓 Getting Started
202 | 
203 | 1. **Start with tutorials**: [Getting Started Guide](../tutorials/getting-started.md)
204 | 2. **Learn effective prompting**: [Prompting Guide](../how-to/prompting-guide.md)
205 | 3. **Reference LLM_CONTEXT.md**: Use `@LLM_CONTEXT.md` in AI assistants
206 | 4. **Explore workflows**: [Common Workflows](#common-workflows)
207 | 
208 | ## 📊 Tool Statistics
209 | 
210 | - **Total Tools**: 45
211 | - **Core Documentation**: 9 tools
212 | - **README Management**: 6 tools
213 | - **Phase 3 Sync**: 2 tools
214 | - **Memory & Analytics**: 2 tools
215 | - **Validation**: 4 tools
216 | - **Utilities**: 3 tools
217 | - **Advanced Memory**: 19 tools
218 | 
219 | ## 🔍 Search & Discovery
220 | 
221 | - **By functionality**: Use the category tables above
222 | - **By name**: See [MCP Tools API](./mcp-tools.md)
223 | - **By code**: Browse [TypeDoc API](../api/)
224 | - **For AI assistants**: Reference [LLM_CONTEXT.md](../../LLM_CONTEXT.md)
225 | 
226 | ---
227 | 
228 | _Documentation auto-generated from DocuMCP v0.3.2_
229 | 
```

--------------------------------------------------------------------------------
/docs/reference/deploy-pages.md:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | documcp:
  3 |   last_updated: "2025-11-20T00:46:21.961Z"
  4 |   last_validated: "2025-12-09T19:41:38.591Z"
  5 |   auto_updated: false
  6 |   update_frequency: monthly
  7 |   validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
  8 | ---
  9 | 
 10 | # Deploy Pages Tool Documentation
 11 | 
 12 | ## Overview
 13 | 
 14 | The `deploy_pages` tool provides automated GitHub Pages deployment setup with intelligent SSG (Static Site Generator) detection, optimized workflow generation, and comprehensive deployment tracking through the Knowledge Graph system.
 15 | 
 16 | ## Features
 17 | 
 18 | - **SSG Auto-Detection**: Automatically retrieves SSG recommendations from Knowledge Graph using analysisId
 19 | - **Optimized Workflows**: Generates SSG-specific GitHub Actions workflows with best practices
 20 | - **Package Manager Detection**: Supports npm, yarn, and pnpm with automatic lockfile detection
 21 | - **Documentation Folder Detection**: Intelligently detects docs folders (docs/, website/, documentation/)
 22 | - **Custom Domain Support**: Automatic CNAME file generation
 23 | - **Deployment Tracking**: Integrates with Knowledge Graph to track deployment success/failure
 24 | - **User Preference Learning**: Tracks SSG usage patterns for personalized recommendations
 25 | 
 26 | ## Usage
 27 | 
 28 | ### Basic Usage
 29 | 
 30 | ```javascript
 31 | // Deploy with explicit SSG
 32 | const result = await callTool("deploy_pages", {
 33 |   repository: "/path/to/project",
 34 |   ssg: "docusaurus",
 35 | });
 36 | ```
 37 | 
 38 | ### Advanced Usage with Knowledge Graph Integration
 39 | 
 40 | ```javascript
 41 | // Deploy using SSG from previous analysis
 42 | const result = await callTool("deploy_pages", {
 43 |   repository: "https://github.com/user/repo.git",
 44 |   analysisId: "repo-analysis-123", // SSG retrieved from KG
 45 |   projectPath: "/local/path",
 46 |   projectName: "My Documentation Site",
 47 |   customDomain: "docs.example.com",
 48 |   userId: "developer-1",
 49 | });
 50 | ```
 51 | 
 52 | ## Parameters
 53 | 
 54 | | Parameter      | Type     | Required | Description                                                                 |
 55 | | -------------- | -------- | -------- | --------------------------------------------------------------------------- |
 56 | | `repository`   | `string` | ✅       | Repository path (local) or URL (remote)                                     |
 57 | | `ssg`          | `enum`   | ⚠️\*     | Static site generator: `jekyll`, `hugo`, `docusaurus`, `mkdocs`, `eleventy` |
 58 | | `branch`       | `string` | ❌       | Target branch for deployment (default: `gh-pages`)                          |
 59 | | `customDomain` | `string` | ❌       | Custom domain for GitHub Pages                                              |
 60 | | `projectPath`  | `string` | ❌       | Local project path for tracking                                             |
 61 | | `projectName`  | `string` | ❌       | Project name for tracking                                                   |
 62 | | `analysisId`   | `string` | ❌       | Repository analysis ID for SSG retrieval                                    |
 63 | | `userId`       | `string` | ❌       | User ID for preference tracking (default: `default`)                        |
 64 | 
 65 | \*Required unless `analysisId` is provided for SSG retrieval from Knowledge Graph
 66 | 
 67 | ## SSG-Specific Workflows
 68 | 
 69 | ### Docusaurus
 70 | 
 71 | - Node.js setup with configurable version
 72 | - Package manager auto-detection (npm/yarn/pnpm)
 73 | - Build caching optimization
 74 | - Working directory support for monorepos
 75 | 
 76 | ### Hugo
 77 | 
 78 | - Extended Hugo version with latest releases
 79 | - Asset optimization and minification
 80 | - Submodule support for themes
 81 | - Custom build command detection
 82 | 
 83 | ### Jekyll
 84 | 
 85 | - Ruby environment with Bundler
 86 | - Gemfile dependency management
 87 | - Production environment variables
 88 | - Custom plugin support
 89 | 
 90 | ### MkDocs
 91 | 
 92 | - Python environment setup
 93 | - Requirements.txt dependency installation
 94 | - Direct GitHub Pages deployment
 95 | - Custom branch targeting
 96 | 
 97 | ### Eleventy (11ty)
 98 | 
 99 | - Node.js with flexible configuration
100 | - Custom output directory detection
101 | - Plugin ecosystem support
102 | - Development server integration
103 | 
104 | ## Generated Workflow Features
105 | 
106 | ### Security Best Practices
107 | 
108 | - **Minimal Permissions**: Only required `pages:write` and `id-token:write` permissions
109 | - **OIDC Token Authentication**: JWT-based deployment validation
110 | - **Environment Protection**: Production deployment safeguards
111 | - **Dependency Scanning**: Automated security vulnerability checks
112 | 
113 | ### Performance Optimizations
114 | 
115 | - **Build Caching**: Package manager and dependency caching
116 | - **Incremental Builds**: Only rebuild changed content when possible
117 | - **Asset Optimization**: Minification and compression
118 | - **Parallel Processing**: Multi-stage builds where applicable
119 | 
120 | ### Error Handling
121 | 
122 | - **Graceful Failures**: Comprehensive error reporting and recovery
123 | - **Debug Information**: Detailed logging for troubleshooting
124 | - **Health Checks**: Post-deployment validation
125 | - **Rollback Support**: Automated rollback on deployment failures
126 | 
127 | ## Knowledge Graph Integration
128 | 
129 | ### Deployment Tracking
130 | 
131 | ```typescript
132 | // Successful deployment tracking
133 | await trackDeployment(projectId, ssg, true, {
134 |   buildTime: executionTime,
135 |   branch: targetBranch,
136 |   customDomain: domain,
137 | });
138 | 
139 | // Failed deployment tracking
140 | await trackDeployment(projectId, ssg, false, {
141 |   errorMessage: error.message,
142 |   failureStage: "build|deploy|verification",
143 | });
144 | ```
145 | 
146 | ### SSG Retrieval Logic
147 | 
148 | 1. **Check Analysis ID**: Query project node in Knowledge Graph
149 | 2. **Get Recommendations**: Retrieve SSG recommendations sorted by confidence
150 | 3. **Fallback to History**: Use most recent successful deployment
151 | 4. **Smart Filtering**: Only consider successful deployments (see the sketch below)
152 | 
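    | Expressed as code, the fallback chain looks roughly like this. A minimal sketch, assuming hypothetical `getRecommendations`/`getDeployments` helpers; the real Knowledge Graph API may differ:
    | 
    | ```typescript
    | // Sketch of the SSG retrieval fallback (hypothetical KG helpers).
    | interface KG {
    |   getRecommendations(
    |     id: string,
    |   ): Promise<{ ssg: string; confidence: number }[]>;
    |   getDeployments(id: string): Promise<{ ssg: string; success: boolean }[]>;
    | }
    | 
    | async function resolveSSG(
    |   kg: KG,
    |   analysisId: string,
    | ): Promise<string | undefined> {
    |   // Steps 1-2: prefer analysis recommendations, highest confidence first.
    |   const recs = await kg.getRecommendations(analysisId);
    |   if (recs.length > 0) {
    |     return [...recs].sort((a, b) => b.confidence - a.confidence)[0].ssg;
    |   }
    |   // Steps 3-4: fall back to the most recent *successful* deployment.
    |   const ok = (await kg.getDeployments(analysisId)).filter((d) => d.success);
    |   return ok.at(-1)?.ssg; // undefined → caller must pass `ssg` explicitly
    | }
    | ```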
153 | ### User Preference Learning
154 | 
155 | - **Success Rate Tracking**: Monitor SSG deployment success rates (see the weighting sketch below)
156 | - **Usage Pattern Analysis**: Track frequency of SSG selections
157 | - **Personalized Recommendations**: Weight future suggestions based on history
158 | - **Multi-User Support**: Separate preference tracking per userId
159 | 
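    | A minimal sketch of how success-rate weighting could rank candidate SSGs (the shapes are hypothetical; the real preference store may differ):
    | 
    | ```typescript
    | interface SSGHistory {
    |   ssg: string;
    |   successes: number;
    |   failures: number;
    | }
    | 
    | // Sort candidates by historical success rate, best first.
    | function rankBySuccessRate(
    |   candidates: string[],
    |   history: SSGHistory[],
    | ): string[] {
    |   const rate = (h?: SSGHistory) =>
    |     h ? h.successes / Math.max(1, h.successes + h.failures) : 0.5; // neutral prior
    |   return [...candidates].sort(
    |     (a, b) =>
    |       rate(history.find((h) => h.ssg === b)) -
    |       rate(history.find((h) => h.ssg === a)),
    |   );
    | }
    | 
    | // rankBySuccessRate(["docusaurus", "hugo"], history)[0] → preferred SSG
    | ```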
160 | ## Examples
161 | 
162 | ### Complete Workflow Integration
163 | 
164 | ```javascript
165 | try {
166 |   // 1. Analyze repository
167 |   const analysis = await callTool("analyze_repository", {
168 |     path: "/path/to/project",
169 |   });
170 | 
171 |   // 2. Get SSG recommendation
172 |   const recommendation = await callTool("recommend_ssg", {
173 |     analysisId: analysis.analysisId,
174 |   });
175 | 
176 |   // 3. Deploy with recommended SSG
177 |   const deployment = await callTool("deploy_pages", {
178 |     repository: "/path/to/project",
179 |     analysisId: analysis.analysisId,
180 |     projectName: "My Project",
181 |     userId: "developer-1",
182 |   });
183 | 
184 |   console.log(`Deployed ${deployment.ssg} to ${deployment.branch}`);
185 | } catch (error) {
186 |   console.error("Deployment workflow failed:", error);
187 | }
188 | ```
189 | 
190 | ### Custom Domain Setup
191 | 
192 | ```javascript
193 | const result = await callTool("deploy_pages", {
194 |   repository: "/path/to/docs",
195 |   ssg: "hugo",
196 |   customDomain: "docs.mycompany.com",
197 |   branch: "main", // Deploy from main branch
198 | });
199 | 
200 | // CNAME file automatically created
201 | console.log(`CNAME created: ${result.cnameCreated}`);
202 | ```
203 | 
204 | ### Monorepo Documentation
205 | 
206 | ```javascript
207 | const result = await callTool("deploy_pages", {
208 |   repository: "/path/to/monorepo",
209 |   ssg: "docusaurus",
210 |   // Will detect docs/ folder automatically
211 |   projectPath: "/path/to/monorepo/packages/docs",
212 | });
213 | 
214 | console.log(`Docs folder: ${result.detectedConfig.docsFolder}`);
215 | console.log(`Build command: ${result.detectedConfig.buildCommand}`);
216 | ```
217 | 
218 | ## Response Format
219 | 
220 | ### Success Response
221 | 
222 | ```javascript
223 | {
224 |   repository: "/path/to/project",
225 |   ssg: "docusaurus",
226 |   branch: "gh-pages",
227 |   customDomain: "docs.example.com",
228 |   workflowPath: "deploy-docs.yml",
229 |   cnameCreated: true,
230 |   repoPath: "/path/to/project",
231 |   detectedConfig: {
232 |     docsFolder: "docs",
233 |     buildCommand: "npm run build",
234 |     outputPath: "./build",
235 |     packageManager: "npm",
236 |     workingDirectory: "docs"
237 |   }
238 | }
239 | ```
240 | 
241 | ### Error Response
242 | 
243 | ```javascript
244 | {
245 |   success: false,
246 |   error: {
247 |     code: "SSG_NOT_SPECIFIED",
248 |     message: "SSG parameter is required. Either provide it directly or ensure analysisId points to a project with SSG recommendations.",
249 |     resolution: "Run analyze_repository and recommend_ssg first, or specify the SSG parameter explicitly."
250 |   }
251 | }
252 | ```
253 | 
254 | ## Error Codes
255 | 
256 | | Code                         | Description                                       | Resolution                                          |
257 | | ---------------------------- | ------------------------------------------------- | --------------------------------------------------- |
258 | | `SSG_NOT_SPECIFIED`          | No SSG provided and none found in Knowledge Graph | Provide SSG parameter or run analysis first         |
259 | | `DEPLOYMENT_SETUP_FAILED`    | Failed to create workflow files                   | Check repository permissions and path accessibility |
260 | | `INVALID_REPOSITORY`         | Repository path or URL invalid                    | Verify repository exists and is accessible          |
261 | | `WORKFLOW_GENERATION_FAILED` | Failed to generate SSG-specific workflow          | Check SSG parameter and project structure           |
262 | 
263 | ## Best Practices
264 | 
265 | ### Repository Structure
266 | 
267 | - Place documentation in standard folders (`docs/`, `website/`, `documentation/`)
268 | - Include `package.json` for Node.js projects with proper scripts
269 | - Use lockfiles (`package-lock.json`, `yarn.lock`, `pnpm-lock.yaml`) for dependency consistency
270 | 
271 | ### Workflow Optimization
272 | 
273 | - Enable GitHub Pages in repository settings before first deployment
274 | - Use semantic versioning for documentation releases
275 | - Configure branch protection rules for production deployments
276 | - Monitor deployment logs for performance bottlenecks
277 | 
278 | ### Knowledge Graph Benefits
279 | 
280 | - Run `analyze_repository` before deployment for optimal SSG selection
281 | - Use consistent `userId` for personalized recommendations
282 | - Provide `projectName` and `projectPath` for deployment tracking
283 | - Review deployment history through Knowledge Graph queries
284 | 
285 | ## Troubleshooting
286 | 
287 | ### Common Issues
288 | 
289 | **Build Failures**
290 | 
291 | - Verify all dependencies are listed in `package.json` or `requirements.txt`
292 | - Check Node.js/Python version compatibility
293 | - Ensure build scripts are properly configured
294 | 
295 | **Permission Errors**
296 | 
297 | - Enable GitHub Actions in repository settings
298 | - Check workflow file permissions (should be automatically handled)
299 | - Verify GitHub Pages is enabled for the target branch
300 | 
301 | **Custom Domain Issues**
302 | 
303 | - Verify DNS configuration points to GitHub Pages
304 | - Allow 24-48 hours for DNS propagation
305 | - Check CNAME file is created in repository root
306 | 
307 | ### Debug Workflow
308 | 
309 | 1. Check GitHub Actions logs in repository
310 | 2. Verify workflow file syntax using GitHub workflow validator
311 | 3. Test build locally using same commands as workflow
312 | 4. Review Knowledge Graph deployment history for patterns
313 | 
314 | ## Related Tools
315 | 
316 | - [`analyze_repository`](../how-to/repository-analysis.md) - Repository analysis for SSG recommendations
317 | - [`recommend_ssg`](./mcp-tools.md#recommend_ssg) - SSG recommendation engine
318 | - [`verify_deployment`](./mcp-tools.md#verify_deployment) - Deployment verification and health checks
319 | - [`manage_preferences`](./mcp-tools.md#manage_preferences) - User preference management
320 | 
```

--------------------------------------------------------------------------------
/src/tools/track-documentation-freshness.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Track Documentation Freshness Tool
  3 |  *
  4 |  * Scans documentation directory for staleness markers,
  5 |  * identifies files needing updates based on configurable time thresholds.
  6 |  */
  7 | 
  8 | import { z } from "zod";
  9 | import {
 10 |   scanDocumentationFreshness,
 11 |   STALENESS_PRESETS,
 12 |   type StalenessThreshold,
 13 |   type FreshnessScanReport,
 14 | } from "../utils/freshness-tracker.js";
 15 | import { type MCPToolResponse } from "../types/api.js";
 16 | import {
 17 |   storeFreshnessEvent,
 18 |   getStalenessInsights,
 19 | } from "../memory/freshness-kg-integration.js";
 20 | 
 21 | /**
 22 |  * Input schema for track_documentation_freshness tool
 23 |  */
 24 | export const TrackDocumentationFreshnessSchema = z.object({
 25 |   docsPath: z.string().describe("Path to documentation directory"),
 26 |   projectPath: z
 27 |     .string()
 28 |     .optional()
 29 |     .describe("Path to project root (for knowledge graph tracking)"),
 30 |   warningThreshold: z
 31 |     .object({
 32 |       value: z.number().positive(),
 33 |       unit: z.enum(["minutes", "hours", "days"]),
 34 |     })
 35 |     .optional()
 36 |     .describe("Warning threshold (yellow flag)"),
 37 |   staleThreshold: z
 38 |     .object({
 39 |       value: z.number().positive(),
 40 |       unit: z.enum(["minutes", "hours", "days"]),
 41 |     })
 42 |     .optional()
 43 |     .describe("Stale threshold (orange flag)"),
 44 |   criticalThreshold: z
 45 |     .object({
 46 |       value: z.number().positive(),
 47 |       unit: z.enum(["minutes", "hours", "days"]),
 48 |     })
 49 |     .optional()
 50 |     .describe("Critical threshold (red flag)"),
 51 |   preset: z
 52 |     .enum(["realtime", "active", "recent", "weekly", "monthly", "quarterly"])
 53 |     .optional()
 54 |     .describe("Use predefined threshold preset"),
 55 |   includeFileList: z
 56 |     .boolean()
 57 |     .optional()
 58 |     .default(true)
 59 |     .describe("Include detailed file list in response"),
 60 |   sortBy: z
 61 |     .enum(["age", "path", "staleness"])
 62 |     .optional()
 63 |     .default("staleness")
 64 |     .describe("Sort order for file list"),
 65 |   storeInKG: z
 66 |     .boolean()
 67 |     .optional()
 68 |     .default(true)
 69 |     .describe(
 70 |       "Store tracking event in knowledge graph for historical analysis",
 71 |     ),
 72 | });
 73 | 
 74 | export type TrackDocumentationFreshnessInput = z.input<
 75 |   typeof TrackDocumentationFreshnessSchema
 76 | >;
 77 | 
 78 | /**
 79 |  * Format freshness report for display
 80 |  */
 81 | function formatFreshnessReport(
 82 |   report: FreshnessScanReport,
 83 |   includeFileList: boolean,
 84 |   sortBy: "age" | "path" | "staleness",
 85 | ): string {
 86 |   const {
 87 |     totalFiles,
 88 |     filesWithMetadata,
 89 |     filesWithoutMetadata,
 90 |     freshFiles,
 91 |     warningFiles,
 92 |     staleFiles,
 93 |     criticalFiles,
 94 |     files,
 95 |     thresholds,
 96 |   } = report;
 97 | 
 98 |   let output = "# Documentation Freshness Report\n\n";
 99 |   output += `**Scanned at**: ${new Date(report.scannedAt).toLocaleString()}\n`;
100 |   output += `**Documentation path**: ${report.docsPath}\n\n`;
101 | 
102 |   // Summary statistics
103 |   output += "## Summary Statistics\n\n";
104 |   output += `- **Total files**: ${totalFiles}\n`;
105 |   output += `- **With metadata**: ${filesWithMetadata} (${
106 |     totalFiles > 0 ? Math.round((filesWithMetadata / totalFiles) * 100) : 0
107 |   }%)\n`;
108 |   output += `- **Without metadata**: ${filesWithoutMetadata}\n\n`;
109 | 
110 |   // Freshness breakdown
111 |   output += "## Freshness Breakdown\n\n";
112 |   output += `- ✅ **Fresh**: ${freshFiles} files\n`;
113 |   output += `- 🟡 **Warning**: ${warningFiles} files (older than ${thresholds.warning.value} ${thresholds.warning.unit})\n`;
114 |   output += `- 🟠 **Stale**: ${staleFiles} files (older than ${thresholds.stale.value} ${thresholds.stale.unit})\n`;
115 |   output += `- 🔴 **Critical**: ${criticalFiles} files (older than ${thresholds.critical.value} ${thresholds.critical.unit})\n`;
116 |   output += `- ❓ **Unknown**: ${filesWithoutMetadata} files (no metadata)\n\n`;
117 | 
118 |   // Recommendations
119 |   if (filesWithoutMetadata > 0 || criticalFiles > 0 || staleFiles > 0) {
120 |     output += "## Recommendations\n\n";
121 | 
122 |     if (filesWithoutMetadata > 0) {
123 |       output += `⚠️ **${filesWithoutMetadata} files lack freshness metadata**. Run \`validate_documentation_freshness\` to initialize metadata.\n\n`;
124 |     }
125 | 
126 |     if (criticalFiles > 0) {
127 |       output += `🔴 **${criticalFiles} files are critically stale**. Immediate review and update recommended.\n\n`;
128 |     } else if (staleFiles > 0) {
129 |       output += `🟠 **${staleFiles} files are stale**. Consider reviewing and updating soon.\n\n`;
130 |     }
131 |   }
132 | 
133 |   // File list
134 |   if (includeFileList && files.length > 0) {
135 |     output += "## File Details\n\n";
136 | 
137 |     // Sort files
138 |     const sortedFiles = [...files];
139 |     switch (sortBy) {
140 |       case "age":
141 |         sortedFiles.sort((a, b) => (b.ageInMs || 0) - (a.ageInMs || 0));
142 |         break;
143 |       case "path":
144 |         sortedFiles.sort((a, b) =>
145 |           a.relativePath.localeCompare(b.relativePath),
146 |         );
147 |         break;
148 |       case "staleness": {
149 |         const order = {
150 |           critical: 0,
151 |           stale: 1,
152 |           warning: 2,
153 |           fresh: 3,
154 |           unknown: 4,
155 |         };
156 |         sortedFiles.sort(
157 |           (a, b) => order[a.stalenessLevel] - order[b.stalenessLevel],
158 |         );
159 |         break;
160 |       }
161 |     }
162 | 
163 |     // Group by staleness level
164 |     const grouped = {
165 |       critical: sortedFiles.filter((f) => f.stalenessLevel === "critical"),
166 |       stale: sortedFiles.filter((f) => f.stalenessLevel === "stale"),
167 |       warning: sortedFiles.filter((f) => f.stalenessLevel === "warning"),
168 |       fresh: sortedFiles.filter((f) => f.stalenessLevel === "fresh"),
169 |       unknown: sortedFiles.filter((f) => f.stalenessLevel === "unknown"),
170 |     };
171 | 
172 |     for (const [level, levelFiles] of Object.entries(grouped)) {
173 |       if (levelFiles.length === 0) continue;
174 | 
175 |       const icon = {
176 |         critical: "🔴",
177 |         stale: "🟠",
178 |         warning: "🟡",
179 |         fresh: "✅",
180 |         unknown: "❓",
181 |       }[level];
182 | 
183 |       output += `### ${icon} ${
184 |         level.charAt(0).toUpperCase() + level.slice(1)
185 |       } (${levelFiles.length})\n\n`;
186 | 
187 |       for (const file of levelFiles) {
188 |         output += `- **${file.relativePath}**`;
189 | 
190 |         if (file.ageFormatted) {
191 |           output += ` - Last updated ${file.ageFormatted} ago`;
192 |         }
193 | 
194 |         if (file.metadata?.validated_against_commit) {
195 |           output += ` (commit: ${file.metadata.validated_against_commit.substring(
196 |             0,
197 |             7,
198 |           )})`;
199 |         }
200 | 
201 |         if (!file.hasMetadata) {
202 |           output += " - ⚠️ No metadata";
203 |         }
204 | 
205 |         output += "\n";
206 |       }
207 | 
208 |       output += "\n";
209 |     }
210 |   }
211 | 
212 |   return output;
213 | }
214 | 
215 | /**
216 |  * Track documentation freshness
217 |  */
218 | export async function trackDocumentationFreshness(
219 |   input: TrackDocumentationFreshnessInput,
220 | ): Promise<MCPToolResponse> {
221 |   const startTime = Date.now();
222 | 
223 |   try {
224 |     const {
225 |       docsPath,
226 |       projectPath,
227 |       warningThreshold,
228 |       staleThreshold,
229 |       criticalThreshold,
230 |       preset,
231 |       includeFileList,
232 |       sortBy,
233 |       storeInKG,
234 |     } = input;
235 | 
236 |     // Determine thresholds
237 |     let thresholds: {
238 |       warning?: StalenessThreshold;
239 |       stale?: StalenessThreshold;
240 |       critical?: StalenessThreshold;
241 |     } = {};
242 | 
243 |     if (preset) {
244 |       // Use preset thresholds: stale = 2x warning, critical = 3x warning
245 |       const presetThreshold = STALENESS_PRESETS[preset];
246 |       thresholds = {
247 |         warning: presetThreshold,
248 |         stale: { value: presetThreshold.value * 2, unit: presetThreshold.unit },
249 |         critical: {
250 |           value: presetThreshold.value * 3,
251 |           unit: presetThreshold.unit,
252 |         },
253 |       };
254 |     } else {
255 |       // Use custom thresholds
256 |       if (warningThreshold) thresholds.warning = warningThreshold;
257 |       if (staleThreshold) thresholds.stale = staleThreshold;
258 |       if (criticalThreshold) thresholds.critical = criticalThreshold;
259 |     }
260 | 
261 |     // Scan documentation
262 |     const report = await scanDocumentationFreshness(docsPath, thresholds);
263 | 
264 |     // Store in knowledge graph if requested and projectPath provided
265 |     let kgInsights:
266 |       | Awaited<ReturnType<typeof getStalenessInsights>>
267 |       | undefined;
268 |     if (storeInKG !== false && projectPath) {
269 |       try {
270 |         await storeFreshnessEvent(projectPath, docsPath, report, "scan");
271 |         kgInsights = await getStalenessInsights(projectPath);
272 |       } catch (error) {
273 |         // KG storage failed, but continue with the response
274 |         console.warn(
275 |           "Failed to store freshness event in knowledge graph:",
276 |           error,
277 |         );
278 |       }
279 |     }
280 | 
281 |     // Format response
282 |     const formattedReport = formatFreshnessReport(
283 |       report,
284 |       includeFileList ?? true,
285 |       sortBy ?? "staleness",
286 |     );
287 | 
288 |     // Add KG insights to formatted report if available
289 |     let enhancedReport = formattedReport;
290 |     if (kgInsights && kgInsights.totalEvents > 0) {
291 |       enhancedReport += "\n## Historical Insights\n\n";
292 |       enhancedReport += `- **Total tracking events**: ${kgInsights.totalEvents}\n`;
293 |       enhancedReport += `- **Average improvement score**: ${(
294 |         kgInsights.averageImprovementScore * 100
295 |       ).toFixed(1)}%\n`;
296 |       enhancedReport += `- **Trend**: ${
297 |         kgInsights.trend === "improving"
298 |           ? "📈 Improving"
299 |           : kgInsights.trend === "declining"
300 |             ? "📉 Declining"
301 |             : "➡️ Stable"
302 |       }\n\n`;
303 | 
304 |       if (kgInsights.recommendations.length > 0) {
305 |         enhancedReport += "### Knowledge Graph Insights\n\n";
306 |         for (const rec of kgInsights.recommendations) {
307 |           enhancedReport += `${rec}\n\n`;
308 |         }
309 |       }
310 |     }
311 | 
312 |     // Convert KG insights to Recommendation objects
313 |     const recommendations =
314 |       kgInsights?.recommendations.map((rec) => {
315 |         // Determine type based on content
316 |         let type: "info" | "warning" | "critical" = "info";
317 |         if (rec.includes("🔴") || rec.includes("critical")) {
318 |           type = "critical";
319 |         } else if (
320 |           rec.includes("🟠") ||
321 |           rec.includes("⚠️") ||
322 |           rec.includes("warning")
323 |         ) {
324 |           type = "warning";
325 |         }
326 | 
327 |         return {
328 |           type,
329 |           title: "Documentation Freshness Insight",
330 |           description: rec,
331 |         };
332 |       }) || [];
333 | 
334 |     const response: MCPToolResponse = {
335 |       success: true,
336 |       data: {
337 |         summary: `Scanned ${report.totalFiles} files: ${report.criticalFiles} critical, ${report.staleFiles} stale, ${report.warningFiles} warnings, ${report.freshFiles} fresh`,
338 |         report,
339 |         thresholds: thresholds,
340 |         formattedReport: enhancedReport,
341 |         kgInsights,
342 |       },
343 |       metadata: {
344 |         toolVersion: "1.0.0",
345 |         executionTime: Date.now() - startTime,
346 |         timestamp: new Date().toISOString(),
347 |       },
348 |       recommendations,
349 |     };
350 | 
351 |     return response;
352 |   } catch (error) {
353 |     return {
354 |       success: false,
355 |       error: {
356 |         code: "FRESHNESS_TRACKING_FAILED",
357 |         message:
358 |           error instanceof Error
359 |             ? error.message
360 |             : "Unknown error tracking documentation freshness",
361 |         resolution: "Check that the documentation path exists and is readable",
362 |       },
363 |       metadata: {
364 |         toolVersion: "1.0.0",
365 |         executionTime: Date.now() - startTime,
366 |         timestamp: new Date().toISOString(),
367 |       },
368 |     };
369 |   }
370 | }
371 | 
```

--------------------------------------------------------------------------------
/docs/tutorials/development-setup.md:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | documcp:
  3 |   last_updated: "2025-11-20T00:46:21.970Z"
  4 |   last_validated: "2025-12-09T19:41:38.601Z"
  5 |   auto_updated: false
  6 |   update_frequency: monthly
  7 |   validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
  8 | ---
  9 | 
 10 | # Setting Up Your Development Environment
 11 | 
 12 | This tutorial covers setting up a development environment for ongoing documentation work with DocuMCP, including local testing, content workflows, and maintenance automation.
 13 | 
 14 | ## What You'll Set Up
 15 | 
 16 | By the end of this tutorial, you'll have:
 17 | 
 18 | - Local documentation development environment
 19 | - Live reload and preview capabilities
 20 | - Content validation and testing workflow
 21 | - Automated quality checks
 22 | - Integration with your existing development tools
 23 | 
 24 | ## Prerequisites
 25 | 
 26 | - Completed [Getting Started](getting-started.md) and [First Deployment](first-deployment.md)
 27 | - Node.js 20.0.0+ installed
 28 | - Your preferred code editor (VS Code recommended)
 29 | - Git and GitHub CLI (optional but recommended)
 30 | 
 31 | ## Development Environment Setup
 32 | 
 33 | ### Step 1: Local Development Server
 34 | 
 35 | Set up local development with live reload:
 36 | 
 37 | ```bash
 38 | # Test local deployment before pushing to GitHub
 39 | "test my documentation build locally with live reload"
 40 | ```
 41 | 
 42 | This will:
 43 | 
 44 | - Install development dependencies
 45 | - Start local server (typically on http://localhost:3000)
 46 | - Enable live reload for instant preview
 47 | - Validate build process
 48 | 
 49 | **For different SSGs:**
 50 | 
 51 | **Docusaurus:**
 52 | 
 53 | ```bash
 54 | npm run start
 55 | # Opens http://localhost:3000 with live reload
 56 | ```
 57 | 
 58 | **MkDocs:**
 59 | 
 60 | ```bash
 61 | mkdocs serve
 62 | # Opens http://127.0.0.1:8000 with auto-reload
 63 | ```
 64 | 
 65 | **Hugo:**
 66 | 
 67 | ```bash
 68 | hugo server -D
 69 | # Opens http://localhost:1313 with live reload
 70 | ```
 71 | 
 72 | **Jekyll:**
 73 | 
 74 | ```bash
 75 | bundle exec jekyll serve --livereload
 76 | # Opens http://localhost:4000 with live reload
 77 | ```
 78 | 
 79 | ### Step 2: Content Validation Workflow
 80 | 
 81 | Set up automated content validation:
 82 | 
 83 | ```bash
 84 | # Validate all documentation content
 85 | "validate my documentation content for accuracy and completeness"
 86 | ```
 87 | 
 88 | This checks:
 89 | 
 90 | - **Link validation**: Internal and external links
 91 | - **Code syntax**: All code blocks and examples
 92 | - **Image references**: Missing or broken images
 93 | - **Content structure**: Diataxis compliance
 94 | - **SEO optimization**: Meta tags, headings
 95 | 
 96 | ### Step 3: Quality Assurance Integration
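    | These checks map directly onto the validation tools. A minimal sketch, assuming the `callTool` helper from the API reference (`validationType: "all"` is illustrative; see the tool reference for accepted values):
    | 
    | ```typescript
    | declare function callTool(
    |   name: string,
    |   args: Record<string, unknown>,
    | ): Promise<unknown>;
    | 
    | // Content validation: links, code blocks, references.
    | await callTool("validate_content", {
    |   contentPath: "./docs",
    |   validationType: "all", // illustrative value
    | });
    | 
    | // Dedicated link checking, including external URLs.
    | await callTool("check_documentation_links", {
    |   documentation_path: "./docs",
    |   check_external_links: true,
    | });
    | ```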
 97 | 
 98 | Integrate quality checks into your workflow:
 99 | 
100 | ```bash
101 | # Set up comprehensive documentation quality checks
102 | "check all documentation links and validate content quality"
103 | ```
104 | 
105 | **Available validation levels:**
106 | 
107 | - **Basic**: Link checking and syntax validation
108 | - **Comprehensive**: Full content analysis with Diataxis compliance
109 | - **Advanced**: Performance testing and SEO analysis
110 | 
111 | ### Step 4: Development Scripts Setup
112 | 
113 | Add these scripts to your `package.json`:
114 | 
115 | ```json
116 | {
117 |   "scripts": {
118 |     "docs:dev": "docusaurus start",
119 |     "docs:build": "docusaurus build",
120 |     "docs:serve": "docusaurus serve",
121 |     "docs:validate": "npm run docs:check-links && npm run docs:test-build",
122 |     "docs:check-links": "markdown-link-check docs/**/*.md",
123 |     "docs:test-build": "npm run docs:build && npm run docs:serve -- --no-open",
124 |     "docs:deploy": "npm run docs:validate && npm run docs:build"
125 |   }
126 | }
127 | ```
128 | 
129 | ## Editor Configuration
130 | 
131 | ### VS Code Setup
132 | 
133 | Create `.vscode/settings.json`:
134 | 
135 | ```json
136 | {
137 |   "markdownlint.config": {
138 |     "MD013": false,
139 |     "MD033": false
140 |   },
141 |   "files.associations": {
142 |     "*.mdx": "mdx"
143 |   },
144 |   "editor.wordWrap": "on",
145 |   "editor.quickSuggestions": {
146 |     "strings": true
147 |   },
148 |   "[markdown]": {
149 |     "editor.defaultFormatter": "esbenp.prettier-vscode",
150 |     "editor.quickSuggestions": {
151 |       "comments": "off",
152 |       "strings": "off",
153 |       "other": "off"
154 |     }
155 |   }
156 | }
157 | ```
158 | 
159 | **Recommended VS Code Extensions:**
160 | 
161 | - Markdown All in One
162 | - markdownlint
163 | - Prettier - Code formatter
164 | - GitLens
165 | - Live Server (for static preview)
166 | 
167 | ### Content Writing Workflow
168 | 
169 | Establish a content creation workflow:
170 | 
171 | 1. **Create branch** for documentation changes
172 | 2. **Write content** using Diataxis principles
173 | 3. **Test locally** with live server
174 | 4. **Validate content** using DocuMCP tools
175 | 5. **Review and refine** based on validation feedback
176 | 6. **Commit and push** to trigger deployment
177 | 
178 | ## Automated Quality Checks
179 | 
180 | ### Pre-commit Hooks
181 | 
182 | Set up automated checks before commits:
183 | 
184 | ```bash
185 | # Install husky for git hooks
186 | npm install --save-dev husky
187 | 
188 | # Set up pre-commit hook
189 | npx husky add .husky/pre-commit "npm run docs:validate"
190 | ```
191 | 
192 | Then edit `.husky/pre-commit` to run the full check suite:
193 | 
194 | ```bash
195 | #!/usr/bin/env sh
196 | . "$(dirname -- "$0")/_/husky.sh"
197 | 
198 | echo "🔍 Validating documentation..."
199 | npm run docs:validate
200 | 
201 | echo "📝 Checking markdown formatting..."
202 | npx prettier --check "docs/**/*.md"
203 | 
204 | echo "🔗 Validating links..."
205 | npm run docs:check-links
206 | ```
207 | 
208 | ### GitHub Actions Integration
209 | 
210 | Enhance your deployment workflow with quality gates:
211 | 
212 | ```yaml
213 | # .github/workflows/docs-quality.yml
214 | name: Documentation Quality
215 | 
216 | on:
217 |   pull_request:
218 |     paths: ["docs/**", "*.md"]
219 | 
220 | jobs:
221 |   quality-check:
222 |     runs-on: ubuntu-latest
223 |     steps:
224 |       - uses: actions/checkout@v4
225 | 
226 |       - name: Setup Node.js
227 |         uses: actions/setup-node@v4
228 |         with:
229 |           node-version: "20"
230 |           cache: "npm"
231 | 
232 |       - name: Install dependencies
233 |         run: npm ci
234 | 
235 |       - name: Validate documentation
236 |         run: |
237 |           npm run docs:validate
238 |           npm run docs:check-links
239 | 
240 |       - name: Test build
241 |         run: npm run docs:build
242 | 
243 |       - name: Comment PR
244 |         uses: actions/github-script@v7
245 |         with:
246 |           script: |
247 |             github.rest.issues.createComment({
248 |               issue_number: context.issue.number,
249 |               owner: context.repo.owner,
250 |               repo: context.repo.repo,
251 |               body: '✅ Documentation quality checks passed!'
252 |             });
253 | ```
254 | 
255 | ## Content Management Strategies
256 | 
257 | ### Diataxis Organization
258 | 
259 | Organize content following Diataxis principles:
260 | 
261 | **Directory Structure:**
262 | 
263 | ```
264 | docs/
265 | ├── tutorials/           # Learning-oriented (beginner-friendly)
266 | │   ├── getting-started.md
267 | │   ├── first-project.md
268 | │   └── advanced-concepts.md
269 | ├── how-to-guides/      # Problem-solving (practical steps)
270 | │   ├── troubleshooting.md
271 | │   ├── configuration.md
272 | │   └── deployment.md
273 | ├── reference/          # Information-oriented (comprehensive)
274 | │   ├── api-reference.md
275 | │   ├── cli-commands.md
276 | │   └── configuration-options.md
277 | └── explanation/        # Understanding-oriented (concepts)
278 |     ├── architecture.md
279 |     ├── design-decisions.md
280 |     └── best-practices.md
281 | ```
282 | 
283 | ### Content Templates
284 | 
285 | Create content templates for consistency:
286 | 
287 | **Tutorial Template:**
288 | 
289 | ```markdown
290 | # [Action] Tutorial
291 | 
292 | ## What You'll Learn
293 | 
294 | - Objective 1
295 | - Objective 2
296 | 
297 | ## Prerequisites
298 | 
299 | - Requirement 1
300 | - Requirement 2
301 | 
302 | ## Step-by-Step Instructions
303 | 
304 | ### Step 1: [Action]
305 | 
306 | Instructions...
307 | 
308 | ### Step 2: [Action]
309 | 
310 | Instructions...
311 | 
312 | ## Verification
313 | 
314 | How to confirm success...
315 | 
316 | ## Next Steps
317 | 
318 | Where to go next...
319 | ```
320 | 
321 | **How-to Guide Template:**
322 | 
323 | ```markdown
324 | # How to [Solve Problem]
325 | 
326 | ## Problem
327 | 
328 | Clear problem statement...
329 | 
330 | ## Solution
331 | 
332 | Step-by-step solution...
333 | 
334 | ## Alternative Approaches
335 | 
336 | Other ways to solve this...
337 | 
338 | ## Troubleshooting
339 | 
340 | Common issues and fixes...
341 | ```
342 | 
343 | ## Performance Optimization
344 | 
345 | ### Build Performance
346 | 
347 | Optimize build times:
348 | 
349 | ```bash
350 | # Enable build caching (Gatsby-based sites only)
351 | export GATSBY_CACHE_DIR=.cache
352 | export GATSBY_PUBLIC_DIR=public
353 | 
354 | # Parallel processing
355 | export NODE_OPTIONS="--max-old-space-size=8192"
356 | ```
357 | 
358 | **For large sites:**
359 | 
360 | - Enable incremental builds
361 | - Use build caching
362 | - Optimize image processing
363 | - Minimize plugin usage
364 | 
365 | ### Development Server Performance
366 | 
367 | Speed up local development:
368 | 
369 | ```bash
370 | # Fast refresh mode (Docusaurus)
371 | npm run start -- --fast-refresh
372 | 
373 | # Hot reload with polling (for file system issues)
374 | npm run start -- --poll
375 | 
376 | # Bind the dev server to a specific host and port
377 | npm run start -- --host 0.0.0.0 --port 3001
378 | ```
379 | 
380 | ## Maintenance Automation
381 | 
382 | ### Scheduled Content Validation
383 | 
384 | Set up scheduled validation:
385 | 
386 | ```yaml
387 | # .github/workflows/scheduled-validation.yml
388 | name: Scheduled Documentation Validation
389 | 
390 | on:
391 |   schedule:
392 |     - cron: "0 2 * * 1" # Every Monday at 2 AM
393 | 
394 | jobs:
395 |   validate:
396 |     runs-on: ubuntu-latest
397 |     steps:
398 |       - uses: actions/checkout@v4
399 | 
400 |       - name: Setup Node.js
401 |         uses: actions/setup-node@v4
402 |         with:
403 |           node-version: "20"
404 | 
405 |       - name: Full validation
406 |         run: |
407 |           npm ci
408 |           npm run docs:validate
409 | 
410 |       - name: Create issue on failure
411 |         if: failure()
412 |         uses: actions/github-script@v7
413 |         with:
414 |           script: |
415 |             github.rest.issues.create({
416 |               owner: context.repo.owner,
417 |               repo: context.repo.repo,
418 |               title: 'Scheduled Documentation Validation Failed',
419 |               body: 'The weekly documentation validation found issues. Check the workflow logs.',
420 |               labels: ['documentation', 'maintenance']
421 |             });
422 | ```
423 | 
424 | ### Dependency Updates
425 | 
426 | Automate dependency maintenance:
427 | 
428 | ```yaml
429 | # .github/dependabot.yml
430 | version: 2
431 | updates:
432 |   - package-ecosystem: "npm"
433 |     directory: "/"
434 |     schedule:
435 |       interval: "weekly"
436 |     open-pull-requests-limit: 5
437 |     labels:
438 |       - "dependencies"
439 |       - "documentation"
440 | ```
441 | 
442 | ## Collaboration Workflow
443 | 
444 | ### Team Development
445 | 
446 | For team documentation:
447 | 
448 | 1. **Branching strategy**: Feature branches for documentation changes
449 | 2. **Review process**: PR reviews for all documentation updates
450 | 3. **Style guide**: Consistent writing and formatting standards
451 | 4. **Content ownership**: Assign sections to team members
452 | 
453 | ### Review Checklist
454 | 
455 | Documentation PR review checklist:
456 | 
457 | - [ ] Content follows Diataxis principles
458 | - [ ] All links work (internal and external)
459 | - [ ] Code examples are tested and accurate
460 | - [ ] Images are optimized and accessible
461 | - [ ] SEO metadata is complete
462 | - [ ] Mobile responsiveness verified
463 | - [ ] Build succeeds locally and in CI
464 | 
465 | ## Next Steps
466 | 
467 | Your development environment is now ready! Next:
468 | 
469 | 1. **[Learn advanced prompting](../how-to/prompting-guide.md)** for DocuMCP
470 | 2. **[Set up monitoring](../how-to/site-monitoring.md)** for your live site
471 | 3. **[Optimize for performance](../how-to/performance-optimization.md)**
472 | 4. **[Configure custom domains](../how-to/custom-domains.md)** (optional)
473 | 
474 | ## Troubleshooting
475 | 
476 | **Common development issues:**
477 | 
478 | **Port conflicts:**
479 | 
480 | ```bash
481 | # Change default port
482 | npm run start -- --port 3001
483 | ```
484 | 
485 | **Memory issues:**
486 | 
487 | ```bash
488 | # Increase Node.js memory limit
489 | export NODE_OPTIONS="--max-old-space-size=8192"
490 | ```
491 | 
492 | **File watching problems:**
493 | 
494 | ```bash
495 | # Enable polling for file changes
496 | npm run start -- --poll
497 | ```
498 | 
499 | **Cache issues:**
500 | 
501 | ```bash
502 | # Clear build cache
503 | rm -rf .docusaurus .cache public
504 | npm run start
505 | ```
506 | 
507 | ## Summary
508 | 
509 | You now have:
510 | - ✅ Local development environment with live reload
511 | - ✅ Content validation and quality checking
512 | - ✅ Automated pre-commit hooks
513 | - ✅ CI/CD integration for quality gates
514 | - ✅ Performance optimization
515 | - ✅ Maintenance automation
516 | - ✅ Team collaboration workflow
517 | 
518 | Your documentation development environment is production-ready!
519 | 
```

--------------------------------------------------------------------------------
/tests/tools/recommend-ssg-preferences.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Tests for Phase 2.2: User Preference Integration
  3 |  * Tests recommend_ssg tool with user preference learning and application
  4 |  */
  5 | 
  6 | import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
  7 | import { promises as fs } from "fs";
  8 | import { join } from "path";
  9 | import { tmpdir } from "os";
 10 | import {
 11 |   initializeKnowledgeGraph,
 12 |   createOrUpdateProject,
 13 | } from "../../src/memory/kg-integration.js";
 14 | import { recommendSSG } from "../../src/tools/recommend-ssg.js";
 15 | import { MemoryManager } from "../../src/memory/manager.js";
 16 | import {
 17 |   getUserPreferenceManager,
 18 |   clearPreferenceManagerCache,
 19 | } from "../../src/memory/user-preferences.js";
 20 | 
 21 | describe("recommendSSG with User Preferences (Phase 2.2)", () => {
 22 |   let testDir: string;
 23 |   let originalEnv: string | undefined;
 24 |   let memoryManager: MemoryManager;
 25 | 
 26 |   // Helper to create analysis memory entry in correct format
 27 |   const createAnalysisMemory = async (analysisData: any) => {
 28 |     return await memoryManager.remember("analysis", analysisData);
 29 |   };
 30 | 
 31 |   beforeEach(async () => {
 32 |     // Create temporary test directory
 33 |     testDir = join(tmpdir(), `recommend-ssg-preferences-test-${Date.now()}`);
 34 |     await fs.mkdir(testDir, { recursive: true });
 35 | 
 36 |     // Set environment variable for storage
 37 |     originalEnv = process.env.DOCUMCP_STORAGE_DIR;
 38 |     process.env.DOCUMCP_STORAGE_DIR = testDir;
 39 | 
 40 |     // Initialize KG and memory
 41 |     await initializeKnowledgeGraph(testDir);
 42 |     memoryManager = new MemoryManager(testDir);
 43 |     await memoryManager.initialize();
 44 | 
 45 |     // Clear preference manager cache
 46 |     clearPreferenceManagerCache();
 47 |   });
 48 | 
 49 |   afterEach(async () => {
 50 |     // Restore environment
 51 |     if (originalEnv) {
 52 |       process.env.DOCUMCP_STORAGE_DIR = originalEnv;
 53 |     } else {
 54 |       delete process.env.DOCUMCP_STORAGE_DIR;
 55 |     }
 56 | 
 57 |     // Clean up test directory
 58 |     try {
 59 |       await fs.rm(testDir, { recursive: true, force: true });
 60 |     } catch (error) {
 61 |       console.warn("Failed to clean up test directory:", error);
 62 |     }
 63 | 
 64 |     // Clear preference manager cache
 65 |     clearPreferenceManagerCache();
 66 |   });
 67 | 
 68 |   describe("User Preference Application", () => {
 69 |     it("should apply user preferences when auto-apply is enabled", async () => {
 70 |       // Set up user preferences
 71 |       const userId = "test-user-1";
 72 |       const manager = await getUserPreferenceManager(userId);
 73 |       await manager.updatePreferences({
 74 |         preferredSSGs: ["hugo", "eleventy"],
 75 |         autoApplyPreferences: true,
 76 |       });
 77 | 
 78 |       // Create analysis that would normally recommend Docusaurus
 79 |       const memoryEntry = await createAnalysisMemory({
 80 |         path: "/test/js-project",
 81 |         dependencies: {
 82 |           ecosystem: "javascript",
 83 |           languages: ["javascript", "typescript"],
 84 |         },
 85 |         structure: { totalFiles: 60 },
 86 |       });
 87 | 
 88 |       // Get recommendation
 89 |       const result = await recommendSSG({
 90 |         analysisId: memoryEntry.id,
 91 |         userId,
 92 |       });
 93 | 
 94 |       const content = result.content[0];
 95 |       expect(content.type).toBe("text");
 96 |       const data = JSON.parse(content.text);
 97 | 
 98 |       // Should recommend Hugo (user's top preference)
 99 |       expect(data.recommended).toBe("hugo");
100 |       expect(data.reasoning[0]).toContain("Switched to hugo");
101 |       expect(data.reasoning[0]).toContain("usage history");
102 |     });
103 | 
104 |     it("should not apply preferences when auto-apply is disabled", async () => {
105 |       const userId = "test-user-2";
106 |       const manager = await getUserPreferenceManager(userId);
107 |       await manager.updatePreferences({
108 |         preferredSSGs: ["jekyll"],
109 |         autoApplyPreferences: false,
110 |       });
111 | 
112 |       const memoryEntry = await createAnalysisMemory({
113 |         path: "/test/js-project",
114 |         dependencies: {
115 |           ecosystem: "javascript",
116 |           languages: ["javascript"],
117 |         },
118 |         structure: { totalFiles: 60 },
119 |       });
120 | 
121 |       const result = await recommendSSG({
122 |         analysisId: memoryEntry.id,
123 |         userId,
124 |       });
125 | 
126 |       const content = result.content[0];
127 |       const data = JSON.parse(content.text);
128 | 
129 |       // Should use default recommendation, not user preference
130 |       expect(data.recommended).toBe("docusaurus");
131 |       expect(data.reasoning[0]).not.toContain("Switched");
132 |     });
133 | 
134 |     it("should keep recommendation if it matches user preference", async () => {
135 |       const userId = "test-user-3";
136 |       const manager = await getUserPreferenceManager(userId);
137 |       await manager.updatePreferences({
138 |         preferredSSGs: ["mkdocs"],
139 |         autoApplyPreferences: true,
140 |       });
141 | 
142 |       const memoryEntry = await createAnalysisMemory({
143 |         path: "/test/python-project",
144 |         dependencies: {
145 |           ecosystem: "python",
146 |           languages: ["python"],
147 |         },
148 |         structure: { totalFiles: 40 },
149 |       });
150 | 
151 |       const result = await recommendSSG({
152 |         analysisId: memoryEntry.id,
153 |         userId,
154 |       });
155 | 
156 |       const content = result.content[0];
157 |       const data = JSON.parse(content.text);
158 | 
159 |       // Should recommend mkdocs (matches both analysis and preference)
160 |       expect(data.recommended).toBe("mkdocs");
161 |       // Either "Matches" or "Switched to" is acceptable - both indicate preference was applied
162 |       expect(data.reasoning[0]).toMatch(
163 |         /Matches your preferred SSG|Switched to mkdocs/,
164 |       );
165 |     });
166 | 
167 |     it("should switch to user preference even if not ideal for ecosystem", async () => {
168 |       const userId = "test-user-4";
169 |       const manager = await getUserPreferenceManager(userId);
170 |       await manager.updatePreferences({
171 |         preferredSSGs: ["mkdocs", "jekyll"], // Python/Ruby SSGs
172 |         autoApplyPreferences: true,
173 |       });
174 | 
175 |       const memoryEntry = await createAnalysisMemory({
176 |         path: "/test/js-project",
177 |         dependencies: {
178 |           ecosystem: "javascript",
179 |           languages: ["javascript"],
180 |         },
181 |         structure: { totalFiles: 60 },
182 |       });
183 | 
184 |       const result = await recommendSSG({
185 |         analysisId: memoryEntry.id,
186 |         userId,
187 |       });
188 | 
189 |       const content = result.content[0];
190 |       const data = JSON.parse(content.text);
191 | 
192 |       // Should switch to mkdocs (user's top preference)
193 |       // User preferences override ecosystem recommendations
194 |       expect(data.recommended).toBe("mkdocs");
195 |       expect(data.reasoning[0]).toContain("Switched to mkdocs");
196 |       expect(data.reasoning[0]).toContain("usage history");
197 |     });
198 |   });
199 | 
200 |   describe("Preference Tracking Integration", () => {
201 |     it("should use default user when no userId provided", async () => {
202 |       const memoryEntry = await createAnalysisMemory({
203 |         path: "/test/project",
204 |         dependencies: {
205 |           ecosystem: "javascript",
206 |           languages: ["javascript"],
207 |         },
208 |         structure: { totalFiles: 50 },
209 |       });
210 | 
211 |       // Should not throw error with no userId
212 |       const result = await recommendSSG({
213 |         analysisId: memoryEntry.id,
214 |       });
215 | 
216 |       const content = result.content[0];
217 |       expect(content.type).toBe("text");
218 |       const data = JSON.parse(content.text);
219 |       expect(data.recommended).toBeDefined();
220 |     });
221 | 
222 |     it("should work with multiple users independently", async () => {
223 |       const user1 = "user1";
224 |       const user2 = "user2";
225 | 
226 |       // Set different preferences for each user
227 |       const manager1 = await getUserPreferenceManager(user1);
228 |       await manager1.updatePreferences({
229 |         preferredSSGs: ["hugo"],
230 |         autoApplyPreferences: true,
231 |       });
232 | 
233 |       const manager2 = await getUserPreferenceManager(user2);
234 |       await manager2.updatePreferences({
235 |         preferredSSGs: ["eleventy"],
236 |         autoApplyPreferences: true,
237 |       });
238 | 
239 |       const memoryEntry = await createAnalysisMemory({
240 |         path: "/test/project",
241 |         dependencies: {
242 |           ecosystem: "javascript",
243 |           languages: ["javascript"],
244 |         },
245 |         structure: { totalFiles: 50 },
246 |       });
247 | 
248 |       // Get recommendations for both users
249 |       const result1 = await recommendSSG({
250 |         analysisId: memoryEntry.id,
251 |         userId: user1,
252 |       });
253 |       const result2 = await recommendSSG({
254 |         analysisId: memoryEntry.id,
255 |         userId: user2,
256 |       });
257 | 
258 |       const data1 = JSON.parse(result1.content[0].text);
259 |       const data2 = JSON.parse(result2.content[0].text);
260 | 
261 |       // Each user should get their preferred SSG
262 |       expect(data1.recommended).toBe("hugo");
263 |       expect(data2.recommended).toBe("eleventy");
264 |     });
265 |   });
266 | 
267 |   describe("Confidence Adjustment", () => {
268 |     it("should boost confidence when preference is applied", async () => {
269 |       const userId = "test-user-5";
270 |       const manager = await getUserPreferenceManager(userId);
271 |       await manager.updatePreferences({
272 |         preferredSSGs: ["eleventy"],
273 |         autoApplyPreferences: true,
274 |       });
275 | 
276 |       const memoryEntry = await createAnalysisMemory({
277 |         path: "/test/js-project",
278 |         dependencies: {
279 |           ecosystem: "javascript",
280 |           languages: ["javascript"],
281 |         },
282 |         structure: { totalFiles: 60 },
283 |       });
284 | 
285 |       const result = await recommendSSG({
286 |         analysisId: memoryEntry.id,
287 |         userId,
288 |       });
289 | 
290 |       const content = result.content[0];
291 |       const data = JSON.parse(content.text);
292 | 
293 |       // Confidence should be boosted when preference is applied
294 |       // Base confidence varies by SSG, but preference adds +0.05 boost
295 |       expect(data.confidence).toBeGreaterThan(0.7);
296 |       expect(data.reasoning[0]).toContain("🎯");
297 |     });
298 |   });
299 | 
300 |   describe("Edge Cases", () => {
301 |     it("should handle empty preferred SSGs list", async () => {
302 |       const userId = "test-user-6";
303 |       const manager = await getUserPreferenceManager(userId);
304 |       await manager.updatePreferences({
305 |         preferredSSGs: [],
306 |         autoApplyPreferences: true,
307 |       });
308 | 
309 |       const memoryEntry = await createAnalysisMemory({
310 |         path: "/test/project",
311 |         dependencies: {
312 |           ecosystem: "javascript",
313 |           languages: ["javascript"],
314 |         },
315 |         structure: { totalFiles: 50 },
316 |       });
317 | 
318 |       const result = await recommendSSG({
319 |         analysisId: memoryEntry.id,
320 |         userId,
321 |       });
322 | 
323 |       const content = result.content[0];
324 |       const data = JSON.parse(content.text);
325 | 
326 |       // Should use default recommendation
327 |       expect(data.recommended).toBe("docusaurus");
328 |       expect(data.reasoning[0]).not.toContain("Switched");
329 |     });
330 | 
331 |     it("should handle preference manager initialization failure gracefully", async () => {
332 |       const memoryEntry = await createAnalysisMemory({
333 |         path: "/test/project",
334 |         dependencies: {
335 |           ecosystem: "javascript",
336 |           languages: ["javascript"],
337 |         },
338 |         structure: { totalFiles: 50 },
339 |       });
340 | 
341 |       // Should not throw even with invalid userId
342 |       const result = await recommendSSG({
343 |         analysisId: memoryEntry.id,
344 |         userId: "any-user-id",
345 |       });
346 | 
347 |       const content = result.content[0];
348 |       expect(content.type).toBe("text");
349 |       const data = JSON.parse(content.text);
350 |       expect(data.recommended).toBeDefined();
351 |     });
352 |   });
353 | });
354 | 
```
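
Taken together, these assertions pin down the preference-application contract: when `autoApplyPreferences` is true and `preferredSSGs` is non-empty, the user's top preference overrides the ecosystem default, the first reasoning entry is prefixed with a 🎯 marker mentioning usage history (or a match), and confidence receives a small boost. A minimal sketch of that behavior as the tests describe it — the names `applyUserPreference` and `Recommendation` are illustrative, not taken from the source, and the +0.05 boost is inferred from the comment in the confidence test:

```typescript
// Hypothetical shapes; the real tool returns this JSON inside an MCP text content block.
interface Recommendation {
  recommended: string;
  confidence: number;
  reasoning: string[];
}

// Sketch of the behavior asserted above, assuming a +0.05 confidence boost
// (capped at 1.0) and a 🎯-prefixed reasoning entry when a preference applies.
function applyUserPreference(
  base: Recommendation,
  preferredSSGs: string[],
  autoApplyPreferences: boolean,
): Recommendation {
  // Disabled auto-apply or an empty preference list leaves the default untouched.
  if (!autoApplyPreferences || preferredSSGs.length === 0) {
    return base;
  }
  const top = preferredSSGs[0];
  const confidence = Math.min(1, base.confidence + 0.05);
  if (base.recommended === top) {
    // Recommendation already matches the user's preference; keep it, note the match.
    return {
      ...base,
      confidence,
      reasoning: [`🎯 Matches your preferred SSG (${top})`, ...base.reasoning],
    };
  }
  // Preference wins even when the ecosystem default differs (e.g. mkdocs for a JS project).
  return {
    ...base,
    recommended: top,
    confidence,
    reasoning: [
      `🎯 Switched to ${top} based on your usage history`,
      ...base.reasoning,
    ],
  };
}
```

Note that per-user isolation (the `user1`/`user2` test) falls out of `getUserPreferenceManager(userId)` returning a distinct manager per user, which is why the cache-clearing calls in `beforeEach`/`afterEach` matter: a stale cached manager would leak preferences across tests.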

--------------------------------------------------------------------------------
/tests/prompts/guided-workflow-prompts.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { generateTechnicalWriterPrompts } from "../../src/prompts/technical-writer-prompts.js";
  2 | import { promises as fs } from "fs";
  3 | import { join } from "path";
  4 | import { tmpdir } from "os";
  5 | 
  6 | describe("Guided Workflow Prompts", () => {
  7 |   let tempDir: string;
  8 | 
  9 |   beforeEach(async () => {
 10 |     tempDir = join(
 11 |       tmpdir(),
 12 |       `test-prompts-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`,
 13 |     );
 14 |     await fs.mkdir(tempDir, { recursive: true });
 15 | 
 16 |     // Create a test project structure
 17 |     await fs.writeFile(
 18 |       join(tempDir, "package.json"),
 19 |       JSON.stringify({
 20 |         name: "test-project",
 21 |         version: "1.0.0",
 22 |         dependencies: { react: "^18.0.0" },
 23 |         scripts: { test: "jest", build: "webpack" },
 24 |       }),
 25 |     );
 26 |     await fs.writeFile(
 27 |       join(tempDir, "README.md"),
 28 |       "# Test Project\n\nThis is a test project.",
 29 |     );
 30 |     await fs.mkdir(join(tempDir, "src"));
 31 |     await fs.writeFile(join(tempDir, "src/index.js"), 'console.log("hello");');
 32 |     await fs.mkdir(join(tempDir, "tests"));
 33 |     await fs.writeFile(
 34 |       join(tempDir, "tests/index.test.js"),
 35 |       'test("basic", () => {});',
 36 |     );
 37 |   });
 38 | 
 39 |   afterEach(async () => {
 40 |     try {
 41 |       await fs.rm(tempDir, { recursive: true });
 42 |     } catch {
 43 |       // Ignore cleanup errors
 44 |     }
 45 |   });
 46 | 
 47 |   describe("analyze-and-recommend prompt", () => {
 48 |     it("should generate comprehensive analysis and recommendation prompt", async () => {
 49 |       const messages = await generateTechnicalWriterPrompts(
 50 |         "analyze-and-recommend",
 51 |         tempDir,
 52 |         {
 53 |           analysis_depth: "standard",
 54 |           preferences: "performance and ease of use",
 55 |         },
 56 |       );
 57 | 
 58 |       expect(messages).toHaveLength(1);
 59 |       expect(messages[0]).toHaveProperty("role", "user");
 60 |       expect(messages[0]).toHaveProperty("content");
 61 |       expect(messages[0].content).toHaveProperty("type", "text");
 62 |       expect(messages[0].content.text).toContain(
 63 |         "Execute a complete repository analysis",
 64 |       );
 65 |       expect(messages[0].content.text).toContain("SSG recommendation workflow");
 66 |       expect(messages[0].content.text).toContain("Analysis Depth: standard");
 67 |       expect(messages[0].content.text).toContain(
 68 |         "Preferences: performance and ease of use",
 69 |       );
 70 |       expect(messages[0].content.text).toContain("Repository Analysis");
 71 |       expect(messages[0].content.text).toContain("Implementation Guidance");
 72 |       expect(messages[0].content.text).toContain("Best Practices");
 73 |     });
 74 | 
 75 |     it("should use default values when optional parameters are not provided", async () => {
 76 |       const messages = await generateTechnicalWriterPrompts(
 77 |         "analyze-and-recommend",
 78 |         tempDir,
 79 |         {},
 80 |       );
 81 | 
 82 |       expect(messages[0].content.text).toContain("Analysis Depth: standard");
 83 |       expect(messages[0].content.text).toContain(
 84 |         "balanced approach with good community support",
 85 |       );
 86 |     });
 87 | 
 88 |     it("should include project context information", async () => {
 89 |       const messages = await generateTechnicalWriterPrompts(
 90 |         "analyze-and-recommend",
 91 |         tempDir,
 92 |         {
 93 |           analysis_depth: "deep",
 94 |         },
 95 |       );
 96 | 
 97 |       expect(messages[0].content.text).toContain("Type: node_application");
 98 |       expect(messages[0].content.text).toContain("Has Tests: true");
 99 |       expect(messages[0].content.text).toContain("Package Manager: npm");
100 |     });
101 |   });
102 | 
103 |   describe("setup-documentation prompt", () => {
104 |     it("should generate comprehensive documentation setup prompt", async () => {
105 |       const messages = await generateTechnicalWriterPrompts(
106 |         "setup-documentation",
107 |         tempDir,
108 |         {
109 |           ssg_type: "docusaurus",
110 |           include_examples: true,
111 |         },
112 |       );
113 | 
114 |       expect(messages).toHaveLength(1);
115 |       expect(messages[0]).toHaveProperty("role", "user");
116 |       expect(messages[0].content.text).toContain(
117 |         "Create a comprehensive documentation structure",
118 |       );
119 |       expect(messages[0].content.text).toContain("SSG Type: docusaurus");
120 |       expect(messages[0].content.text).toContain("Include Examples: true");
121 |       expect(messages[0].content.text).toContain(
122 |         "Diataxis Framework Implementation",
123 |       );
124 |       expect(messages[0].content.text).toContain(
125 |         "Tutorials: Learning-oriented content",
126 |       );
127 |       expect(messages[0].content.text).toContain(
128 |         "How-to Guides: Problem-solving content",
129 |       );
130 |       expect(messages[0].content.text).toContain(
131 |         "Reference: Information-oriented content",
132 |       );
133 |       expect(messages[0].content.text).toContain(
134 |         "Explanations: Understanding-oriented content",
135 |       );
136 |       expect(messages[0].content.text).toContain("Configuration Setup");
137 |       expect(messages[0].content.text).toContain("GitHub Pages deployment");
138 |       expect(messages[0].content.text).toContain("with examples");
139 |     });
140 | 
141 |     it("should handle minimal configuration", async () => {
142 |       const messages = await generateTechnicalWriterPrompts(
143 |         "setup-documentation",
144 |         tempDir,
145 |         {
146 |           include_examples: false,
147 |         },
148 |       );
149 | 
150 |       expect(messages[0].content.text).toContain(
151 |         "SSG Type: recommended based on project analysis",
152 |       );
153 |       expect(messages[0].content.text).toContain("Include Examples: false");
154 |       expect(messages[0].content.text).toContain("templates");
155 |       expect(messages[0].content.text).not.toContain("with examples");
156 |     });
157 | 
158 |     it("should include current documentation gaps", async () => {
159 |       const messages = await generateTechnicalWriterPrompts(
160 |         "setup-documentation",
161 |         tempDir,
162 |         {},
163 |       );
164 | 
165 |       expect(messages[0].content.text).toContain("Current Documentation Gaps:");
166 |       expect(messages[0].content.text).toContain("Development Integration");
167 |       expect(messages[0].content.text).toContain(
168 |         "production-ready documentation system",
169 |       );
170 |     });
171 |   });
172 | 
173 |   describe("troubleshoot-deployment prompt", () => {
174 |     it("should generate comprehensive troubleshooting prompt", async () => {
175 |       const messages = await generateTechnicalWriterPrompts(
176 |         "troubleshoot-deployment",
177 |         tempDir,
178 |         {
179 |           repository: "owner/repo",
180 |           deployment_url: "https://owner.github.io/repo",
181 |           issue_description: "build failing on GitHub Actions",
182 |         },
183 |       );
184 | 
185 |       expect(messages).toHaveLength(1);
186 |       expect(messages[0]).toHaveProperty("role", "user");
187 |       expect(messages[0].content.text).toContain(
188 |         "Diagnose and fix GitHub Pages deployment issues",
189 |       );
190 |       expect(messages[0].content.text).toContain("Repository: owner/repo");
191 |       expect(messages[0].content.text).toContain(
192 |         "Expected URL: https://owner.github.io/repo",
193 |       );
194 |       expect(messages[0].content.text).toContain(
195 |         "Issue Description: build failing on GitHub Actions",
196 |       );
197 |       expect(messages[0].content.text).toContain("Troubleshooting Checklist");
198 |       expect(messages[0].content.text).toContain("Repository Settings");
199 |       expect(messages[0].content.text).toContain("Build Configuration");
200 |       expect(messages[0].content.text).toContain("Content Issues");
201 |       expect(messages[0].content.text).toContain("Deployment Workflow");
202 |       expect(messages[0].content.text).toContain("Performance and Security");
203 |       expect(messages[0].content.text).toContain("Root cause analysis");
204 |       expect(messages[0].content.text).toContain("Systematic Testing");
205 |     });
206 | 
207 |     it("should use default values for optional parameters", async () => {
208 |       const messages = await generateTechnicalWriterPrompts(
209 |         "troubleshoot-deployment",
210 |         tempDir,
211 |         {
212 |           repository: "test/repo",
213 |         },
214 |       );
215 | 
216 |       expect(messages[0].content.text).toContain(
217 |         "Expected URL: GitHub Pages URL",
218 |       );
219 |       expect(messages[0].content.text).toContain(
220 |         "Issue Description: deployment not working as expected",
221 |       );
222 |     });
223 | 
224 |     it("should include project context for troubleshooting", async () => {
225 |       const messages = await generateTechnicalWriterPrompts(
226 |         "troubleshoot-deployment",
227 |         tempDir,
228 |         {
229 |           repository: "test/repo",
230 |         },
231 |       );
232 | 
233 |       expect(messages[0].content.text).toContain("Project Context");
234 |       expect(messages[0].content.text).toContain("Type: node_application");
235 |       expect(messages[0].content.text).toContain("Diagnostic Approach");
236 |       expect(messages[0].content.text).toContain("Systematic Testing");
237 |     });
238 |   });
239 | 
240 |   describe("Error handling", () => {
241 |     it("should throw error for unknown prompt type", async () => {
242 |       await expect(
243 |         generateTechnicalWriterPrompts("unknown-prompt-type", tempDir, {}),
244 |       ).rejects.toThrow("Unknown prompt type: unknown-prompt-type");
245 |     });
246 | 
247 |     it("should handle missing project directory gracefully", async () => {
248 |       const nonExistentDir = join(tmpdir(), "non-existent-dir");
249 | 
250 |       // Should not throw, but may have reduced context
251 |       const messages = await generateTechnicalWriterPrompts(
252 |         "analyze-and-recommend",
253 |         nonExistentDir,
254 |         {},
255 |       );
256 | 
257 |       expect(messages).toHaveLength(1);
258 |       expect(messages[0].content.text).toContain("repository analysis");
259 |     });
260 | 
261 |     it("should handle malformed package.json gracefully", async () => {
262 |       await fs.writeFile(join(tempDir, "package.json"), "invalid json content");
263 | 
264 |       const messages = await generateTechnicalWriterPrompts(
265 |         "setup-documentation",
266 |         tempDir,
267 |         {},
268 |       );
269 | 
270 |       expect(messages).toHaveLength(1);
271 |       expect(messages[0].content.text).toContain("documentation structure");
272 |     });
273 |   });
274 | 
275 |   describe("Prompt content validation", () => {
276 |     it("should generate prompts with consistent structure", async () => {
277 |       const promptTypes = [
278 |         "analyze-and-recommend",
279 |         "setup-documentation",
280 |         "troubleshoot-deployment",
281 |       ];
282 | 
283 |       for (const promptType of promptTypes) {
284 |         const args =
285 |           promptType === "troubleshoot-deployment"
286 |             ? { repository: "test/repo" }
287 |             : {};
288 | 
289 |         const messages = await generateTechnicalWriterPrompts(
290 |           promptType,
291 |           tempDir,
292 |           args,
293 |         );
294 | 
295 |         expect(messages).toHaveLength(1);
296 |         expect(messages[0]).toHaveProperty("role", "user");
297 |         expect(messages[0]).toHaveProperty("content");
298 |         expect(messages[0].content).toHaveProperty("type", "text");
299 |         expect(messages[0].content.text).toBeTruthy();
300 |         expect(messages[0].content.text.length).toBeGreaterThan(100);
301 |       }
302 |     });
303 | 
304 |     it("should include project-specific information in all prompts", async () => {
305 |       const promptTypes = ["analyze-and-recommend", "setup-documentation"];
306 | 
307 |       for (const promptType of promptTypes) {
308 |         const messages = await generateTechnicalWriterPrompts(
309 |           promptType,
310 |           tempDir,
311 |           {},
312 |         );
313 | 
314 |         expect(messages[0].content.text).toContain("Project Context");
315 |         expect(messages[0].content.text).toContain("Type:");
316 |         expect(messages[0].content.text).toContain("Languages:");
317 |       }
318 |     });
319 |   });
320 | });
321 | 
```
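
As a usage note: every known prompt type above resolves to exactly one user message whose `content.text` carries the full prompt, while unknown types reject with `Unknown prompt type: …`. A minimal caller, sketched only from the call signature these tests exercise (the function name `buildSetupPrompt` and the argument values are illustrative):

```typescript
import { generateTechnicalWriterPrompts } from "../../src/prompts/technical-writer-prompts.js";

// Sketch only: the argument keys mirror the tests above, and the single-message
// { role: "user", content: { type: "text" } } shape is what the suite asserts.
async function buildSetupPrompt(projectDir: string): Promise<string> {
  const messages = await generateTechnicalWriterPrompts(
    "setup-documentation",
    projectDir,
    { ssg_type: "docusaurus", include_examples: true },
  );
  // Each known prompt type yields exactly one text message.
  return messages[0].content.text;
}
```

The error-handling suite also shows the function degrades gracefully rather than throwing when project context is missing or malformed (nonexistent directory, invalid `package.json`), so callers only need to guard against the unknown-prompt-type rejection.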