# documcp — Codebase Export (page 18 of 33)

Token usage: 43136/50000 · Files included on this page: 6 of 307

This is page 18 of 33. Use http://codebase.md/tosin2013/documcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│   ├── agents
│   │   ├── documcp-ast.md
│   │   ├── documcp-deploy.md
│   │   ├── documcp-memory.md
│   │   ├── documcp-test.md
│   │   └── documcp-tool.md
│   ├── copilot-instructions.md
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── automated-changelog.md
│   │   ├── bug_report.md
│   │   ├── bug_report.yml
│   │   ├── documentation_issue.md
│   │   ├── feature_request.md
│   │   ├── feature_request.yml
│   │   ├── npm-publishing-fix.md
│   │   └── release_improvements.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── release-drafter.yml
│   └── workflows
│       ├── auto-merge.yml
│       ├── ci.yml
│       ├── codeql.yml
│       ├── dependency-review.yml
│       ├── deploy-docs.yml
│       ├── README.md
│       ├── release-drafter.yml
│       └── release.yml
├── .gitignore
├── .husky
│   ├── commit-msg
│   └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── ARCHITECTURAL_CHANGES_SUMMARY.md
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│   ├── .docusaurus
│   │   ├── docusaurus-plugin-content-docs
│   │   │   └── default
│   │   │       └── __mdx-loader-dependency.json
│   │   └── docusaurus-plugin-content-pages
│   │       └── default
│   │           └── __plugin.json
│   ├── adrs
│   │   ├── adr-0001-mcp-server-architecture.md
│   │   ├── adr-0002-repository-analysis-engine.md
│   │   ├── adr-0003-static-site-generator-recommendation-engine.md
│   │   ├── adr-0004-diataxis-framework-integration.md
│   │   ├── adr-0005-github-pages-deployment-automation.md
│   │   ├── adr-0006-mcp-tools-api-design.md
│   │   ├── adr-0007-mcp-prompts-and-resources-integration.md
│   │   ├── adr-0008-intelligent-content-population-engine.md
│   │   ├── adr-0009-content-accuracy-validation-framework.md
│   │   ├── adr-0010-mcp-resource-pattern-redesign.md
│   │   ├── adr-0011-ce-mcp-compatibility.md
│   │   ├── adr-0012-priority-scoring-system-for-documentation-drift.md
│   │   ├── adr-0013-release-pipeline-and-package-distribution.md
│   │   └── README.md
│   ├── api
│   │   ├── .nojekyll
│   │   ├── assets
│   │   │   ├── hierarchy.js
│   │   │   ├── highlight.css
│   │   │   ├── icons.js
│   │   │   ├── icons.svg
│   │   │   ├── main.js
│   │   │   ├── navigation.js
│   │   │   ├── search.js
│   │   │   └── style.css
│   │   ├── hierarchy.html
│   │   ├── index.html
│   │   ├── modules.html
│   │   └── variables
│   │       └── TOOLS.html
│   ├── assets
│   │   └── logo.svg
│   ├── CE-MCP-FINDINGS.md
│   ├── development
│   │   └── MCP_INSPECTOR_TESTING.md
│   ├── docusaurus.config.js
│   ├── explanation
│   │   ├── architecture.md
│   │   └── index.md
│   ├── guides
│   │   ├── link-validation.md
│   │   ├── playwright-integration.md
│   │   └── playwright-testing-workflow.md
│   ├── how-to
│   │   ├── analytics-setup.md
│   │   ├── change-watcher.md
│   │   ├── custom-domains.md
│   │   ├── documentation-freshness-tracking.md
│   │   ├── drift-priority-scoring.md
│   │   ├── github-pages-deployment.md
│   │   ├── index.md
│   │   ├── llm-integration.md
│   │   ├── local-testing.md
│   │   ├── performance-optimization.md
│   │   ├── prompting-guide.md
│   │   ├── repository-analysis.md
│   │   ├── seo-optimization.md
│   │   ├── site-monitoring.md
│   │   ├── troubleshooting.md
│   │   └── usage-examples.md
│   ├── index.md
│   ├── knowledge-graph.md
│   ├── package-lock.json
│   ├── package.json
│   ├── phase-2-intelligence.md
│   ├── reference
│   │   ├── api-overview.md
│   │   ├── cli.md
│   │   ├── configuration.md
│   │   ├── deploy-pages.md
│   │   ├── index.md
│   │   ├── mcp-tools.md
│   │   └── prompt-templates.md
│   ├── research
│   │   ├── cross-domain-integration
│   │   │   └── README.md
│   │   ├── domain-1-mcp-architecture
│   │   │   ├── index.md
│   │   │   └── mcp-performance-research.md
│   │   ├── domain-2-repository-analysis
│   │   │   └── README.md
│   │   ├── domain-3-ssg-recommendation
│   │   │   ├── index.md
│   │   │   └── ssg-performance-analysis.md
│   │   ├── domain-4-diataxis-integration
│   │   │   └── README.md
│   │   ├── domain-5-github-deployment
│   │   │   ├── github-pages-security-analysis.md
│   │   │   └── index.md
│   │   ├── domain-6-api-design
│   │   │   └── README.md
│   │   ├── README.md
│   │   ├── research-integration-summary-2025-01-14.md
│   │   ├── research-progress-template.md
│   │   └── research-questions-2025-01-14.md
│   ├── robots.txt
│   ├── sidebars.js
│   ├── sitemap.xml
│   ├── src
│   │   └── css
│   │       └── custom.css
│   └── tutorials
│       ├── development-setup.md
│       ├── environment-setup.md
│       ├── first-deployment.md
│       ├── getting-started.md
│       ├── index.md
│       ├── memory-workflows.md
│       └── user-onboarding.md
├── ISSUE_IMPLEMENTATION_SUMMARY.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│   └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│   ├── benchmarks
│   │   └── performance.ts
│   ├── index.ts
│   ├── memory
│   │   ├── contextual-retrieval.ts
│   │   ├── deployment-analytics.ts
│   │   ├── enhanced-manager.ts
│   │   ├── export-import.ts
│   │   ├── freshness-kg-integration.ts
│   │   ├── index.ts
│   │   ├── integration.ts
│   │   ├── kg-code-integration.ts
│   │   ├── kg-health.ts
│   │   ├── kg-integration.ts
│   │   ├── kg-link-validator.ts
│   │   ├── kg-storage.ts
│   │   ├── knowledge-graph.ts
│   │   ├── learning.ts
│   │   ├── manager.ts
│   │   ├── multi-agent-sharing.ts
│   │   ├── pruning.ts
│   │   ├── schemas.ts
│   │   ├── storage.ts
│   │   ├── temporal-analysis.ts
│   │   ├── user-preferences.ts
│   │   └── visualization.ts
│   ├── prompts
│   │   └── technical-writer-prompts.ts
│   ├── scripts
│   │   └── benchmark.ts
│   ├── templates
│   │   └── playwright
│   │       ├── accessibility.spec.template.ts
│   │       ├── Dockerfile.template
│   │       ├── docs-e2e.workflow.template.yml
│   │       ├── link-validation.spec.template.ts
│   │       └── playwright.config.template.ts
│   ├── tools
│   │   ├── analyze-deployments.ts
│   │   ├── analyze-readme.ts
│   │   ├── analyze-repository.ts
│   │   ├── change-watcher.ts
│   │   ├── check-documentation-links.ts
│   │   ├── cleanup-agent-artifacts.ts
│   │   ├── deploy-pages.ts
│   │   ├── detect-gaps.ts
│   │   ├── evaluate-readme-health.ts
│   │   ├── generate-config.ts
│   │   ├── generate-contextual-content.ts
│   │   ├── generate-llm-context.ts
│   │   ├── generate-readme-template.ts
│   │   ├── generate-technical-writer-prompts.ts
│   │   ├── kg-health-check.ts
│   │   ├── manage-preferences.ts
│   │   ├── manage-sitemap.ts
│   │   ├── optimize-readme.ts
│   │   ├── populate-content.ts
│   │   ├── readme-best-practices.ts
│   │   ├── recommend-ssg.ts
│   │   ├── setup-playwright-tests.ts
│   │   ├── setup-structure.ts
│   │   ├── simulate-execution.ts
│   │   ├── sync-code-to-docs.ts
│   │   ├── test-local-deployment.ts
│   │   ├── track-documentation-freshness.ts
│   │   ├── update-existing-documentation.ts
│   │   ├── validate-content.ts
│   │   ├── validate-documentation-freshness.ts
│   │   ├── validate-readme-checklist.ts
│   │   └── verify-deployment.ts
│   ├── types
│   │   └── api.ts
│   ├── utils
│   │   ├── artifact-detector.ts
│   │   ├── ast-analyzer.ts
│   │   ├── change-watcher.ts
│   │   ├── code-scanner.ts
│   │   ├── content-extractor.ts
│   │   ├── drift-detector.ts
│   │   ├── execution-simulator.ts
│   │   ├── freshness-tracker.ts
│   │   ├── language-parsers-simple.ts
│   │   ├── llm-client.ts
│   │   ├── permission-checker.ts
│   │   ├── semantic-analyzer.ts
│   │   ├── sitemap-generator.ts
│   │   ├── usage-metadata.ts
│   │   └── user-feedback-integration.ts
│   └── workflows
│       └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│   ├── api
│   │   └── mcp-responses.test.ts
│   ├── benchmarks
│   │   └── performance.test.ts
│   ├── call-graph-builder.test.ts
│   ├── change-watcher-priority.integration.test.ts
│   ├── change-watcher.test.ts
│   ├── edge-cases
│   │   └── error-handling.test.ts
│   ├── execution-simulator.test.ts
│   ├── functional
│   │   └── tools.test.ts
│   ├── integration
│   │   ├── kg-documentation-workflow.test.ts
│   │   ├── knowledge-graph-workflow.test.ts
│   │   ├── mcp-readme-tools.test.ts
│   │   ├── memory-mcp-tools.test.ts
│   │   ├── readme-technical-writer.test.ts
│   │   └── workflow.test.ts
│   ├── memory
│   │   ├── contextual-retrieval.test.ts
│   │   ├── enhanced-manager.test.ts
│   │   ├── export-import.test.ts
│   │   ├── freshness-kg-integration.test.ts
│   │   ├── kg-code-integration.test.ts
│   │   ├── kg-health.test.ts
│   │   ├── kg-link-validator.test.ts
│   │   ├── kg-storage-validation.test.ts
│   │   ├── kg-storage.test.ts
│   │   ├── knowledge-graph-documentation-examples.test.ts
│   │   ├── knowledge-graph-enhanced.test.ts
│   │   ├── knowledge-graph.test.ts
│   │   ├── learning.test.ts
│   │   ├── manager-advanced.test.ts
│   │   ├── manager.test.ts
│   │   ├── mcp-resource-integration.test.ts
│   │   ├── mcp-tool-persistence.test.ts
│   │   ├── schemas-documentation-examples.test.ts
│   │   ├── schemas.test.ts
│   │   ├── storage.test.ts
│   │   ├── temporal-analysis.test.ts
│   │   └── user-preferences.test.ts
│   ├── performance
│   │   ├── memory-load-testing.test.ts
│   │   └── memory-stress-testing.test.ts
│   ├── prompts
│   │   ├── guided-workflow-prompts.test.ts
│   │   └── technical-writer-prompts.test.ts
│   ├── server.test.ts
│   ├── setup.ts
│   ├── tools
│   │   ├── all-tools.test.ts
│   │   ├── analyze-coverage.test.ts
│   │   ├── analyze-deployments.test.ts
│   │   ├── analyze-readme.test.ts
│   │   ├── analyze-repository.test.ts
│   │   ├── check-documentation-links.test.ts
│   │   ├── cleanup-agent-artifacts.test.ts
│   │   ├── deploy-pages-kg-retrieval.test.ts
│   │   ├── deploy-pages-tracking.test.ts
│   │   ├── deploy-pages.test.ts
│   │   ├── detect-gaps.test.ts
│   │   ├── evaluate-readme-health.test.ts
│   │   ├── generate-contextual-content.test.ts
│   │   ├── generate-llm-context.test.ts
│   │   ├── generate-readme-template.test.ts
│   │   ├── generate-technical-writer-prompts.test.ts
│   │   ├── kg-health-check.test.ts
│   │   ├── manage-sitemap.test.ts
│   │   ├── optimize-readme.test.ts
│   │   ├── readme-best-practices.test.ts
│   │   ├── recommend-ssg-historical.test.ts
│   │   ├── recommend-ssg-preferences.test.ts
│   │   ├── recommend-ssg.test.ts
│   │   ├── simple-coverage.test.ts
│   │   ├── sync-code-to-docs.test.ts
│   │   ├── test-local-deployment.test.ts
│   │   ├── tool-error-handling.test.ts
│   │   ├── track-documentation-freshness.test.ts
│   │   ├── validate-content.test.ts
│   │   ├── validate-documentation-freshness.test.ts
│   │   └── validate-readme-checklist.test.ts
│   ├── types
│   │   └── type-safety.test.ts
│   └── utils
│       ├── artifact-detector.test.ts
│       ├── ast-analyzer.test.ts
│       ├── content-extractor.test.ts
│       ├── drift-detector-diataxis.test.ts
│       ├── drift-detector-priority.test.ts
│       ├── drift-detector.test.ts
│       ├── freshness-tracker.test.ts
│       ├── llm-client.test.ts
│       ├── semantic-analyzer.test.ts
│       ├── sitemap-generator.test.ts
│       ├── usage-metadata.test.ts
│       └── user-feedback-integration.test.ts
├── tsconfig.json
└── typedoc.json
```

# Files

--------------------------------------------------------------------------------
/tests/memory/kg-health.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Tests for Knowledge Graph Health Monitoring
  3 |  */
  4 | 
  5 | import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
  6 | import { promises as fs } from "fs";
  7 | import path from "path";
  8 | import { tmpdir } from "os";
  9 | import { KGHealthMonitor } from "../../src/memory/kg-health.js";
 10 | import {
 11 |   initializeKnowledgeGraph,
 12 |   getKnowledgeGraph,
 13 |   getKGStorage,
 14 | } from "../../src/memory/kg-integration.js";
 15 | 
 16 | describe("KG Health Monitoring", () => {
 17 |   let testDir: string;
 18 |   let monitor: KGHealthMonitor;
 19 | 
 20 |   beforeEach(async () => {
 21 |     testDir = path.join(tmpdir(), `documcp-health-test-${Date.now()}`);
 22 |     await fs.mkdir(testDir, { recursive: true });
 23 | 
 24 |     const storageDir = path.join(testDir, ".documcp/memory");
 25 |     await initializeKnowledgeGraph(storageDir);
 26 |     monitor = new KGHealthMonitor(storageDir);
 27 |   });
 28 | 
 29 |   afterEach(async () => {
 30 |     try {
 31 |       await fs.rm(testDir, { recursive: true, force: true });
 32 |     } catch {
 33 |       // Ignore cleanup errors
 34 |     }
 35 |   });
 36 | 
 37 |   describe("calculateHealth", () => {
 38 |     it("should calculate overall health score", async () => {
 39 |       const kg = await getKnowledgeGraph();
 40 |       const storage = await getKGStorage();
 41 | 
 42 |       // Add some nodes
 43 |       kg.addNode({
 44 |         id: "project:test",
 45 |         type: "project",
 46 |         label: "Test Project",
 47 |         properties: {},
 48 |         weight: 1.0,
 49 |       });
 50 | 
 51 |       kg.addNode({
 52 |         id: "tech:typescript",
 53 |         type: "technology",
 54 |         label: "TypeScript",
 55 |         properties: {},
 56 |         weight: 1.0,
 57 |       });
 58 | 
 59 |       kg.addEdge({
 60 |         source: "project:test",
 61 |         target: "tech:typescript",
 62 |         type: "project_uses_technology",
 63 |         weight: 1.0,
 64 |         confidence: 1.0,
 65 |         properties: {},
 66 |       });
 67 | 
 68 |       const health = await monitor.calculateHealth(kg, storage);
 69 | 
 70 |       expect(health.overallHealth).toBeGreaterThanOrEqual(0);
 71 |       expect(health.overallHealth).toBeLessThanOrEqual(100);
 72 |       expect(health.timestamp).toBeDefined();
 73 |       expect(health.dataQuality).toBeDefined();
 74 |       expect(health.structureHealth).toBeDefined();
 75 |       expect(health.performance).toBeDefined();
 76 |       expect(health.trends).toBeDefined();
 77 |       expect(health.issues).toBeDefined();
 78 |       expect(health.recommendations).toBeDefined();
 79 |     });
 80 | 
 81 |     it("should have high health score for clean graph", async () => {
 82 |       const kg = await getKnowledgeGraph();
 83 |       const storage = await getKGStorage();
 84 | 
 85 |       // Add well-connected nodes
 86 |       for (let i = 0; i < 5; i++) {
 87 |         kg.addNode({
 88 |           id: `node:${i}`,
 89 |           type: "project",
 90 |           label: `Node ${i}`,
 91 |           properties: {},
 92 |           weight: 1.0,
 93 |         });
 94 |       }
 95 | 
 96 |       // Connect them
 97 |       for (let i = 0; i < 4; i++) {
 98 |         kg.addEdge({
 99 |           source: `node:${i}`,
100 |           target: `node:${i + 1}`,
101 |           type: "similar_to",
102 |           weight: 1.0,
103 |           confidence: 1.0,
104 |           properties: {},
105 |         });
106 |       }
107 | 
108 |       const health = await monitor.calculateHealth(kg, storage);
109 | 
110 |       expect(health.overallHealth).toBeGreaterThan(70);
111 |       expect(health.dataQuality.score).toBeGreaterThan(70);
112 |       expect(health.structureHealth.score).toBeGreaterThan(0);
113 |     });
114 |   });
115 | 
116 |   describe("Data Quality Metrics", () => {
117 |     it("should detect stale nodes", async () => {
118 |       const kg = await getKnowledgeGraph();
119 |       const storage = await getKGStorage();
120 | 
121 |       // Add a stale node (31 days old)
122 |       const staleDate = new Date();
123 |       staleDate.setDate(staleDate.getDate() - 31);
124 | 
125 |       const staleNode = kg.addNode({
126 |         id: "project:stale",
127 |         type: "project",
128 |         label: "Stale Project",
129 |         properties: {},
130 |         weight: 1.0,
131 |       });
132 |       // Manually set stale timestamp
133 |       staleNode.lastUpdated = staleDate.toISOString();
134 | 
135 |       // Add a fresh node
136 |       kg.addNode({
137 |         id: "project:fresh",
138 |         type: "project",
139 |         label: "Fresh Project",
140 |         properties: {},
141 |         weight: 1.0,
142 |       });
143 | 
144 |       const health = await monitor.calculateHealth(kg, storage);
145 | 
146 |       expect(health.dataQuality.staleNodeCount).toBeGreaterThan(0);
147 |       expect(health.dataQuality.totalNodes).toBe(2);
148 |     });
149 | 
150 |     it("should detect orphaned edges", async () => {
151 |       const kg = await getKnowledgeGraph();
152 |       const storage = await getKGStorage();
153 | 
154 |       // Add nodes and edges
155 |       kg.addNode({
156 |         id: "node:1",
157 |         type: "project",
158 |         label: "Node 1",
159 |         properties: {},
160 |         weight: 1.0,
161 |       });
162 | 
163 |       kg.addEdge({
164 |         source: "node:1",
165 |         target: "node:nonexistent",
166 |         type: "depends_on",
167 |         weight: 1.0,
168 |         confidence: 1.0,
169 |         properties: {},
170 |       });
171 | 
172 |       // Save to storage so verifyIntegrity can read it
173 |       const { saveKnowledgeGraph } = await import(
174 |         "../../src/memory/kg-integration.js"
175 |       );
176 |       await saveKnowledgeGraph();
177 | 
178 |       const health = await monitor.calculateHealth(kg, storage);
179 | 
180 |       expect(health.dataQuality.orphanedEdgeCount).toBeGreaterThan(0);
181 |     });
182 | 
183 |     it("should calculate confidence average", async () => {
184 |       const kg = await getKnowledgeGraph();
185 |       const storage = await getKGStorage();
186 | 
187 |       kg.addNode({
188 |         id: "n1",
189 |         type: "project",
190 |         label: "N1",
191 |         properties: {},
192 |         weight: 1,
193 |       });
194 |       kg.addNode({
195 |         id: "n2",
196 |         type: "project",
197 |         label: "N2",
198 |         properties: {},
199 |         weight: 1,
200 |       });
201 | 
202 |       kg.addEdge({
203 |         source: "n1",
204 |         target: "n2",
205 |         type: "similar_to",
206 |         weight: 1.0,
207 |         confidence: 0.8,
208 |         properties: {},
209 |       });
210 | 
211 |       kg.addEdge({
212 |         source: "n2",
213 |         target: "n1",
214 |         type: "similar_to",
215 |         weight: 1.0,
216 |         confidence: 0.6,
217 |         properties: {},
218 |       });
219 | 
220 |       const health = await monitor.calculateHealth(kg, storage);
221 | 
222 |       expect(health.dataQuality.confidenceAverage).toBeCloseTo(0.7, 1);
223 |     });
224 | 
225 |     it("should calculate completeness score", async () => {
226 |       const kg = await getKnowledgeGraph();
227 |       const storage = await getKGStorage();
228 | 
229 |       // Project with technology (complete)
230 |       kg.addNode({
231 |         id: "project:1",
232 |         type: "project",
233 |         label: "Complete Project",
234 |         properties: { hasDocs: false },
235 |         weight: 1,
236 |       });
237 |       kg.addNode({
238 |         id: "tech:ts",
239 |         type: "technology",
240 |         label: "TypeScript",
241 |         properties: {},
242 |         weight: 1,
243 |       });
244 |       kg.addEdge({
245 |         source: "project:1",
246 |         target: "tech:ts",
247 |         type: "project_uses_technology",
248 |         weight: 1,
249 |         confidence: 1,
250 |         properties: {},
251 |       });
252 | 
253 |       const health = await monitor.calculateHealth(kg, storage);
254 | 
255 |       expect(health.dataQuality.completenessScore).toBeGreaterThan(0);
256 |       expect(health.dataQuality.completenessScore).toBeLessThanOrEqual(1);
257 |     });
258 |   });
259 | 
260 |   describe("Structure Health Metrics", () => {
261 |     it("should detect isolated nodes", async () => {
262 |       const kg = await getKnowledgeGraph();
263 |       const storage = await getKGStorage();
264 | 
265 |       // Add isolated node (no edges)
266 |       kg.addNode({
267 |         id: "isolated:1",
268 |         type: "project",
269 |         label: "Isolated",
270 |         properties: {},
271 |         weight: 1,
272 |       });
273 | 
274 |       // Add connected nodes
275 |       kg.addNode({
276 |         id: "connected:1",
277 |         type: "project",
278 |         label: "C1",
279 |         properties: {},
280 |         weight: 1,
281 |       });
282 |       kg.addNode({
283 |         id: "connected:2",
284 |         type: "project",
285 |         label: "C2",
286 |         properties: {},
287 |         weight: 1,
288 |       });
289 |       kg.addEdge({
290 |         source: "connected:1",
291 |         target: "connected:2",
292 |         type: "similar_to",
293 |         weight: 1,
294 |         confidence: 1,
295 |         properties: {},
296 |       });
297 | 
298 |       const health = await monitor.calculateHealth(kg, storage);
299 | 
300 |       expect(health.structureHealth.isolatedNodeCount).toBe(1);
301 |     });
302 | 
303 |     it("should calculate density score", async () => {
304 |       const kg = await getKnowledgeGraph();
305 |       const storage = await getKGStorage();
306 | 
307 |       // Create 4 nodes
308 |       for (let i = 0; i < 4; i++) {
309 |         kg.addNode({
310 |           id: `node:${i}`,
311 |           type: "project",
312 |           label: `N${i}`,
313 |           properties: {},
314 |           weight: 1,
315 |         });
316 |       }
317 | 
318 |       // Create 2 edges (low density)
319 |       kg.addEdge({
320 |         source: "node:0",
321 |         target: "node:1",
322 |         type: "similar_to",
323 |         weight: 1,
324 |         confidence: 1,
325 |         properties: {},
326 |       });
327 |       kg.addEdge({
328 |         source: "node:2",
329 |         target: "node:3",
330 |         type: "similar_to",
331 |         weight: 1,
332 |         confidence: 1,
333 |         properties: {},
334 |       });
335 | 
336 |       const health = await monitor.calculateHealth(kg, storage);
337 | 
338 |       // Max possible edges for 4 nodes: (4*3)/2 = 6
339 |       // Actual edges: 2
340 |       // Density: 2/6 = 0.333
341 |       expect(health.structureHealth.densityScore).toBeCloseTo(0.333, 1);
342 |     });
343 | 
344 |     it("should count connected components", async () => {
345 |       const kg = await getKnowledgeGraph();
346 |       const storage = await getKGStorage();
347 | 
348 |       // Component 1
349 |       kg.addNode({
350 |         id: "c1:n1",
351 |         type: "project",
352 |         label: "C1N1",
353 |         properties: {},
354 |         weight: 1,
355 |       });
356 |       kg.addNode({
357 |         id: "c1:n2",
358 |         type: "project",
359 |         label: "C1N2",
360 |         properties: {},
361 |         weight: 1,
362 |       });
363 |       kg.addEdge({
364 |         source: "c1:n1",
365 |         target: "c1:n2",
366 |         type: "similar_to",
367 |         weight: 1,
368 |         confidence: 1,
369 |         properties: {},
370 |       });
371 | 
372 |       // Component 2 (separate)
373 |       kg.addNode({
374 |         id: "c2:n1",
375 |         type: "project",
376 |         label: "C2N1",
377 |         properties: {},
378 |         weight: 1,
379 |       });
380 |       kg.addNode({
381 |         id: "c2:n2",
382 |         type: "project",
383 |         label: "C2N2",
384 |         properties: {},
385 |         weight: 1,
386 |       });
387 |       kg.addEdge({
388 |         source: "c2:n1",
389 |         target: "c2:n2",
390 |         type: "similar_to",
391 |         weight: 1,
392 |         confidence: 1,
393 |         properties: {},
394 |       });
395 | 
396 |       const health = await monitor.calculateHealth(kg, storage);
397 | 
398 |       expect(health.structureHealth.connectedComponents).toBe(2);
399 |     });
400 |   });
401 | 
402 |   describe("Issue Detection", () => {
403 |     it("should detect orphaned edges issue", async () => {
404 |       const kg = await getKnowledgeGraph();
405 |       const storage = await getKGStorage();
406 | 
407 |       kg.addNode({
408 |         id: "n1",
409 |         type: "project",
410 |         label: "N1",
411 |         properties: {},
412 |         weight: 1,
413 |       });
414 | 
415 |       // Create many orphaned edges
416 |       for (let i = 0; i < 15; i++) {
417 |         kg.addEdge({
418 |           source: "n1",
419 |           target: `nonexistent:${i}`,
420 |           type: "depends_on",
421 |           weight: 1,
422 |           confidence: 1,
423 |           properties: {},
424 |         });
425 |       }
426 | 
427 |       // Save to storage
428 |       const { saveKnowledgeGraph } = await import(
429 |         "../../src/memory/kg-integration.js"
430 |       );
431 |       await saveKnowledgeGraph();
432 | 
433 |       const health = await monitor.calculateHealth(kg, storage);
434 | 
435 |       // Should detect orphaned edges in data quality metrics
436 |       expect(health.dataQuality.orphanedEdgeCount).toBeGreaterThan(0);
437 |     });
438 | 
439 |     it("should detect stale data issue", async () => {
440 |       const kg = await getKnowledgeGraph();
441 |       const storage = await getKGStorage();
442 | 
443 |       const staleDate = new Date();
444 |       staleDate.setDate(staleDate.getDate() - 31);
445 | 
446 |       // Create many stale nodes
447 |       for (let i = 0; i < 25; i++) {
448 |         const node = kg.addNode({
449 |           id: `stale:${i}`,
450 |           type: "project",
451 |           label: `Stale ${i}`,
452 |           properties: {},
453 |           weight: 1,
454 |         });
455 |         node.lastUpdated = staleDate.toISOString();
456 |       }
457 | 
458 |       const health = await monitor.calculateHealth(kg, storage);
459 | 
460 |       const staleIssue = health.issues.find(
461 |         (issue) => issue.category === "quality",
462 |       );
463 |       expect(staleIssue).toBeDefined();
464 |       expect(["medium", "high"]).toContain(staleIssue?.severity);
465 |     });
466 | 
467 |     it("should detect low completeness issue", async () => {
468 |       const kg = await getKnowledgeGraph();
469 |       const storage = await getKGStorage();
470 | 
471 |       // Projects without required relationships
472 |       for (let i = 0; i < 10; i++) {
473 |         kg.addNode({
474 |           id: `project:${i}`,
475 |           type: "project",
476 |           label: `Project ${i}`,
477 |           properties: { hasDocs: true }, // Expects docs but has none
478 |           weight: 1,
479 |         });
480 |       }
481 | 
482 |       const health = await monitor.calculateHealth(kg, storage);
483 | 
484 |       const completenessIssue = health.issues.find(
485 |         (issue) => issue.id === "low_completeness",
486 |       );
487 |       expect(completenessIssue).toBeDefined();
488 |       expect(completenessIssue?.severity).toBe("high");
489 |     });
490 | 
491 |     it("should mark auto-fixable issues", async () => {
492 |       const kg = await getKnowledgeGraph();
493 |       const storage = await getKGStorage();
494 | 
495 |       kg.addNode({
496 |         id: "n1",
497 |         type: "project",
498 |         label: "N1",
499 |         properties: {},
500 |         weight: 1,
501 |       });
502 | 
503 |       for (let i = 0; i < 15; i++) {
504 |         kg.addEdge({
505 |           source: "n1",
506 |           target: `nonexistent:${i}`,
507 |           type: "depends_on",
508 |           weight: 1,
509 |           confidence: 1,
510 |           properties: {},
511 |         });
512 |       }
513 | 
514 |       // Save to storage
515 |       const { saveKnowledgeGraph } = await import(
516 |         "../../src/memory/kg-integration.js"
517 |       );
518 |       await saveKnowledgeGraph();
519 | 
520 |       const health = await monitor.calculateHealth(kg, storage);
521 | 
522 |       // Check basic health metrics were calculated
523 |       expect(health.overallHealth).toBeGreaterThanOrEqual(0);
524 |       expect(health.dataQuality.orphanedEdgeCount).toBeGreaterThan(0);
525 |     });
526 |   });
527 | 
528 |   describe("Recommendations", () => {
529 |     it("should generate recommendations for critical issues", async () => {
530 |       const kg = await getKnowledgeGraph();
531 |       const storage = await getKGStorage();
532 | 
533 |       kg.addNode({
534 |         id: "n1",
535 |         type: "project",
536 |         label: "N1",
537 |         properties: {},
538 |         weight: 1,
539 |       });
540 | 
541 |       // Create orphaned edges (triggers high severity issue)
542 |       for (let i = 0; i < 15; i++) {
543 |         kg.addEdge({
544 |           source: "n1",
545 |           target: `nonexistent:${i}`,
546 |           type: "depends_on",
547 |           weight: 1,
548 |           confidence: 1,
549 |           properties: {},
550 |         });
551 |       }
552 | 
553 |       const health = await monitor.calculateHealth(kg, storage);
554 | 
555 |       // There should be issues detected
556 |       expect(health.issues.length).toBeGreaterThan(0);
557 | 
558 |       // Recommendations may or may not be generated depending on issue severity and auto-fixability
559 |       // Just verify the structure if recommendations exist
560 |       if (health.recommendations.length > 0) {
561 |         expect(health.recommendations[0].expectedImpact).toBeGreaterThanOrEqual(
562 |           0,
563 |         );
564 |       }
565 |     });
566 | 
    it("should prioritize recommendations by impact", async () => {
      const kg = await getKnowledgeGraph();
      const storage = await getKGStorage();

      // Create multiple issues
      // Issue class 1: a single node with many edges pointing at targets
      // that do not exist (orphaned edges).
      kg.addNode({
        id: "n1",
        type: "project",
        label: "N1",
        properties: {},
        weight: 1,
      });

      for (let i = 0; i < 15; i++) {
        kg.addEdge({
          source: "n1",
          target: `nonexistent:${i}`,
          type: "depends_on",
          weight: 1,
          confidence: 1,
          properties: {},
        });
      }

      // Issue class 2: a batch of nodes backdated 31 days so the monitor
      // classifies them as stale.
      const staleDate = new Date();
      staleDate.setDate(staleDate.getDate() - 31);
      for (let i = 0; i < 25; i++) {
        const node = kg.addNode({
          id: `stale:${i}`,
          type: "project",
          label: `Stale ${i}`,
          properties: {},
          weight: 1,
        });
        node.lastUpdated = staleDate.toISOString();
      }

      const health = await monitor.calculateHealth(kg, storage);

      // Recommendations should be sorted by priority then impact
      // (lower priorityOrder value = higher priority; within a priority
      // tier, expectedImpact must be non-increasing).
      if (health.recommendations.length > 1) {
        const priorityOrder = { high: 0, medium: 1, low: 2 };
        for (let i = 0; i < health.recommendations.length - 1; i++) {
          const current = health.recommendations[i];
          const next = health.recommendations[i + 1];

          if (current.priority === next.priority) {
            expect(current.expectedImpact).toBeGreaterThanOrEqual(
              next.expectedImpact,
            );
          } else {
            expect(priorityOrder[current.priority]).toBeLessThanOrEqual(
              priorityOrder[next.priority],
            );
          }
        }
      }
    });
625 | 
626 |     it("should limit recommendations to top 5", async () => {
627 |       const kg = await getKnowledgeGraph();
628 |       const storage = await getKGStorage();
629 | 
630 |       // Create many issues
631 |       kg.addNode({
632 |         id: "n1",
633 |         type: "project",
634 |         label: "N1",
635 |         properties: {},
636 |         weight: 1,
637 |       });
638 | 
639 |       for (let i = 0; i < 50; i++) {
640 |         kg.addEdge({
641 |           source: "n1",
642 |           target: `nonexistent:${i}`,
643 |           type: "depends_on",
644 |           weight: 1,
645 |           confidence: 1,
646 |           properties: {},
647 |         });
648 |       }
649 | 
650 |       const staleDate = new Date();
651 |       staleDate.setDate(staleDate.getDate() - 31);
652 |       for (let i = 0; i < 50; i++) {
653 |         const node = kg.addNode({
654 |           id: `stale:${i}`,
655 |           type: "project",
656 |           label: `Stale ${i}`,
657 |           properties: {},
658 |           weight: 1,
659 |         });
660 |         node.lastUpdated = staleDate.toISOString();
661 |       }
662 | 
663 |       const health = await monitor.calculateHealth(kg, storage);
664 | 
665 |       expect(health.recommendations.length).toBeLessThanOrEqual(5);
666 |     });
667 |   });
668 | 
669 |   describe("Trend Analysis", () => {
670 |     it("should return stable trend with no history", async () => {
671 |       const kg = await getKnowledgeGraph();
672 |       const storage = await getKGStorage();
673 | 
674 |       kg.addNode({
675 |         id: "n1",
676 |         type: "project",
677 |         label: "N1",
678 |         properties: {},
679 |         weight: 1,
680 |       });
681 | 
682 |       const health = await monitor.calculateHealth(kg, storage);
683 | 
684 |       expect(health.trends.healthTrend).toBe("stable");
685 |       expect(health.trends.nodeGrowthRate).toBe(0);
686 |       expect(health.trends.edgeGrowthRate).toBe(0);
687 |     });
688 | 
689 |     it("should track health history", async () => {
690 |       const kg = await getKnowledgeGraph();
691 |       const storage = await getKGStorage();
692 | 
693 |       kg.addNode({
694 |         id: "n1",
695 |         type: "project",
696 |         label: "N1",
697 |         properties: {},
698 |         weight: 1,
699 |       });
700 | 
701 |       // First health check
702 |       await monitor.calculateHealth(kg, storage);
703 | 
704 |       // Verify history file was created
705 |       const historyPath = path.join(
706 |         testDir,
707 |         ".documcp/memory/health-history.jsonl",
708 |       );
709 |       const historyExists = await fs
710 |         .access(historyPath)
711 |         .then(() => true)
712 |         .catch(() => false);
713 | 
714 |       expect(historyExists).toBe(true);
715 | 
716 |       const content = await fs.readFile(historyPath, "utf-8");
717 |       expect(content).toContain("overallHealth");
718 |       expect(content).toContain("dataQuality");
719 |     });
720 | 
721 |     it("should detect improving trend", async () => {
722 |       const kg = await getKnowledgeGraph();
723 |       const storage = await getKGStorage();
724 | 
725 |       // Create poor initial state
726 |       kg.addNode({
727 |         id: "n1",
728 |         type: "project",
729 |         label: "N1",
730 |         properties: {},
731 |         weight: 1,
732 |       });
733 |       for (let i = 0; i < 20; i++) {
734 |         kg.addEdge({
735 |           source: "n1",
736 |           target: `nonexistent:${i}`,
737 |           type: "depends_on",
738 |           weight: 1,
739 |           confidence: 1,
740 |           properties: {},
741 |         });
742 |       }
743 | 
744 |       await monitor.calculateHealth(kg, storage);
745 | 
746 |       // Simulate time passing and improvement
747 |       await new Promise((resolve) => setTimeout(resolve, 100));
748 | 
749 |       // Remove orphaned edges (improvement)
750 |       const allEdges = await kg.getAllEdges();
751 |       for (const edge of allEdges) {
752 |         // In a real scenario, we'd have a method to remove edges
753 |         // For testing, we'll add good nodes instead
754 |       }
755 | 
756 |       // Add well-connected nodes
757 |       for (let i = 0; i < 5; i++) {
758 |         kg.addNode({
759 |           id: `good:${i}`,
760 |           type: "project",
761 |           label: `Good ${i}`,
762 |           properties: {},
763 |           weight: 1,
764 |         });
765 |       }
766 | 
767 |       const health2 = await monitor.calculateHealth(kg, storage);
768 | 
769 |       // Trend analysis needs multiple data points over time
770 |       // With only 2 checks very close together, it might still be stable
771 |       expect(["improving", "stable", "degrading"]).toContain(
772 |         health2.trends.healthTrend,
773 |       );
774 |     });
775 |   });
776 | 
777 |   describe("Performance Metrics", () => {
778 |     it("should track storage size", async () => {
779 |       const kg = await getKnowledgeGraph();
780 |       const storage = await getKGStorage();
781 | 
782 |       kg.addNode({
783 |         id: "n1",
784 |         type: "project",
785 |         label: "N1",
786 |         properties: {},
787 |         weight: 1,
788 |       });
789 | 
790 |       const health = await monitor.calculateHealth(kg, storage);
791 | 
792 |       expect(health.performance.storageSize).toBeGreaterThanOrEqual(0);
793 |     });
794 | 
795 |     it("should have high performance score for small graphs", async () => {
796 |       const kg = await getKnowledgeGraph();
797 |       const storage = await getKGStorage();
798 | 
799 |       // Small graph (fast)
800 |       for (let i = 0; i < 5; i++) {
801 |         kg.addNode({
802 |           id: `n${i}`,
803 |           type: "project",
804 |           label: `N${i}`,
805 |           properties: {},
806 |           weight: 1,
807 |         });
808 |       }
809 | 
810 |       const health = await monitor.calculateHealth(kg, storage);
811 | 
812 |       expect(health.performance.score).toBeGreaterThan(50);
813 |     });
814 |   });
815 | });
816 | 
```

--------------------------------------------------------------------------------
/src/tools/generate-contextual-content.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Context-Aware Content Generator (Phase 3)
  3 |  *
  4 |  * Generates documentation content based on actual code structure
  5 |  * Uses AST analysis and knowledge graph for accurate, contextual documentation
  6 |  */
  7 | 
  8 | import { Tool } from "@modelcontextprotocol/sdk/types.js";
  9 | import { z } from "zod";
 10 | import path from "path";
 11 | import {
 12 |   ASTAnalyzer,
 13 |   FunctionSignature,
 14 |   ClassInfo,
 15 |   InterfaceInfo,
 16 | } from "../utils/ast-analyzer.js";
 17 | import { formatMCPResponse, MCPToolResponse } from "../types/api.js";
 18 | import { handleMemoryRecall } from "../memory/index.js";
 19 | 
// Runtime validation schema for the tool's arguments. Keep in sync with
// the JSON schema in the exported `generateContextualContent` definition.
// NOTE(review): `outputFormat` is validated but the handler never reads
// it — generated content is always markdown. TODO confirm intent.
const inputSchema = z.object({
  filePath: z.string().describe("Path to the source code file"),
  documentationType: z
    .enum(["tutorial", "how-to", "reference", "explanation", "all"])
    .default("reference")
    .describe("Type of documentation to generate"),
  includeExamples: z
    .boolean()
    .default(true)
    .describe("Include code examples in generated documentation"),
  style: z
    .enum(["concise", "detailed", "verbose"])
    .default("detailed")
    .describe("Documentation style"),
  outputFormat: z
    .enum(["markdown", "mdx", "html"])
    .default("markdown")
    .describe("Output format for generated documentation"),
});
 39 | 
/** Result of a content-generation run for a single source file. */
export interface GeneratedContent {
  filePath: string; // Path of the analyzed source file
  documentationType: string; // Requested Diataxis type (or "all")
  sections: GeneratedSection[];
  metadata: ContentMetadata;
}

/** One generated documentation section in a Diataxis category. */
export interface GeneratedSection {
  title: string;
  content: string; // Markdown body of the section
  category: "tutorial" | "how-to" | "reference" | "explanation";
  codeReferences: string[]; // Names of code entities the section covers
  confidence: number; // 0..1 heuristic confidence for this section
}

/** Metadata describing how and from what the content was produced. */
export interface ContentMetadata {
  generatedAt: string; // ISO-8601 timestamp of generation
  codeAnalysis: {
    functions: number;
    classes: number;
    interfaces: number;
    complexity: number;
  };
  similarExamples: number; // Similar projects recalled from memory
  confidence: number; // Overall confidence, whole-number percentage
}
 66 | 
/**
 * Main content generation handler.
 *
 * Validates the raw tool arguments, runs AST analysis on the target file,
 * recalls similar past analyses from memory, and assembles documentation
 * sections for the requested Diataxis type(s). All failures are converted
 * into a structured MCP error response rather than thrown.
 *
 * @param args - Raw tool arguments; parsed against `inputSchema`
 * @param context - Optional MCP context providing `info`/`warn` callbacks
 *   for progress reporting (each call is optional-chained, so a missing
 *   context is safe)
 * @returns A formatted MCP response with generated sections and metadata,
 *   or an error payload with code "GENERATION_FAILED"
 */
export async function handleGenerateContextualContent(
  args: unknown,
  context?: any,
): Promise<{ content: any[] }> {
  const startTime = Date.now();

  try {
    // NOTE(review): `outputFormat` is accepted by the schema but not
    // destructured or used — output is always markdown. Confirm whether
    // mdx/html rendering is planned or the option should be dropped.
    const { filePath, documentationType, includeExamples, style } =
      inputSchema.parse(args);

    await context?.info?.(
      `📝 Generating ${documentationType} documentation for ${path.basename(
        filePath,
      )}...`,
    );

    // Initialize AST analyzer
    const analyzer = new ASTAnalyzer();
    await analyzer.initialize();

    // Analyze the file
    await context?.info?.("🔍 Analyzing code structure...");
    const analysis = await analyzer.analyzeFile(filePath);

    if (!analysis) {
      throw new Error(`Failed to analyze file: ${filePath}`);
    }

    // Query knowledge graph for similar projects
    await context?.info?.("🧠 Retrieving contextual information...");
    const similarProjects = await findSimilarProjects(analysis, context);

    // Generate documentation sections. "all" produces every category;
    // otherwise only the requested one is generated.
    const sections: GeneratedSection[] = [];

    if (documentationType === "reference" || documentationType === "all") {
      sections.push(
        ...generateReferenceDocumentation(analysis, similarProjects, style),
      );
    }

    if (documentationType === "tutorial" || documentationType === "all") {
      sections.push(
        ...generateTutorialDocumentation(
          analysis,
          similarProjects,
          includeExamples,
          style,
        ),
      );
    }

    if (documentationType === "how-to" || documentationType === "all") {
      sections.push(
        ...generateHowToDocumentation(
          analysis,
          similarProjects,
          includeExamples,
          style,
        ),
      );
    }

    if (documentationType === "explanation" || documentationType === "all") {
      sections.push(
        ...generateExplanationDocumentation(analysis, similarProjects, style),
      );
    }

    // Aggregate counts and an overall confidence percentage for the caller.
    const metadata: ContentMetadata = {
      generatedAt: new Date().toISOString(),
      codeAnalysis: {
        functions: analysis.functions.length,
        classes: analysis.classes.length,
        interfaces: analysis.interfaces.length,
        complexity: analysis.complexity,
      },
      similarExamples: similarProjects.length,
      confidence: calculateOverallConfidence(sections),
    };

    const result: GeneratedContent = {
      filePath,
      documentationType,
      sections,
      metadata,
    };

    const response: MCPToolResponse<typeof result> = {
      success: true,
      data: result,
      metadata: {
        toolVersion: "3.0.0",
        executionTime: Date.now() - startTime,
        timestamp: new Date().toISOString(),
      },
      recommendations: [
        {
          type: "info",
          title: "Documentation Generated",
          description: `Generated ${sections.length} documentation section(s) with ${metadata.confidence}% confidence`,
        },
      ],
      nextSteps: [
        {
          action: "Review generated content",
          description: "Review and refine generated documentation for accuracy",
          priority: "high",
        },
        {
          action: "Add to documentation site",
          description:
            "Integrate generated content into your documentation structure",
          priority: "medium",
        },
        {
          action: "Validate content",
          toolRequired: "validate_diataxis_content",
          description:
            "Run validation to ensure generated content meets quality standards",
          priority: "medium",
        },
      ],
    };

    await context?.info?.(
      `✅ Generated ${sections.length} documentation section(s)`,
    );

    return formatMCPResponse(response, { fullResponse: true });
  } catch (error: any) {
    // Any error (schema, analysis, generation) becomes a structured
    // MCP error response; executionTime still reflects elapsed wall time.
    const errorResponse: MCPToolResponse = {
      success: false,
      error: {
        code: "GENERATION_FAILED",
        message: `Content generation failed: ${error.message}`,
        resolution: "Ensure the file path is valid and the file can be parsed",
      },
      metadata: {
        toolVersion: "3.0.0",
        executionTime: Date.now() - startTime,
        timestamp: new Date().toISOString(),
      },
    };

    return formatMCPResponse(errorResponse, { fullResponse: true });
  }
}
218 | 
219 | /**
220 |  * Generate reference documentation
221 |  */
222 | function generateReferenceDocumentation(
223 |   analysis: any,
224 |   _similarProjects: any[],
225 |   _style: string,
226 | ): GeneratedSection[] {
227 |   const sections: GeneratedSection[] = [];
228 | 
229 |   // Generate function reference
230 |   if (analysis.functions.length > 0) {
231 |     sections.push(generateFunctionReference(analysis.functions, _style));
232 |   }
233 | 
234 |   // Generate class reference
235 |   if (analysis.classes.length > 0) {
236 |     sections.push(generateClassReference(analysis.classes, _style));
237 |   }
238 | 
239 |   // Generate interface reference
240 |   if (analysis.interfaces.length > 0) {
241 |     sections.push(generateInterfaceReference(analysis.interfaces, _style));
242 |   }
243 | 
244 |   // Generate type reference
245 |   if (analysis.types.length > 0) {
246 |     sections.push(generateTypeReference(analysis.types, _style));
247 |   }
248 | 
249 |   return sections;
250 | }
251 | 
252 | /**
253 |  * Generate function reference documentation
254 |  */
255 | function generateFunctionReference(
256 |   functions: FunctionSignature[],
257 |   _style: string,
258 | ): GeneratedSection {
259 |   let content = "# Function Reference\n\n";
260 | 
261 |   for (const func of functions.filter((f) => f.isExported)) {
262 |     content += `## \`${func.name}\`\n\n`;
263 | 
264 |     if (func.docComment) {
265 |       content += `${cleanDocComment(func.docComment)}\n\n`;
266 |     }
267 | 
268 |     // Signature
269 |     const params = func.parameters
270 |       .map((p) => `${p.name}: ${p.type || "any"}`)
271 |       .join(", ");
272 |     const returnType = func.returnType || "void";
273 |     const asyncPrefix = func.isAsync ? "async " : "";
274 | 
275 |     content += "**Signature:**\n\n";
276 |     content += "```typescript\n";
277 |     content += `${asyncPrefix}function ${func.name}(${params}): ${returnType}\n`;
278 |     content += "```\n\n";
279 | 
280 |     // Parameters
281 |     if (func.parameters.length > 0) {
282 |       content += "**Parameters:**\n\n";
283 |       for (const param of func.parameters) {
284 |         const optionalMarker = param.optional ? " (optional)" : "";
285 |         const defaultValue = param.defaultValue
286 |           ? ` = ${param.defaultValue}`
287 |           : "";
288 |         content += `- \`${param.name}\`${optionalMarker}: \`${
289 |           param.type || "any"
290 |         }\`${defaultValue}\n`;
291 |       }
292 |       content += "\n";
293 |     }
294 | 
295 |     // Return value
296 |     if (func.returnType && func.returnType !== "void") {
297 |       content += "**Returns:**\n\n";
298 |       content += `- \`${func.returnType}\`\n\n`;
299 |     }
300 | 
301 |     if (_style === "detailed" || _style === "verbose") {
302 |       content += `**Complexity:** ${func.complexity}\n\n`;
303 |     }
304 | 
305 |     content += "---\n\n";
306 |   }
307 | 
308 |   return {
309 |     title: "Function Reference",
310 |     content,
311 |     category: "reference",
312 |     codeReferences: functions.map((f) => f.name),
313 |     confidence: 0.9,
314 |   };
315 | }
316 | 
317 | /**
318 |  * Generate class reference documentation
319 |  */
320 | function generateClassReference(
321 |   classes: ClassInfo[],
322 |   _style: string,
323 | ): GeneratedSection {
324 |   let content = "# Class Reference\n\n";
325 | 
326 |   for (const cls of classes.filter((c) => c.isExported)) {
327 |     content += `## \`${cls.name}\`\n\n`;
328 | 
329 |     if (cls.docComment) {
330 |       content += `${cleanDocComment(cls.docComment)}\n\n`;
331 |     }
332 | 
333 |     // Inheritance
334 |     if (cls.extends) {
335 |       content += `**Extends:** \`${cls.extends}\`\n\n`;
336 |     }
337 | 
338 |     if (cls.implements.length > 0) {
339 |       content += `**Implements:** ${cls.implements
340 |         .map((i) => `\`${i}\``)
341 |         .join(", ")}\n\n`;
342 |     }
343 | 
344 |     // Properties
345 |     if (cls.properties.length > 0) {
346 |       content += "### Properties\n\n";
347 |       for (const prop of cls.properties) {
348 |         const visibility =
349 |           prop.visibility !== "public" ? `${prop.visibility} ` : "";
350 |         const readonly = prop.isReadonly ? "readonly " : "";
351 |         const static_ = prop.isStatic ? "static " : "";
352 |         content += `- ${visibility}${static_}${readonly}\`${prop.name}\`: \`${
353 |           prop.type || "any"
354 |         }\`\n`;
355 |       }
356 |       content += "\n";
357 |     }
358 | 
359 |     // Methods
360 |     if (cls.methods.length > 0) {
361 |       content += "### Methods\n\n";
362 |       for (const method of cls.methods.filter((m) => m.isPublic)) {
363 |         const params = method.parameters
364 |           .map((p) => `${p.name}: ${p.type || "any"}`)
365 |           .join(", ");
366 |         const returnType = method.returnType || "void";
367 |         const asyncPrefix = method.isAsync ? "async " : "";
368 | 
369 |         content += `#### \`${method.name}\`\n\n`;
370 | 
371 |         if (method.docComment) {
372 |           content += `${cleanDocComment(method.docComment)}\n\n`;
373 |         }
374 | 
375 |         content += "```typescript\n";
376 |         content += `${asyncPrefix}${method.name}(${params}): ${returnType}\n`;
377 |         content += "```\n\n";
378 |       }
379 |     }
380 | 
381 |     content += "---\n\n";
382 |   }
383 | 
384 |   return {
385 |     title: "Class Reference",
386 |     content,
387 |     category: "reference",
388 |     codeReferences: classes.map((c) => c.name),
389 |     confidence: 0.9,
390 |   };
391 | }
392 | 
393 | /**
394 |  * Generate interface reference documentation
395 |  */
396 | function generateInterfaceReference(
397 |   interfaces: InterfaceInfo[],
398 |   _style: string,
399 | ): GeneratedSection {
400 |   let content = "# Interface Reference\n\n";
401 | 
402 |   for (const iface of interfaces.filter((i) => i.isExported)) {
403 |     content += `## \`${iface.name}\`\n\n`;
404 | 
405 |     if (iface.docComment) {
406 |       content += `${cleanDocComment(iface.docComment)}\n\n`;
407 |     }
408 | 
409 |     if (iface.extends.length > 0) {
410 |       content += `**Extends:** ${iface.extends
411 |         .map((e) => `\`${e}\``)
412 |         .join(", ")}\n\n`;
413 |     }
414 | 
415 |     // Properties
416 |     if (iface.properties.length > 0) {
417 |       content += "### Properties\n\n";
418 |       content += "```typescript\n";
419 |       content += `interface ${iface.name} {\n`;
420 |       for (const prop of iface.properties) {
421 |         const readonly = prop.isReadonly ? "readonly " : "";
422 |         content += `  ${readonly}${prop.name}: ${prop.type || "any"};\n`;
423 |       }
424 |       content += "}\n";
425 |       content += "```\n\n";
426 |     }
427 | 
428 |     // Methods
429 |     if (iface.methods.length > 0) {
430 |       content += "### Methods\n\n";
431 |       for (const method of iface.methods) {
432 |         const params = method.parameters
433 |           .map((p) => `${p.name}: ${p.type || "any"}`)
434 |           .join(", ");
435 |         const returnType = method.returnType || "void";
436 |         content += `- \`${method.name}(${params}): ${returnType}\`\n`;
437 |       }
438 |       content += "\n";
439 |     }
440 | 
441 |     content += "---\n\n";
442 |   }
443 | 
444 |   return {
445 |     title: "Interface Reference",
446 |     content,
447 |     category: "reference",
448 |     codeReferences: interfaces.map((i) => i.name),
449 |     confidence: 0.9,
450 |   };
451 | }
452 | 
453 | /**
454 |  * Generate type reference documentation
455 |  */
456 | function generateTypeReference(types: any[], _style: string): GeneratedSection {
457 |   let content = "# Type Reference\n\n";
458 | 
459 |   for (const type of types.filter((t: any) => t.isExported)) {
460 |     content += `## \`${type.name}\`\n\n`;
461 | 
462 |     if (type.docComment) {
463 |       content += `${cleanDocComment(type.docComment)}\n\n`;
464 |     }
465 | 
466 |     content += "```typescript\n";
467 |     content += `type ${type.name} = ${type.definition};\n`;
468 |     content += "```\n\n";
469 | 
470 |     content += "---\n\n";
471 |   }
472 | 
473 |   return {
474 |     title: "Type Reference",
475 |     content,
476 |     category: "reference",
477 |     codeReferences: types.map((t: any) => t.name),
478 |     confidence: 0.85,
479 |   };
480 | }
481 | 
482 | /**
483 |  * Generate tutorial documentation
484 |  */
485 | function generateTutorialDocumentation(
486 |   analysis: any,
487 |   _similarProjects: any[],
488 |   includeExamples: boolean,
489 |   _style: string,
490 | ): GeneratedSection[] {
491 |   const sections: GeneratedSection[] = [];
492 | 
493 |   // Generate getting started tutorial
494 |   const tutorialContent = generateGettingStartedTutorial(
495 |     analysis,
496 |     includeExamples,
497 |   );
498 |   sections.push(tutorialContent);
499 | 
500 |   return sections;
501 | }
502 | 
503 | /**
504 |  * Generate getting started tutorial
505 |  */
506 | function generateGettingStartedTutorial(
507 |   analysis: any,
508 |   includeExamples: boolean,
509 | ): GeneratedSection {
510 |   let content = "# Getting Started\n\n";
511 | 
512 |   content += "This tutorial will guide you through using this module.\n\n";
513 | 
514 |   content += "## Installation\n\n";
515 |   content += "```bash\n";
516 |   content += "npm install your-package\n";
517 |   content += "```\n\n";
518 | 
519 |   content += "## Basic Usage\n\n";
520 | 
521 |   if (includeExamples && analysis.functions.length > 0) {
522 |     const mainFunction =
523 |       analysis.functions.find((f: any) => f.name === "main") ||
524 |       analysis.functions[0];
525 | 
526 |     content += `Import and use the main functions:\n\n`;
527 |     content += "```typescript\n";
528 |     content += `import { ${mainFunction.name} } from 'your-package';\n\n`;
529 | 
530 |     const exampleParams = mainFunction.parameters
531 |       .map((p: any) => {
532 |         if (p.type === "string") return `"example"`;
533 |         if (p.type === "number") return "42";
534 |         if (p.type === "boolean") return "true";
535 |         return "{}";
536 |       })
537 |       .join(", ");
538 | 
539 |     content += `// Example usage\n`;
540 |     content += `const result = ${mainFunction.isAsync ? "await " : ""}${
541 |       mainFunction.name
542 |     }(${exampleParams});\n`;
543 |     content += "console.log(result);\n";
544 |     content += "```\n\n";
545 |   }
546 | 
547 |   content += "## Next Steps\n\n";
548 |   content +=
549 |     "- Explore the [API Reference](#reference) for detailed documentation\n";
550 |   content += "- Check out [How-To Guides](#how-to) for specific use cases\n";
551 |   content +=
552 |     "- Read the [Explanation](#explanation) for deeper understanding\n\n";
553 | 
554 |   return {
555 |     title: "Getting Started Tutorial",
556 |     content,
557 |     category: "tutorial",
558 |     codeReferences: analysis.functions.map((f: any) => f.name),
559 |     confidence: 0.75,
560 |   };
561 | }
562 | 
563 | /**
564 |  * Generate how-to documentation
565 |  */
566 | function generateHowToDocumentation(
567 |   analysis: any,
568 |   _similarProjects: any[],
569 |   includeExamples: boolean,
570 |   _style: string,
571 | ): GeneratedSection[] {
572 |   const sections: GeneratedSection[] = [];
573 | 
574 |   // Generate how-to guides based on common patterns
575 |   if (analysis.functions.some((f: any) => f.isAsync)) {
576 |     sections.push(generateAsyncHowTo(analysis, includeExamples));
577 |   }
578 | 
579 |   if (analysis.classes.length > 0) {
580 |     sections.push(generateClassUsageHowTo(analysis, includeExamples));
581 |   }
582 | 
583 |   return sections;
584 | }
585 | 
586 | /**
587 |  * Generate async usage how-to
588 |  */
589 | function generateAsyncHowTo(
590 |   analysis: any,
591 |   includeExamples: boolean,
592 | ): GeneratedSection {
593 |   let content = "# How to Handle Async Operations\n\n";
594 | 
595 |   content += "This module uses async/await for asynchronous operations.\n\n";
596 | 
597 |   if (includeExamples) {
598 |     const asyncFunc = analysis.functions.find((f: any) => f.isAsync);
599 |     if (asyncFunc) {
600 |       content += "## Example\n\n";
601 |       content += "```typescript\n";
602 |       content += `try {\n`;
603 |       content += `  const result = await ${asyncFunc.name}();\n`;
604 |       content += `  console.log('Success:', result);\n`;
605 |       content += `} catch (error) {\n`;
606 |       content += `  console.error('Error:', error);\n`;
607 |       content += `}\n`;
608 |       content += "```\n\n";
609 |     }
610 |   }
611 | 
612 |   return {
613 |     title: "Async Operations Guide",
614 |     content,
615 |     category: "how-to",
616 |     codeReferences: analysis.functions
617 |       .filter((f: any) => f.isAsync)
618 |       .map((f: any) => f.name),
619 |     confidence: 0.8,
620 |   };
621 | }
622 | 
623 | /**
624 |  * Generate class usage how-to
625 |  */
626 | function generateClassUsageHowTo(
627 |   analysis: any,
628 |   includeExamples: boolean,
629 | ): GeneratedSection {
630 |   let content = "# How to Use Classes\n\n";
631 | 
632 |   const firstClass = analysis.classes[0];
633 |   if (firstClass && includeExamples) {
634 |     content += `## Creating an Instance\n\n`;
635 |     content += "```typescript\n";
636 |     content += `const instance = new ${firstClass.name}();\n`;
637 |     content += "```\n\n";
638 | 
639 |     if (firstClass.methods.length > 0) {
640 |       content += `## Using Methods\n\n`;
641 |       content += "```typescript\n";
642 |       const publicMethod = firstClass.methods.find((m: any) => m.isPublic);
643 |       if (publicMethod) {
644 |         content += `const result = ${
645 |           publicMethod.isAsync ? "await " : ""
646 |         }instance.${publicMethod.name}();\n`;
647 |       }
648 |       content += "```\n\n";
649 |     }
650 |   }
651 | 
652 |   return {
653 |     title: "Class Usage Guide",
654 |     content,
655 |     category: "how-to",
656 |     codeReferences: analysis.classes.map((c: any) => c.name),
657 |     confidence: 0.8,
658 |   };
659 | }
660 | 
661 | /**
662 |  * Generate explanation documentation
663 |  */
664 | function generateExplanationDocumentation(
665 |   analysis: any,
666 |   _similarProjects: any[],
667 |   _style: string,
668 | ): GeneratedSection[] {
669 |   const sections: GeneratedSection[] = [];
670 | 
671 |   // Generate architecture explanation
672 |   sections.push(generateArchitectureExplanation(analysis));
673 | 
674 |   return sections;
675 | }
676 | 
677 | /**
678 |  * Generate architecture explanation
679 |  */
680 | function generateArchitectureExplanation(analysis: any): GeneratedSection {
681 |   let content = "# Architecture\n\n";
682 | 
683 |   content += "## Overview\n\n";
684 |   content += `This module consists of ${analysis.functions.length} function(s), ${analysis.classes.length} class(es), and ${analysis.interfaces.length} interface(s).\n\n`;
685 | 
686 |   if (analysis.classes.length > 0) {
687 |     content += "## Class Structure\n\n";
688 |     content +=
689 |       "The module uses object-oriented patterns with the following classes:\n\n";
690 |     for (const cls of analysis.classes.filter((c: any) => c.isExported)) {
691 |       content += `- **${cls.name}**: ${cls.methods.length} method(s), ${cls.properties.length} property(ies)\n`;
692 |     }
693 |     content += "\n";
694 |   }
695 | 
696 |   if (analysis.complexity > 20) {
697 |     content += "## Complexity\n\n";
698 |     content += `This module has a moderate to high complexity score (${analysis.complexity}), indicating sophisticated logic and multiple control flow paths.\n\n`;
699 |   }
700 | 
701 |   return {
702 |     title: "Architecture Explanation",
703 |     content,
704 |     category: "explanation",
705 |     codeReferences: [
706 |       ...analysis.functions.map((f: any) => f.name),
707 |       ...analysis.classes.map((c: any) => c.name),
708 |     ],
709 |     confidence: 0.7,
710 |   };
711 | }
712 | 
713 | /**
714 |  * Find similar projects in knowledge graph
715 |  */
716 | async function findSimilarProjects(
717 |   analysis: any,
718 |   context?: any,
719 | ): Promise<any[]> {
720 |   try {
721 |     const query = `${analysis.language} ${analysis.functions.length} functions ${analysis.classes.length} classes`;
722 |     const results = await handleMemoryRecall({
723 |       query,
724 |       type: "analysis",
725 |       limit: 5,
726 |     });
727 | 
728 |     return results.memories || [];
729 |   } catch (error) {
730 |     await context?.warn?.(`Failed to retrieve similar projects: ${error}`);
731 |     return [];
732 |   }
733 | }
734 | 
735 | /**
736 |  * Calculate overall confidence
737 |  */
738 | function calculateOverallConfidence(sections: GeneratedSection[]): number {
739 |   if (sections.length === 0) return 0;
740 |   const avgConfidence =
741 |     sections.reduce((sum, s) => sum + s.confidence, 0) / sections.length;
742 |   return Math.round(avgConfidence * 100);
743 | }
744 | 
745 | /**
746 |  * Clean JSDoc comment
747 |  */
748 | function cleanDocComment(comment: string): string {
749 |   return comment
750 |     .replace(/\/\*\*|\*\//g, "")
751 |     .replace(/^\s*\* ?/gm, "")
752 |     .trim();
753 | }
754 | 
/**
 * MCP tool definition for `generate_contextual_content`.
 *
 * The JSON schema below mirrors the zod `inputSchema` used at runtime;
 * keep the two in sync when adding or changing options.
 * NOTE(review): `outputFormat` is advertised here but the handler
 * currently always emits markdown — confirm before documenting mdx/html.
 */
export const generateContextualContent: Tool = {
  name: "generate_contextual_content",
  description:
    "Generate context-aware documentation using AST analysis and knowledge graph insights (Phase 3)",
  inputSchema: {
    type: "object",
    properties: {
      filePath: {
        type: "string",
        description: "Path to the source code file to document",
      },
      documentationType: {
        type: "string",
        enum: ["tutorial", "how-to", "reference", "explanation", "all"],
        default: "reference",
        description: "Type of Diataxis documentation to generate",
      },
      includeExamples: {
        type: "boolean",
        default: true,
        description: "Include code examples in generated documentation",
      },
      style: {
        type: "string",
        enum: ["concise", "detailed", "verbose"],
        default: "detailed",
        description: "Documentation detail level",
      },
      outputFormat: {
        type: "string",
        enum: ["markdown", "mdx", "html"],
        default: "markdown",
        description: "Output format for generated content",
      },
    },
    required: ["filePath"],
  },
};
796 | 
```

--------------------------------------------------------------------------------
/docs/adrs/adr-0009-content-accuracy-validation-framework.md:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | id: adr-9-content-accuracy-validation-framework
  3 | title: "ADR-009: Content Accuracy Validation Framework"
  4 | sidebar_label: "ADR-009: Content Accuracy Validation Framework"
  5 | sidebar_position: 9
  6 | documcp:
  7 |   last_updated: "2025-01-14T00:00:00.000Z"
  8 |   last_validated: "2025-01-14T00:00:00.000Z"
  9 |   auto_updated: false
 10 |   update_frequency: monthly
 11 |   validated_against_commit: 40afe64
 12 | ---
 13 | 
 14 | # ADR-009: Content Accuracy and Validation Framework for Generated Documentation
 15 | 
 16 | ## Status
 17 | 
 18 | Accepted
 19 | 
 20 | ## Context
 21 | 
 22 | The Intelligent Content Population Engine (ADR-008) introduces sophisticated content generation capabilities, but with this power comes the critical challenge of ensuring content accuracy and handling scenarios where generated documentation is incorrect, outdated, or missing crucial context. This represents a fundamental risk to user trust and system adoption.
 23 | 
 24 | **Core Problem**: Automated content generation can fail in multiple ways:
 25 | 
 26 | - **Analysis Misinterpretation**: Repository analysis detects Express.js but project primarily uses GraphQL
 27 | - **Outdated Patterns**: Generated content assumes current best practices for deprecated framework versions
 28 | - **Missing Context**: Analysis cannot understand business domain, team conventions, or architectural constraints
 29 | - **Code Reality Mismatch**: Generated examples don't work with actual project structure
 30 | - **Confidence Overstatement**: System appears confident about uncertain conclusions
 31 | 
 32 | **Real-World Scenarios**:
 33 | 
 34 | 1. Analysis detects PostgreSQL in docker-compose but app actually uses MongoDB in production
 35 | 2. TypeScript project generates JavaScript examples due to build artifact analysis
 36 | 3. Monorepo analysis sees a partial picture, generating incomplete architectural guidance
 37 | 4. Custom framework wrappers confuse standard pattern detection
 38 | 5. Legacy code patterns produce guidance that recommends deprecated practices
 39 | 
 40 | **Current State**: ADR-008 includes basic content validation but lacks comprehensive accuracy assurance, user correction workflows, and systematic approaches to handling uncertainty and missing information.
 41 | 
 42 | **Strategic Importance**: Content accuracy directly impacts:
 43 | 
 44 | - User trust and adoption rates
 45 | - Time savings vs. time wasted on incorrect guidance
 46 | - System credibility in professional development environments
 47 | - Long-term viability as intelligent documentation assistant
 48 | 
 49 | ## Decision
 50 | 
 51 | We will implement a comprehensive Content Accuracy and Validation Framework that treats content correctness as a first-class architectural concern, with systematic approaches to uncertainty management, reality verification, and continuous accuracy improvement.
 52 | 
 53 | ### Framework Architecture:
 54 | 
 55 | #### 1. Multi-Layer Validation System
 56 | 
 57 | **Purpose**: Systematic verification at multiple stages of content generation
 58 | **Layers**:
 59 | 
 60 | - **Pre-Generation Validation**: Verify analysis accuracy before content creation
 61 | - **Generation-Time Validation**: Real-time checks during content assembly
 62 | - **Post-Generation Validation**: Comprehensive verification against project reality
 63 | - **User-Guided Validation**: Interactive accuracy confirmation and correction
 64 | 
 65 | #### 2. Confidence-Aware Content Generation
 66 | 
 67 | **Purpose**: Explicit uncertainty management and confidence scoring
 68 | **Capabilities**:
 69 | 
 70 | - Granular confidence metrics for different content aspects
 71 | - Uncertainty flagging for areas requiring user verification
 72 | - Content degradation strategies when confidence is insufficient
 73 | - Alternative content paths for ambiguous scenarios
 74 | 
 75 | #### 3. Reality-Check Validation Engine
 76 | 
 77 | **Purpose**: Verify generated content against actual project characteristics
 78 | **Verification Types**:
 79 | 
 80 | - Code example compilation and execution validation
 81 | - Pattern existence verification in actual codebase
 82 | - Dependency version compatibility checking
 83 | - Framework usage pattern matching
 84 | 
 85 | #### 4. Interactive Accuracy Workflow
 86 | 
 87 | **Purpose**: User-guided accuracy improvement and correction
 88 | **Components**:
 89 | 
 90 | - Pre-generation clarification requests for uncertain areas
 91 | - Inline content correction and improvement interfaces
 92 | - Accuracy feedback collection and learning system
 93 | - Project-specific accuracy profile building
 94 | 
 95 | ### Implementation Details:
 96 | 
 97 | #### Confidence-Aware Generation System
 98 | 
 99 | ```typescript
100 | interface ConfidenceAwareGenerator {
101 |   generateWithConfidence(
102 |     contentRequest: ContentRequest,
103 |     projectContext: ProjectContext,
104 |   ): ConfidenceAwareContent;
105 | 
106 |   handleUncertainty(
107 |     uncertainty: UncertaintyArea,
108 |     alternatives: ContentAlternative[],
109 |   ): UncertaintyHandlingStrategy;
110 | 
111 |   degradeContentSafely(
112 |     highRiskContent: GeneratedContent,
113 |     safetyThreshold: number,
114 |   ): SaferContent;
115 | }
116 | 
117 | interface ConfidenceAwareContent {
118 |   content: GeneratedContent;
119 |   confidence: ConfidenceMetrics;
120 |   uncertainties: UncertaintyFlag[];
121 |   validationRequests: ValidationRequest[];
122 |   alternatives: ContentAlternative[];
123 | }
124 | 
125 | interface ConfidenceMetrics {
126 |   overall: number; // 0-100
127 |   breakdown: {
128 |     technologyDetection: number;
129 |     frameworkVersionAccuracy: number;
130 |     codeExampleRelevance: number;
131 |     architecturalAssumptions: number;
132 |     businessContextAlignment: number;
133 |   };
134 |   riskFactors: RiskFactor[];
135 | }
136 | 
137 | interface UncertaintyFlag {
138 |   area: UncertaintyArea;
139 |   severity: "low" | "medium" | "high" | "critical";
140 |   description: string;
141 |   potentialImpact: string;
142 |   clarificationNeeded: string;
143 |   fallbackStrategy: string;
144 | }
145 | ```
146 | 
147 | #### Reality-Check Validation Engine
148 | 
149 | ```typescript
150 | interface RealityCheckValidator {
151 |   // Validate against actual project structure and code
152 |   validateAgainstCodebase(
153 |     content: GeneratedContent,
154 |     projectPath: string,
155 |   ): Promise<ValidationResult>;
156 | 
157 |   // Check if generated code examples actually work
158 |   validateCodeExamples(
159 |     examples: CodeExample[],
160 |     projectContext: ProjectContext,
161 |   ): Promise<CodeValidationResult>;
162 | 
163 |   // Verify framework patterns exist in project
164 |   verifyFrameworkPatterns(
165 |     patterns: FrameworkPattern[],
166 |     projectFiles: ProjectFile[],
167 |   ): PatternValidationResult;
168 | 
169 |   // Check dependency compatibility
170 |   validateDependencyCompatibility(
171 |     suggestions: DependencySuggestion[],
172 |     projectManifest: ProjectManifest,
173 |   ): CompatibilityResult;
174 | }
175 | 
176 | /**
177 |  * LLM-Enhanced Semantic Analysis (Phase 3 Implementation)
178 |  *
179 |  * Provides semantic understanding of code changes using LLM integration
180 |  * with fallback to AST-based analysis when LLM is unavailable.
181 |  */
182 | interface LLMSemanticAnalyzer {
183 |   // Analyze semantic impact of code changes using LLM
184 |   analyzeCodeChange(before: string, after: string): Promise<SemanticAnalysis>;
185 | 
186 |   // Simulate execution of code examples to validate correctness
187 |   simulateExecution(
188 |     example: string,
189 |     implementation: string,
190 |   ): Promise<SimulationResult>;
191 | 
192 |   // Hybrid analysis combining LLM and AST approaches
193 |   analyzeWithFallback(
194 |     before: string,
195 |     after: string,
196 |     options?: SemanticAnalysisOptions,
197 |   ): Promise<EnhancedSemanticAnalysis>;
198 | }
199 | 
200 | interface SemanticAnalysis {
201 |   hasBehavioralChange: boolean;
202 |   breakingForExamples: boolean;
203 |   changeDescription: string;
204 |   affectedDocSections: string[];
205 |   confidence: number;
206 | }
207 | 
208 | interface SemanticAnalysisOptions {
209 |   useLLM?: boolean;
210 |   confidenceThreshold?: number;
211 |   includeASTFallback?: boolean;
212 |   llmConfig?: {
213 |     provider?: "deepseek" | "openai" | "anthropic" | "ollama";
214 |     apiKey?: string;
215 |     model?: string;
216 |   };
217 | }
218 | 
219 | interface EnhancedSemanticAnalysis extends SemanticAnalysis {
220 |   analysisMode: "llm" | "ast" | "hybrid";
221 |   astDiffs?: CodeDiff[];
222 |   llmAvailable: boolean;
223 |   timestamp: string;
224 | }
225 | 
226 | interface ValidationResult {
227 |   isValid: boolean;
228 |   confidence: number;
229 |   issues: ValidationIssue[];
230 |   suggestions: ImprovementSuggestion[];
231 |   corrections: AutomaticCorrection[];
232 | }
233 | 
234 | interface ValidationIssue {
235 |   type: IssueType;
236 |   severity: "error" | "warning" | "info";
237 |   location: ContentLocation;
238 |   description: string;
239 |   evidence: Evidence[];
240 |   suggestedFix: string;
241 |   confidence: number;
242 | }
243 | 
244 | class TypeScriptRealityChecker implements RealityCheckValidator {
245 |   async validateCodeExamples(
246 |     examples: CodeExample[],
247 |     projectContext: ProjectContext,
248 |   ): Promise<CodeValidationResult> {
249 |     const results: ExampleValidation[] = [];
250 | 
251 |     for (const example of examples) {
252 |       try {
253 |         // Create temporary test file
254 |         const testFile = await this.createTestFile(example, projectContext);
255 | 
256 |         // Attempt TypeScript compilation
257 |         const compileResult = await this.compileTypeScript(testFile);
258 | 
259 |         // Run basic execution test if compilation succeeds
260 |         const executionResult = compileResult.success
261 |           ? await this.testExecution(testFile)
262 |           : null;
263 | 
264 |         results.push({
265 |           example: example.id,
266 |           compilationSuccess: compileResult.success,
267 |           executionSuccess: executionResult?.success ?? false,
268 |           issues: [...compileResult.errors, ...(executionResult?.errors ?? [])],
269 |           confidence: this.calculateExampleConfidence(
270 |             compileResult,
271 |             executionResult,
272 |           ),
273 |         });
274 |       } catch (error) {
275 |         results.push({
276 |           example: example.id,
277 |           compilationSuccess: false,
278 |           executionSuccess: false,
279 |           issues: [{ type: "validation_error", message: error.message }],
280 |           confidence: 0,
281 |         });
282 |       }
283 |     }
284 | 
285 |     return {
286 |       overallSuccess: results.every((r) => r.compilationSuccess),
287 |       exampleResults: results,
288 |       confidence: this.calculateOverallConfidence(results),
289 |     };
290 |   }
291 | }
292 | ```
293 | 
294 | #### Interactive Accuracy Workflow
295 | 
296 | ```typescript
297 | interface InteractiveAccuracyWorkflow {
298 |   // Pre-generation clarification
299 |   requestClarification(
300 |     uncertainties: UncertaintyFlag[],
301 |     analysisContext: AnalysisContext,
302 |   ): Promise<UserClarification>;
303 | 
304 |   // Real-time accuracy feedback during generation
305 |   enableRealTimeFeedback(
306 |     generationSession: GenerationSession,
307 |   ): AccuracyFeedbackInterface;
308 | 
309 |   // Post-generation correction and improvement
310 |   facilitateCorrections(
311 |     generatedContent: GeneratedContent,
312 |     userContext: UserContext,
313 |   ): CorrectionInterface;
314 | 
315 |   // Learning from corrections
316 |   recordAccuracyLearning(
317 |     original: GeneratedContent,
318 |     corrected: GeneratedContent,
319 |     userFeedback: UserFeedback,
320 |   ): AccuracyLearning;
321 | }
322 | 
323 | interface UserClarification {
324 |   uncertaintyArea: UncertaintyArea;
325 |   userResponse: string;
326 |   confidence: number;
327 |   additionalContext?: string;
328 | }
329 | 
330 | interface CorrectionInterface {
331 |   // Inline editing capabilities
332 |   enableInlineEditing(content: GeneratedContent): EditableContent;
333 | 
334 |   // Structured feedback collection
335 |   collectStructuredFeedback(
336 |     content: GeneratedContent,
337 |   ): Promise<StructuredFeedback>;
338 | 
339 |   // Quick accuracy rating
340 |   requestAccuracyRating(
341 |     contentSection: ContentSection,
342 |   ): Promise<AccuracyRating>;
343 | 
344 |   // Pattern correction learning
345 |   identifyPatternCorrections(
346 |     corrections: ContentCorrection[],
347 |   ): PatternLearning[];
348 | }
349 | ```
350 | 
351 | #### Fallback and Recovery Strategies
352 | 
353 | ```typescript
354 | interface ContentFallbackStrategy {
355 |   // Progressive content degradation
356 |   degradeToSaferContent(
357 |     failedContent: GeneratedContent,
358 |     validationFailures: ValidationFailure[],
359 |   ): SaferContent;
360 | 
361 |   // Multiple alternative generation
362 |   generateAlternatives(
363 |     contentRequest: ContentRequest,
364 |     primaryFailure: GenerationFailure,
365 |   ): ContentAlternative[];
366 | 
367 |   // Graceful uncertainty handling
368 |   handleInsufficientInformation(
369 |     analysisGaps: AnalysisGap[],
370 |     contentRequirements: ContentRequirement[],
371 |   ): PartialContent;
372 | 
373 |   // Safe default content
374 |   provideSafeDefaults(
375 |     projectType: ProjectType,
376 |     framework: Framework,
377 |     confidence: number,
378 |   ): DefaultContent;
379 | }
380 | 
381 | interface SafetyThresholds {
382 |   minimumConfidenceForCodeExamples: 85;
383 |   minimumConfidenceForArchitecturalAdvice: 75;
384 |   minimumConfidenceForProductionGuidance: 90;
385 |   uncertaintyThresholdForUserConfirmation: 70;
386 | }
387 | 
388 | const fallbackHierarchy = [
389 |   {
390 |     level: "project-specific-optimized",
391 |     confidence: 85,
392 |     description: "Highly confident project-specific content",
393 |   },
394 |   {
395 |     level: "framework-specific-validated",
396 |     confidence: 95,
397 |     description: "Framework patterns validated against project",
398 |   },
399 |   {
400 |     level: "technology-generic-safe",
401 |     confidence: 98,
402 |     description: "Generic patterns known to work",
403 |   },
404 |   {
405 |     level: "diataxis-structure-only",
406 |     confidence: 100,
407 |     description: "Structure with clear placeholders for manual completion",
408 |   },
409 | ];
410 | ```
411 | 
412 | ## Alternatives Considered
413 | 
414 | ### Trust-But-Verify Approach (Basic Validation Only)
415 | 
416 | - **Pros**: Simpler implementation, faster content generation, less user friction
417 | - **Cons**: High risk of incorrect content, potential user frustration, system credibility damage
418 | - **Decision**: Rejected - accuracy is fundamental to system value proposition
419 | 
420 | ### AI-Only Validation (External LLM Review)
421 | 
422 | - **Pros**: Advanced natural language understanding, sophisticated error detection
423 | - **Cons**: External dependencies, costs, latency, inconsistent results, black box validation
424 | - **Decision**: Rejected for primary validation - may integrate as supplementary check
425 | 
426 | ### Manual Review Required (Human-in-the-Loop Always)
427 | 
428 | - **Pros**: Maximum accuracy assurance, user control, learning opportunities
429 | - **Cons**: Eliminates automation benefits, slows workflow, high user burden
430 | - **Decision**: Rejected as default - integrate as optional high-accuracy mode
431 | 
432 | ### Static Analysis Only (No Dynamic Validation)
433 | 
434 | - **Pros**: Fast execution, no code execution risks, consistent results
435 | - **Cons**: Misses runtime issues, limited pattern verification, poor accuracy detection
436 | - **Decision**: Rejected as sole approach - integrate as first-pass validation
437 | 
438 | ### Crowdsourced Accuracy (Community Validation)
439 | 
440 | - **Pros**: Diverse perspectives, real-world validation, community engagement
441 | - **Cons**: Inconsistent quality, coordination complexity, slow feedback loops
442 | - **Decision**: Deferred to future enhancement - focus on systematic validation first
443 | 
444 | ## Consequences
445 | 
446 | ### Positive
447 | 
448 | - **Trust and Credibility**: Systematic accuracy assurance builds user confidence
449 | - **Reduced Risk**: Explicit uncertainty handling prevents misleading guidance
450 | - **Continuous Improvement**: Learning from corrections improves future accuracy
451 | - **Professional Reliability**: Reality-check validation ensures professional-grade output
452 | - **User Empowerment**: Interactive workflows give users control over accuracy
453 | 
454 | ### Negative
455 | 
456 | - **Implementation Complexity**: Multi-layer validation requires significant engineering effort
457 | - **Performance Impact**: Validation processes may slow content generation
458 | - **User Experience Friction**: Clarification requests may interrupt workflow
459 | - **Maintenance Overhead**: Validation rules require updates as technologies evolve
460 | 
461 | ### Risks and Mitigations
462 | 
463 | - **Validation Accuracy**: Validate the validators through comprehensive testing
464 | - **Performance Impact**: Implement parallel validation and smart caching
465 | - **User Fatigue**: Balance accuracy requests with workflow efficiency
466 | - **Technology Coverage**: Start with well-known patterns, expand methodically
467 | 
468 | ## Integration Points
469 | 
470 | ### Repository Analysis Integration (ADR-002)
471 | 
472 | - Use analysis confidence metrics to inform content generation confidence
473 | - Validate analysis assumptions against actual project characteristics
474 | - Identify analysis gaps that require user clarification
475 | 
476 | ### Content Population Integration (ADR-008)
477 | 
478 | - Integrate validation framework into content generation pipeline
479 | - Use confidence metrics to guide content generation strategies
480 | - Apply reality-check validation to all generated content
481 | 
482 | ### MCP Tools API Integration (ADR-006)
483 | 
484 | - Add validation results to MCP tool responses
485 | - Provide user interfaces for accuracy feedback and correction
486 | - Maintain consistency with existing error handling patterns
487 | 
488 | ### Diataxis Framework Integration (ADR-004)
489 | 
490 | - Ensure validation preserves Diataxis category integrity
491 | - Validate content type appropriateness within framework
492 | - Maintain cross-reference accuracy across content categories
493 | 
494 | ## Implementation Roadmap
495 | 
496 | ### Phase 1: Core Validation Infrastructure (High Priority)
497 | 
498 | - Confidence scoring system implementation
499 | - Basic reality-check validation for common patterns
500 | - User clarification workflow for high-uncertainty areas
501 | - Fallback content generation strategies
502 | 
503 | ### Phase 2: Advanced Validation (Medium Priority)
504 | 
505 | - Code example compilation and execution testing
506 | - Framework pattern existence verification
507 | - Interactive correction interfaces
508 | - Accuracy learning and improvement systems
509 | 
510 | ### Phase 2.5: LLM-Enhanced Semantic Analysis (Implemented)
511 | 
512 | - **LLM Integration Layer**: Unified interface for multiple LLM providers (DeepSeek, OpenAI, Anthropic, Ollama)
513 | - **Semantic Code Analysis**: LLM-powered understanding of code change impact on documentation
514 | - **Execution Simulation**: Validate code examples through LLM-based execution simulation
515 | - **Hybrid Analysis**: Combine LLM semantic analysis with AST-based fallback for reliability
516 | - **Rate Limiting & Error Handling**: Robust API management with graceful degradation
517 | - **Multi-Provider Support**: Provider-agnostic design supporting multiple LLM backends
518 | 
519 | ### Phase 3: Intelligent Accuracy Features (Future)
520 | 
521 | - Machine learning-based accuracy prediction
522 | - Community-driven validation and improvement
523 | - Advanced uncertainty reasoning and handling
524 | - Personalized accuracy preferences and thresholds
525 | 
526 | ## Quality Assurance
527 | 
528 | ### Validation Testing Framework
529 | 
530 | ```typescript
531 | describe("ContentAccuracyFramework", () => {
532 |   describe("Confidence Scoring", () => {
533 |     it("should correctly identify low-confidence scenarios");
534 |     it("should provide appropriate uncertainty flags");
535 |     it("should degrade content safely when confidence is insufficient");
536 |   });
537 | 
538 |   describe("Reality-Check Validation", () => {
539 |     it("should detect when generated code examples fail compilation");
540 |     it("should identify pattern mismatches with actual codebase");
541 |     it("should validate dependency compatibility accurately");
542 |   });
543 | 
544 |   describe("Interactive Workflows", () => {
545 |     it("should request clarification for appropriate uncertainty levels");
546 |     it("should enable effective user corrections and learning");
547 |     it("should maintain accuracy improvements across sessions");
548 |   });
549 | });
550 | ```
551 | 
552 | ### Accuracy Metrics and Monitoring
553 | 
554 | - **Content Accuracy Rate**: Percentage of generated content validated as correct
555 | - **User Correction Rate**: Frequency of user corrections per content section
556 | - **Confidence Calibration**: Alignment between confidence scores and actual accuracy
557 | - **Validation Performance**: Speed and accuracy of validation processes
558 | 
559 | ### Continuous Improvement Process
560 | 
561 | - Regular validation of validation systems (meta-validation)
562 | - User feedback analysis and pattern identification
563 | - Technology pattern database updates and maintenance
564 | - Accuracy threshold tuning based on real-world usage
565 | 
566 | ## Success Metrics
567 | 
568 | ### Accuracy Metrics
569 | 
570 | - **Content Accuracy Rate**: 85%+ technical accuracy for generated content
571 | - **Confidence Calibration**: ±10% alignment between confidence and actual accuracy
572 | - **False Positive Rate**: &lt;5% validation failures for actually correct content
573 | - **User Correction Rate**: &lt;20% of content sections require user correction
574 | 
575 | ### User Experience Metrics
576 | 
577 | - **Trust Score**: 90%+ user confidence in system accuracy
578 | - **Workflow Efficiency**: Validation processes add &lt;15% to generation time
579 | - **Clarification Effectiveness**: 80%+ of clarification requests improve accuracy
580 | - **Learning Effectiveness**: 70% reduction in repeat accuracy issues
581 | 
582 | ### System Reliability Metrics
583 | 
584 | - **Validation Coverage**: 95%+ of generated content passes through validation
585 | - **Fallback Effectiveness**: 100% of failed generations provide safe alternatives
586 | - **Error Recovery**: 90%+ of validation failures result in improved content
587 | - **Performance Impact**: &lt;30 seconds total for accuracy-validated content generation
588 | 
589 | ## Future Enhancements
590 | 
591 | ### Advanced Validation Technologies
592 | 
593 | - **Static Analysis Integration**: Deeper code analysis for pattern verification
594 | - **Dynamic Testing**: Automated testing of generated examples in project context
595 | - **Semantic Validation**: AI-powered understanding of content meaning and correctness
596 | - **Cross-Project Learning**: Accuracy improvements shared across similar projects
597 | 
598 | ### User Experience Improvements
599 | 
600 | - **Accuracy Preferences**: User-configurable accuracy vs. speed trade-offs
601 | - **Domain-Specific Validation**: Specialized validation for different technical domains
602 | - **Real-Time Collaboration**: Team-based accuracy review and improvement workflows
603 | - **Accuracy Analytics**: Detailed insights into content accuracy patterns and trends
604 | 
605 | ### Integration Expansions
606 | 
607 | - **IDE Integration**: Real-time accuracy feedback in development environments
608 | - **CI/CD Integration**: Accuracy validation as part of documentation deployment
609 | - **Documentation Management**: Integration with existing documentation systems
610 | - **Quality Metrics**: Accuracy tracking as part of documentation quality scoring
611 | 
612 | ## References
613 | 
614 | - [ADR-002: Multi-Layered Repository Analysis Engine Design](adr-0002-repository-analysis-engine.md)
615 | - [ADR-008: Intelligent Content Population Engine](adr-0008-intelligent-content-population-engine.md)
616 | - [Software Verification and Validation](https://en.wikipedia.org/wiki/Software_verification_and_validation)
617 | - [Web Content Accessibility Guidelines](https://www.w3.org/WAI/WCAG21/quickref/)
618 | - [AI Documentation Best Practices](https://developers.google.com/machine-learning/guides/rules-of-ml)
619 | - Commit: f7b6fcd - feat: Add LLM integration layer for semantic code analysis (#82)
620 | - GitHub Issue: #82 - LLM integration layer for semantic code analysis
621 | 
```

--------------------------------------------------------------------------------
/docs/reference/mcp-tools.md:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | documcp:
  3 |   last_updated: "2025-11-20T00:46:21.963Z"
  4 |   last_validated: "2025-12-09T19:41:38.593Z"
  5 |   auto_updated: false
  6 |   update_frequency: monthly
  7 |   validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
  8 | ---
  9 | 
 10 | # MCP Tools API Reference
 11 | 
 12 | DocuMCP provides a comprehensive set of tools via the Model Context Protocol (MCP). These tools enable intelligent documentation deployment through repository analysis, SSG recommendations, and automated GitHub Pages setup.
 13 | 
 14 | ## Implementation Details
 15 | 
 16 | DocuMCP implements the MCP protocol using the low-level `Server` class from `@modelcontextprotocol/sdk/server/index.js` with `StdioServerTransport` for process-based communication. Tools are registered manually using `setRequestHandler` with `CallToolRequestSchema` and `ListToolsRequestSchema`, providing full control over tool execution and response formatting.
 17 | 
 18 | ## Core Documentation Tools
 19 | 
 20 | ### analyze_repository
 21 | 
 22 | **Description**: Analyze repository structure, dependencies, and documentation needs
 23 | 
 24 | **Parameters**:
 25 | 
 26 | - `path` (string, required): Path to the repository to analyze
 27 | - `depth` (enum, optional): Analysis depth level
 28 |   - `"quick"`: Fast overview focusing on basic structure
 29 |   - `"standard"`: Comprehensive analysis (default)
 30 |   - `"deep"`: Detailed analysis with advanced insights
 31 | 
 32 | **Returns**: Analysis object containing:
 33 | 
 34 | - `id`: Unique analysis identifier for use in other tools
 35 | - `timestamp`: Analysis execution time
 36 | - `structure`: File counts, languages, and project features
 37 | - `dependencies`: Package ecosystem and dependency analysis
 38 | - `documentation`: Existing documentation assessment
 39 | - `recommendations`: Project classification and team size estimates
 40 | 
 41 | **Example**:
 42 | 
 43 | ```json
 44 | {
 45 |   "path": "/path/to/repository",
 46 |   "depth": "standard"
 47 | }
 48 | ```
 49 | 
 50 | ### recommend_ssg
 51 | 
 52 | **Description**: Recommend the best static site generator based on project analysis
 53 | 
 54 | **Parameters**:
 55 | 
 56 | - `analysisId` (string, required): ID from previous repository analysis
 57 | - `preferences` (object, optional):
 58 |   - `priority`: `"simplicity"`, `"features"`, or `"performance"`
 59 |   - `ecosystem`: `"javascript"`, `"python"`, `"ruby"`, `"go"`, or `"any"`
 60 | 
 61 | **Returns**: Recommendation object with weighted scoring and justifications
 62 | 
 63 | **Example**:
 64 | 
 65 | ```json
 66 | {
 67 |   "analysisId": "analysis_abc123",
 68 |   "preferences": {
 69 |     "priority": "simplicity",
 70 |     "ecosystem": "javascript"
 71 |   }
 72 | }
 73 | ```
 74 | 
 75 | ### generate_config
 76 | 
 77 | **Description**: Generate configuration files for the selected static site generator
 78 | 
 79 | **Parameters**:
 80 | 
 81 | - `ssg` (enum, required): `"jekyll"`, `"hugo"`, `"docusaurus"`, `"mkdocs"`, or `"eleventy"`
 82 | - `projectName` (string, required): Name of the project
 83 | - `projectDescription` (string, optional): Brief description
 84 | - `outputPath` (string, required): Where to generate config files
 85 | 
 86 | **Returns**: Generated configuration files and setup instructions
 87 | 
 88 | **Example**:
 89 | 
 90 | ```json
 91 | {
 92 |   "ssg": "hugo",
 93 |   "projectName": "My Documentation Site",
 94 |   "outputPath": "./docs"
 95 | }
 96 | ```
 97 | 
 98 | ### setup_structure
 99 | 
100 | **Description**: Create Diataxis-compliant documentation structure
101 | 
102 | **Parameters**:
103 | 
104 | - `path` (string, required): Root path for documentation
105 | - `ssg` (enum, required): Static site generator type
106 | - `includeExamples` (boolean, optional, default: true): Include example content
107 | 
108 | **Returns**: Created directory structure following Diataxis framework:
109 | 
110 | - **tutorials/**: Learning-oriented guides for skill acquisition (study context)
111 | - **how-to/**: Problem-solving guides for specific tasks (work context)
112 | - **reference/**: Information-oriented content for lookup and verification (information context)
113 | - **explanation/**: Understanding-oriented content for context and background (understanding context)
114 | 
115 | **Example**:
116 | 
117 | ```json
118 | {
119 |   "path": "./docs",
120 |   "ssg": "mkdocs",
121 |   "includeExamples": true
122 | }
123 | ```
124 | 
125 | ### deploy_pages
126 | 
127 | **Description**: Set up GitHub Pages deployment workflow
128 | 
129 | **Parameters**:
130 | 
131 | - `repository` (string, required): Repository path or URL
132 | - `ssg` (enum, required): Static site generator type
133 | - `branch` (string, optional, default: "gh-pages"): Deployment branch
134 | - `customDomain` (string, optional): Custom domain name
135 | 
136 | **Returns**: GitHub Actions workflow files for automated deployment
137 | 
138 | **Example**:
139 | 
140 | ```json
141 | {
142 |   "repository": "username/repository",
143 |   "ssg": "docusaurus",
144 |   "customDomain": "docs.example.com"
145 | }
146 | ```
147 | 
148 | ### verify_deployment
149 | 
150 | **Description**: Verify and troubleshoot GitHub Pages deployment
151 | 
152 | **Parameters**:
153 | 
154 | - `repository` (string, required): Repository path or URL
155 | - `url` (string, optional): Expected deployment URL
156 | 
157 | **Returns**: Deployment status and troubleshooting recommendations
158 | 
159 | **Example**:
160 | 
161 | ```json
162 | {
163 |   "repository": "username/repository",
164 |   "url": "https://username.github.io/repository"
165 | }
166 | ```
167 | 
168 | ## Content Management Tools
169 | 
170 | ### populate_diataxis_content
171 | 
172 | **Description**: Intelligently populate Diataxis documentation with project-specific content
173 | 
174 | **Parameters**:
175 | 
176 | - `analysisId` (string, required): Repository analysis ID
177 | - `docsPath` (string, required): Path to documentation directory
178 | - `populationLevel` (enum, optional, default: "comprehensive"): Content generation level
179 | - `includeProjectSpecific` (boolean, optional, default: true): Include project-specific content
180 | - `preserveExisting` (boolean, optional, default: true): Preserve existing content
181 | - `technologyFocus` (array of strings, optional): Specific technologies to emphasize
182 | 
183 | **Returns**: Populated content metrics and file creation summary
184 | 
185 | ### update_existing_documentation
186 | 
187 | **Description**: Intelligently analyze and update existing documentation using memory insights
188 | 
189 | **Parameters**:
190 | 
191 | - `analysisId` (string, required): Repository analysis ID
192 | - `docsPath` (string, required): Path to existing documentation directory
193 | - `compareMode` (enum, optional, default: "comprehensive"): Comparison mode
194 | - `updateStrategy` (enum, optional, default: "moderate"): Update aggressiveness
195 | - `preserveStyle` (boolean, optional, default: true): Preserve existing style
196 | - `focusAreas` (array of strings, optional): Specific areas to focus on
197 | 
198 | **Returns**: Update recommendations and gap analysis
199 | 
200 | ### detect_documentation_gaps
201 | 
202 | **Description**: Analyze repository and existing documentation to identify missing content
203 | 
204 | **Parameters**:
205 | 
206 | - `repositoryPath` (string, required): Path to the repository
207 | - `documentationPath` (string, optional): Path to existing documentation
208 | - `analysisId` (string, optional): Optional existing analysis ID to reuse
209 | - `depth` (enum, optional, default: "standard"): Analysis depth
210 | 
211 | **Returns**: Identified gaps and recommendations for improvement
212 | 
213 | ## Validation Tools
214 | 
215 | ### validate_diataxis_content
216 | 
217 | **Description**: Validate the accuracy, completeness, and compliance of generated Diataxis documentation
218 | 
219 | **Parameters**:
220 | 
221 | - `contentPath` (string, required): Path to documentation directory to validate
222 | - `analysisId` (string, optional): Repository analysis ID for context
223 | - `validationType` (enum, optional, default: "all"): Type of validation
224 | - `includeCodeValidation` (boolean, optional, default: true): Validate code examples
225 | - `confidence` (enum, optional, default: "moderate"): Validation confidence level
226 | 
227 | **Returns**: Validation results with issues, recommendations, and confidence scores
228 | 
229 | ### validate_content
230 | 
231 | **Description**: Validate general content quality including links and code syntax
232 | 
233 | **Parameters**:
234 | 
235 | - `contentPath` (string, required): Path to content directory
236 | - `validationType` (string, optional, default: "all"): Validation type
237 | - `includeCodeValidation` (boolean, optional, default: true): Validate code blocks
238 | - `followExternalLinks` (boolean, optional, default: false): Check external URLs
239 | 
240 | **Returns**: Content validation results with broken links and code errors
241 | 
242 | ### check_documentation_links
243 | 
244 | **Description**: Comprehensive link checking for documentation deployment
245 | 
246 | **Parameters**:
247 | 
248 | - `documentation_path` (string, optional, default: "./docs"): Documentation directory
249 | - `check_external_links` (boolean, optional, default: true): Validate external URLs
250 | - `check_internal_links` (boolean, optional, default: true): Validate internal references
251 | - `check_anchor_links` (boolean, optional, default: true): Validate anchor links
252 | - `timeout_ms` (number, optional, default: 5000): Request timeout
253 | - `max_concurrent_checks` (number, optional, default: 5): Concurrent check limit
254 | 
255 | **Returns**: Comprehensive link validation report
256 | 
257 | ## Testing and Deployment Tools
258 | 
259 | ### test_local_deployment
260 | 
261 | **Description**: Test documentation build and local server before deploying to GitHub Pages
262 | 
263 | **Parameters**:
264 | 
265 | - `repositoryPath` (string, required): Path to the repository
266 | - `ssg` (enum, required): Static site generator type
267 | - `port` (number, optional, default: 3000): Local server port
268 | - `timeout` (number, optional, default: 60): Build timeout in seconds
269 | - `skipBuild` (boolean, optional, default: false): Skip build step
270 | 
271 | **Returns**: Local testing results and server status
272 | 
273 | ## README Management Tools
274 | 
275 | ### evaluate_readme_health
276 | 
277 | **Description**: Evaluate README files for community health and onboarding effectiveness
278 | 
279 | **Parameters**:
280 | 
281 | - `readme_path` (string, required): Path to README file
282 | - `project_type` (enum, optional, default: "community_library"): Project type
283 | - `repository_path` (string, optional): Repository path for context
284 | 
285 | **Returns**: Health evaluation with scores and recommendations
286 | 
287 | ### readme_best_practices
288 | 
289 | **Description**: Analyze README files against best practices checklist
290 | 
291 | **Parameters**:
292 | 
293 | - `readme_path` (string, required): Path to README file
294 | - `project_type` (enum, optional, default: "library"): Project type
295 | - `generate_template` (boolean, optional, default: false): Generate templates
296 | - `target_audience` (enum, optional, default: "mixed"): Target audience
297 | 
298 | **Returns**: Best practices analysis and improvement recommendations
299 | 
300 | ### generate_readme_template
301 | 
302 | **Description**: Generate standardized README templates for different project types
303 | 
304 | **Parameters**:
305 | 
306 | - `projectName` (string, required): Name of the project
307 | - `description` (string, required): Brief project description
308 | - `templateType` (enum, required): Project template type
309 | - `author` (string, optional): Project author/organization
310 | - `license` (string, optional, default: "MIT"): Project license
311 | - `outputPath` (string, optional): Output file path
312 | 
313 | **Returns**: Generated README template content
314 | 
315 | ### validate_readme_checklist
316 | 
317 | **Description**: Validate README files against community best practices checklist
318 | 
319 | **Parameters**:
320 | 
321 | - `readmePath` (string, required): Path to README file
322 | - `projectPath` (string, optional): Project directory for context
323 | - `strict` (boolean, optional, default: false): Use strict validation
324 | - `outputFormat` (enum, optional, default: "console"): Output format
325 | 
326 | **Returns**: Validation report with detailed scoring
327 | 
328 | ### analyze_readme
329 | 
330 | **Description**: Comprehensive README analysis with length assessment and optimization opportunities
331 | 
332 | **Parameters**:
333 | 
334 | - `project_path` (string, required): Path to project directory
335 | - `target_audience` (enum, optional, default: "community_contributors"): Target audience
336 | - `optimization_level` (enum, optional, default: "moderate"): Optimization level
337 | - `max_length_target` (number, optional, default: 300): Target max length
338 | 
339 | **Returns**: README analysis with optimization recommendations
340 | 
341 | ### optimize_readme
342 | 
343 | **Description**: Optimize README content by restructuring and condensing
344 | 
345 | **Parameters**:
346 | 
347 | - `readme_path` (string, required): Path to README file
348 | - `strategy` (enum, optional, default: "community_focused"): Optimization strategy
349 | - `max_length` (number, optional, default: 300): Target maximum length
350 | - `include_tldr` (boolean, optional, default: true): Include TL;DR section
351 | - `create_docs_directory` (boolean, optional, default: true): Create docs directory
352 | 
353 | **Returns**: Optimized README content and extracted documentation
354 | 
355 | ## Documentation Freshness Tracking Tools
356 | 
357 | DocuMCP includes comprehensive tools for tracking and managing documentation freshness, ensuring your documentation stays up-to-date and identifying files that need attention.
358 | 
359 | ### track_documentation_freshness
360 | 
361 | **Description**: Scan documentation directory for staleness markers and identify files needing updates based on configurable time thresholds (minutes, hours, days)
362 | 
363 | **Parameters**:
364 | 
365 | - `docsPath` (string, required): Path to documentation directory
366 | - `projectPath` (string, optional): Path to project root (for knowledge graph tracking)
367 | - `warningThreshold` (object, optional): Warning threshold (yellow flag)
368 |   - `value` (number, positive): Threshold value
369 |   - `unit` (enum): `"minutes"`, `"hours"`, or `"days"`
370 | - `staleThreshold` (object, optional): Stale threshold (orange flag)
371 |   - `value` (number, positive): Threshold value
372 |   - `unit` (enum): `"minutes"`, `"hours"`, or `"days"`
373 | - `criticalThreshold` (object, optional): Critical threshold (red flag)
374 |   - `value` (number, positive): Threshold value
375 |   - `unit` (enum): `"minutes"`, `"hours"`, or `"days"`
376 | - `preset` (enum, optional): Use predefined threshold preset
377 |   - Options: `"realtime"`, `"active"`, `"recent"`, `"weekly"`, `"monthly"`, `"quarterly"`
378 | - `includeFileList` (boolean, optional, default: true): Include detailed file list in response
379 | - `sortBy` (enum, optional, default: "staleness"): Sort order for file list
380 |   - Options: `"age"`, `"path"`, `"staleness"`
381 | - `storeInKG` (boolean, optional, default: true): Store tracking event in knowledge graph for historical analysis
382 | 
383 | **Returns**: Freshness report with:
384 | 
385 | - Summary statistics (total files, fresh, warning, stale, critical)
386 | - Detailed file list with staleness levels
387 | - Age information for each file
388 | - Recommendations for action
389 | 
390 | **Example**:
391 | 
392 | ```json
393 | {
394 |   "docsPath": "/path/to/docs",
395 |   "preset": "monthly",
396 |   "includeFileList": true
397 | }
398 | ```
399 | 
400 | **Default Thresholds**:
401 | 
402 | - Warning: 7 days
403 | - Stale: 30 days
404 | - Critical: 90 days
405 | 
406 | ### validate_documentation_freshness
407 | 
408 | **Description**: Validate documentation freshness, initialize metadata for files without it, and update timestamps based on code changes
409 | 
410 | **Parameters**:
411 | 
412 | - `docsPath` (string, required): Path to documentation directory
413 | - `projectPath` (string, required): Path to project root (for git integration)
414 | - `initializeMissing` (boolean, optional, default: true): Initialize metadata for files without it
415 | - `updateExisting` (boolean, optional, default: false): Update last_validated timestamp for all files
416 | - `updateFrequency` (enum, optional, default: "monthly"): Default update frequency for new metadata
417 |   - Options: `"realtime"`, `"active"`, `"recent"`, `"weekly"`, `"monthly"`, `"quarterly"`
418 | - `validateAgainstGit` (boolean, optional, default: true): Validate against current git commit
419 | 
420 | **Returns**: Validation report with:
421 | 
422 | - Files initialized (new metadata created)
423 | - Files updated (existing metadata refreshed)
424 | - Metadata structure for each file
425 | - Recommendations for next steps
426 | 
427 | **Example**:
428 | 
429 | ```json
430 | {
431 |   "docsPath": "/path/to/docs",
432 |   "projectPath": "/path/to/project",
433 |   "initializeMissing": true,
434 |   "validateAgainstGit": true
435 | }
436 | ```
437 | 
438 | **Use Cases**:
439 | 
440 | - First-time setup: Initialize freshness metadata for all documentation files
441 | - Regular maintenance: Update validation timestamps
442 | - After code changes: Sync documentation freshness with git history
443 | 
444 | ## Sitemap Management Tools
445 | 
446 | ### manage_sitemap
447 | 
448 | **Description**: Generate, validate, and manage sitemap.xml as the source of truth for documentation links. Sitemap.xml is used for SEO, search engine submission, and deployment tracking.
449 | 
450 | **Parameters**:
451 | 
452 | - `action` (enum, required): Action to perform
453 |   - `"generate"`: Create new sitemap.xml
454 |   - `"validate"`: Check sitemap structure
455 |   - `"update"`: Sync sitemap with documentation
456 |   - `"list"`: Show all URLs in sitemap
457 | - `docsPath` (string, required): Path to documentation root directory
458 | - `baseUrl` (string, required for generate/update): Base URL for the site (e.g., `https://user.github.io/repo`)
459 | - `includePatterns` (array, optional): File patterns to include
460 |   - Default: `["**/*.md", "**/*.html", "**/*.mdx"]`
461 | - `excludePatterns` (array, optional): File patterns to exclude
462 |   - Default: `["node_modules", ".git", "dist", "build", ".documcp"]`
463 | - `updateFrequency` (enum, optional): Default change frequency for pages
464 |   - Options: `"always"`, `"hourly"`, `"daily"`, `"weekly"`, `"monthly"`, `"yearly"`, `"never"`
465 | - `useGitHistory` (boolean, optional, default: true): Use git history for last modified dates
466 | - `sitemapPath` (string, optional): Custom path for sitemap.xml (default: `docsPath/sitemap.xml`)
467 | 
468 | **Returns**: Sitemap operation result with:
469 | 
470 | - Generated/validated sitemap structure
471 | - URL count and statistics
472 | - Validation errors (if any)
473 | - Recommendations for SEO optimization
474 | 
475 | **Example**:
476 | 
477 | ```json
478 | {
479 |   "action": "generate",
480 |   "docsPath": "/path/to/docs",
481 |   "baseUrl": "https://example.com/docs"
482 | }
483 | ```
484 | 
485 | **Use Cases**:
486 | 
487 | - SEO optimization: Generate sitemap for search engines
488 | - Link validation: Ensure all documentation pages are discoverable
489 | - Deployment tracking: Monitor documentation changes over time
490 | 
491 | ## User Preferences & Analytics Tools
492 | 
493 | ### manage_preferences
494 | 
495 | **Description**: Manage user preferences for personalized recommendations
496 | 
497 | **Parameters**:
498 | 
499 | - `action` (enum, required): Action to perform
500 |   - `"get"`: Retrieve current preferences
501 |   - `"update"`: Update preferences
502 |   - `"reset"`: Reset to defaults
503 |   - `"export"`: Export preferences as JSON
504 |   - `"import"`: Import preferences from JSON
505 |   - `"recommendations"`: Get SSG recommendations based on preferences
506 | - `userId` (string, optional, default: "default"): User ID for multi-user setups
507 | - `preferences` (object, optional): Preference updates (for update action)
508 |   - `preferredSSGs` (array, optional): List of preferred static site generators
509 |   - `documentationStyle` (enum, optional): `"minimal"`, `"comprehensive"`, or `"tutorial-heavy"`
510 |   - `expertiseLevel` (enum, optional): `"beginner"`, `"intermediate"`, or `"advanced"`
511 |   - `preferredTechnologies` (array, optional): Preferred technologies and frameworks
512 |   - `preferredDiataxisCategories` (array, optional): Preferred documentation categories
513 |   - `autoApplyPreferences` (boolean, optional): Automatically apply preferences to recommendations
514 | - `json` (string, optional): JSON string for import action
515 | 
516 | **Returns**: Preference data or operation result
517 | 
518 | **Example**:
519 | 
520 | ```json
521 | {
522 |   "action": "get",
523 |   "userId": "user123"
524 | }
525 | ```
526 | 
527 | ### analyze_deployments
528 | 
529 | **Description**: Analyze deployment patterns and generate insights
530 | 
531 | **Parameters**:
532 | 
533 | - `analysisType` (enum, optional, default: "full_report"): Type of analysis
534 |   - `"full_report"`: Comprehensive analysis
535 |   - `"ssg_stats"`: Per-SSG statistics
536 |   - `"compare"`: Compare multiple SSGs
537 |   - `"health"`: Deployment health score
538 |   - `"trends"`: Temporal analysis
539 | - `ssg` (string, optional): SSG name for ssg_stats analysis
540 | - `ssgs` (array, optional): Array of SSG names for comparison
541 | - `periodDays` (number, optional, default: 30): Period in days for trend analysis
542 | 
543 | **Returns**: Analytics report with deployment insights
544 | 
545 | **Example**:
546 | 
547 | ```json
548 | {
549 |   "analysisType": "full_report"
550 | }
551 | ```
552 | 
553 | ## Memory System Tools
554 | 
555 | The memory system provides intelligent learning and pattern recognition across documentation projects.
556 | 
557 | ### memory_recall
558 | 
559 | **Description**: Recall memories about a project or topic
560 | 
561 | **Parameters**:
562 | 
563 | - `query` (string, required): Search query or project ID
564 | - `type` (enum, optional): Memory type to recall
565 | - `limit` (number, optional, default: 10): Maximum results
566 | 
567 | ### memory_insights
568 | 
569 | **Description**: Get insights and patterns from memory
570 | 
571 | **Parameters**:
572 | 
573 | - `projectId` (string, optional): Project ID to analyze
574 | - `timeRange` (object, optional): Time range for analysis
575 | 
576 | ### memory_similar
577 | 
578 | **Description**: Find similar projects from memory
579 | 
580 | **Parameters**:
581 | 
582 | - `analysisId` (string, required): Analysis ID to find similar projects for
583 | - `limit` (number, optional, default: 5): Maximum similar projects
584 | 
585 | ### memory_export
586 | 
587 | **Description**: Export memories to JSON or CSV
588 | 
589 | **Parameters**:
590 | 
591 | - `filter` (object, optional): Filter memories to export
592 | - `format` (enum, optional, default: "json"): Export format
593 | 
594 | ### memory_cleanup
595 | 
596 | **Description**: Clean up old memories
597 | 
598 | **Parameters**:
599 | 
600 | - `daysToKeep` (number, optional, default: 30): Number of days to retain
601 | - `dryRun` (boolean, optional, default: false): Preview without deleting
602 | 
603 | ## Tool Chaining and Workflows
604 | 
605 | DocuMCP tools are designed to work together in workflows:
606 | 
607 | 1. **Analysis → Recommendation → Implementation**:
608 | 
609 |    ```
610 |    analyze_repository → recommend_ssg → generate_config → setup_structure → deploy_pages
611 |    ```
612 | 
613 | 2. **Content Management**:
614 | 
615 |    ```
616 |    analyze_repository → populate_diataxis_content → validate_diataxis_content
617 |    ```
618 | 
619 | 3. **Documentation Maintenance**:
620 | 
621 |    ```
622 |    detect_documentation_gaps → update_existing_documentation → validate_content
623 |    ```
624 | 
625 | 4. **Freshness Tracking**:
626 | 
627 |    ```
628 |    validate_documentation_freshness → track_documentation_freshness → (update files as needed)
629 |    ```
630 | 
631 | 5. **SEO and Sitemap Management**:
632 |    ```
633 |    manage_sitemap (generate) → deploy_pages → manage_sitemap (validate)
634 |    ```
635 | 
636 | ## Error Handling
637 | 
638 | All tools return structured responses with error information when failures occur:
639 | 
640 | ```json
641 | {
642 |   "content": [
643 |     {
644 |       "type": "text",
645 |       "text": "Error executing tool_name: error_message"
646 |     }
647 |   ],
648 |   "isError": true
649 | }
650 | ```
651 | 
652 | ## Resource Storage
653 | 
654 | Tool results are automatically stored as MCP resources with URIs like:
655 | 
656 | - `documcp://analysis/{id}`: Analysis results
657 | - `documcp://config/{ssg}/{id}`: Configuration files
658 | - `documcp://deployment/{id}`: Deployment workflows
659 | 
660 | These resources can be accessed later for reference or further processing.
661 | 
662 | ## Version Information
663 | 
664 | Current DocuMCP version: **0.5.2**
665 | 
666 | For the latest updates and detailed changelog, see the project repository.
667 | 
668 | ## Recent Additions (v0.5.2)
669 | 
670 | ### Documentation Freshness Tracking
671 | 
672 | - `track_documentation_freshness`: Monitor documentation staleness with configurable thresholds
673 | - `validate_documentation_freshness`: Initialize and update freshness metadata
674 | 
675 | ### Sitemap Management
676 | 
677 | - `manage_sitemap`: Generate, validate, and manage sitemap.xml for SEO and deployment tracking
678 | 
679 | These tools integrate with the knowledge graph system to provide historical analysis and intelligent recommendations.
680 | 
```

--------------------------------------------------------------------------------
/tests/tools/detect-gaps.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { promises as fs } from "fs";
  2 | import path from "path";
  3 | import { tmpdir } from "os";
  4 | 
// Mock dependencies that don't involve filesystem operations.
// NOTE: the "mock" prefix on these const names matters — jest.mock() factory
// calls are hoisted above imports, and Jest only allows a factory to reference
// out-of-scope variables whose names begin with "mock".
const mockAnalyzeRepository = jest.fn();
const mockValidateContent = jest.fn();

// Replace the real repository analyzer with a controllable mock so each test
// can supply its own analysis payload.
jest.mock("../../src/tools/analyze-repository.js", () => ({
  analyzeRepository: mockAnalyzeRepository,
}));

// Replace Diataxis content validation with a controllable mock.
jest.mock("../../src/tools/validate-content.js", () => ({
  handleValidateDiataxisContent: mockValidateContent,
}));

// Stub CodeScanner so gap detection sees a small, deterministic code
// inventory (one exported function/class/interface, no endpoints by default)
// without parsing any real source files.
jest.mock("../../src/utils/code-scanner.js", () => ({
  CodeScanner: jest.fn().mockImplementation(() => ({
    analyzeRepository: jest.fn().mockResolvedValue({
      summary: {
        totalFiles: 5,
        parsedFiles: 3,
        functions: 10,
        classes: 2,
        interfaces: 3,
        types: 1,
        constants: 2,
        apiEndpoints: 1,
      },
      files: ["src/test.ts"],
      functions: [
        {
          name: "testFunction",
          filePath: "src/test.ts",
          line: 1,
          exported: true,
          hasJSDoc: false,
        },
      ],
      classes: [
        {
          name: "TestClass",
          filePath: "src/test.ts",
          line: 5,
          exported: true,
          hasJSDoc: false,
        },
      ],
      interfaces: [
        {
          name: "TestInterface",
          filePath: "src/test.ts",
          line: 10,
          exported: true,
          hasJSDoc: false,
        },
      ],
      types: [],
      constants: [],
      apiEndpoints: [],
      imports: [],
      exports: [],
      frameworks: [],
    }),
  })),
}));
 67 | 
 68 | // Helper functions for creating test directories and files
 69 | async function createTestDirectory(name: string): Promise<string> {
 70 |   const testDir = path.join(
 71 |     tmpdir(),
 72 |     "documcp-test-" +
 73 |       Date.now() +
 74 |       "-" +
 75 |       Math.random().toString(36).substring(7),
 76 |   );
 77 |   await fs.mkdir(testDir, { recursive: true });
 78 |   return testDir;
 79 | }
 80 | 
 81 | async function createTestFile(
 82 |   filePath: string,
 83 |   content: string,
 84 | ): Promise<void> {
 85 |   await fs.mkdir(path.dirname(filePath), { recursive: true });
 86 |   await fs.writeFile(filePath, content);
 87 | }
 88 | 
 89 | async function cleanupTestDirectory(dirPath: string): Promise<void> {
 90 |   try {
 91 |     await fs.rm(dirPath, { recursive: true, force: true });
 92 |   } catch (error) {
 93 |     // Ignore cleanup errors
 94 |   }
 95 | }
 96 | 
 97 | // Now import the module under test
 98 | import { detectDocumentationGaps } from "../../src/tools/detect-gaps.js";
 99 | 
100 | describe("detectDocumentationGaps (Real Filesystem)", () => {
101 |   const mockRepositoryAnalysis = {
102 |     id: "analysis_123",
103 |     structure: {
104 |       hasTests: true,
105 |       hasCI: true,
106 |       hasDocs: true,
107 |     },
108 |     dependencies: {
109 |       ecosystem: "javascript",
110 |       packages: ["react", "express"],
111 |     },
112 |     hasApiEndpoints: true,
113 |     packageManager: "npm",
114 |     hasDocker: true,
115 |     hasCICD: true,
116 |   };
117 | 
118 |   const mockValidationResult = {
119 |     success: true,
120 |     confidence: { overall: 85 },
121 |     issues: [{ type: "warning", description: "Missing API examples" }],
122 |     validationResults: [
123 |       { status: "pass", message: "Good structure" },
124 |       {
125 |         status: "fail",
126 |         message: "Missing references",
127 |         recommendation: "Add API docs",
128 |       },
129 |     ],
130 |   };
131 | 
132 |   let testRepoDir: string;
133 |   const createdDirs: string[] = [];
134 | 
135 |   beforeEach(async () => {
136 |     jest.clearAllMocks();
137 | 
138 |     // Create a fresh test directory for each test
139 |     testRepoDir = await createTestDirectory("test-repo");
140 |     createdDirs.push(testRepoDir);
141 | 
142 |     // Default successful repository analysis
143 |     mockAnalyzeRepository.mockResolvedValue({
144 |       content: [
145 |         {
146 |           type: "text",
147 |           text: JSON.stringify(mockRepositoryAnalysis),
148 |         },
149 |       ],
150 |     });
151 | 
152 |     // Default validation result
153 |     mockValidateContent.mockResolvedValue({
154 |       content: [
155 |         {
156 |           type: "text",
157 |           text: JSON.stringify({ success: true, data: mockValidationResult }),
158 |         },
159 |       ],
160 |     } as any);
161 |   });
162 | 
163 |   afterEach(async () => {
164 |     // Cleanup all created directories
165 |     await Promise.all(createdDirs.map((dir) => cleanupTestDirectory(dir)));
166 |     createdDirs.length = 0;
167 |   });
168 | 
169 |   describe("basic functionality", () => {
170 |     it("should detect gaps in repository without documentation", async () => {
171 |       // No docs directory created - test repo is empty
172 | 
173 |       const result = await detectDocumentationGaps({
174 |         repositoryPath: testRepoDir,
175 |         depth: "quick",
176 |       });
177 | 
178 |       expect(result.content).toBeDefined();
179 |       expect(result.content[0]).toBeDefined();
180 |       const data = JSON.parse(result.content[0].text);
181 | 
182 |       expect(data.repositoryPath).toBe(testRepoDir);
183 |       expect(data.analysisId).toBe("analysis_123");
184 |       expect(data.overallScore).toBe(0);
185 |       expect(data.gaps).toContainEqual(
186 |         expect.objectContaining({
187 |           category: "general",
188 |           gapType: "missing_section",
189 |           description: "No documentation directory found",
190 |           priority: "critical",
191 |         }),
192 |       );
193 |     });
194 | 
195 |     it("should detect missing Diataxis sections", async () => {
196 |       // Create docs directory with some sections but missing tutorials and how-to
197 |       const docsDir = path.join(testRepoDir, "docs");
198 |       await fs.mkdir(docsDir);
199 |       await createTestFile(
200 |         path.join(docsDir, "index.md"),
201 |         "# Main Documentation",
202 |       );
203 | 
204 |       // Create reference and explanation sections
205 |       await fs.mkdir(path.join(docsDir, "reference"));
206 |       await createTestFile(
207 |         path.join(docsDir, "reference", "api.md"),
208 |         "# API Reference",
209 |       );
210 |       await fs.mkdir(path.join(docsDir, "explanation"));
211 |       await createTestFile(
212 |         path.join(docsDir, "explanation", "concepts.md"),
213 |         "# Concepts",
214 |       );
215 | 
216 |       // tutorials and how-to are missing
217 | 
218 |       const result = await detectDocumentationGaps({
219 |         repositoryPath: testRepoDir,
220 |         documentationPath: docsDir,
221 |         depth: "standard",
222 |       });
223 | 
224 |       const data = JSON.parse(result.content[0].text);
225 | 
226 |       expect(data.gaps).toContainEqual(
227 |         expect.objectContaining({
228 |           category: "tutorials",
229 |           gapType: "missing_section",
230 |           priority: "high",
231 |         }),
232 |       );
233 |       expect(data.gaps).toContainEqual(
234 |         expect.objectContaining({
235 |           category: "how-to",
236 |           gapType: "missing_section",
237 |           priority: "medium",
238 |         }),
239 |       );
240 |     });
241 | 
242 |     it("should identify existing documentation strengths", async () => {
243 |       // Create comprehensive docs structure
244 |       const docsDir = path.join(testRepoDir, "docs");
245 |       await fs.mkdir(docsDir);
246 |       await createTestFile(
247 |         path.join(docsDir, "README.md"),
248 |         "# Project Documentation",
249 |       );
250 | 
251 |       // Create all Diataxis sections
252 |       await fs.mkdir(path.join(docsDir, "tutorials"));
253 |       await createTestFile(
254 |         path.join(docsDir, "tutorials", "getting-started.md"),
255 |         "# Getting Started",
256 |       );
257 |       await fs.mkdir(path.join(docsDir, "how-to"));
258 |       await createTestFile(
259 |         path.join(docsDir, "how-to", "deployment.md"),
260 |         "# How to Deploy",
261 |       );
262 |       await fs.mkdir(path.join(docsDir, "reference"));
263 |       await createTestFile(
264 |         path.join(docsDir, "reference", "api.md"),
265 |         "# API Reference",
266 |       );
267 |       await fs.mkdir(path.join(docsDir, "explanation"));
268 |       await createTestFile(
269 |         path.join(docsDir, "explanation", "architecture.md"),
270 |         "# Architecture",
271 |       );
272 | 
273 |       const result = await detectDocumentationGaps({
274 |         repositoryPath: testRepoDir,
275 |         documentationPath: docsDir,
276 |         depth: "comprehensive",
277 |       });
278 | 
279 |       const data = JSON.parse(result.content[0].text);
280 | 
281 |       expect(data.strengths).toContain("Has main documentation index file");
282 |       expect(data.strengths).toContain(
283 |         "Well-organized sections: tutorials, how-to, reference, explanation",
284 |       );
285 |       expect(data.overallScore).toBeGreaterThan(50); // Adjust expectation to match actual scoring
286 |     });
287 |   });
288 | 
289 |   describe("error handling", () => {
290 |     it("should handle repository analysis failure", async () => {
291 |       mockAnalyzeRepository.mockResolvedValue({
292 |         content: [
293 |           {
294 |             type: "text",
295 |             text: JSON.stringify({ success: false, error: "Analysis failed" }),
296 |           },
297 |         ],
298 |       });
299 | 
300 |       const result = await detectDocumentationGaps({
301 |         repositoryPath: testRepoDir,
302 |       });
303 | 
304 |       expect(result.content[0].text).toContain("GAP_DETECTION_FAILED");
305 |       expect(result).toHaveProperty("isError", true);
306 |     });
307 | 
308 |     it("should handle file system errors gracefully", async () => {
309 |       // Create a docs directory but then make it inaccessible
310 |       const docsDir = path.join(testRepoDir, "docs");
311 |       await fs.mkdir(docsDir);
312 | 
313 |       const result = await detectDocumentationGaps({
314 |         repositoryPath: testRepoDir,
315 |         documentationPath: docsDir,
316 |       });
317 | 
318 |       const data = JSON.parse(result.content[0].text);
319 |       expect(data.analysisId).toBe("analysis_123");
320 |       expect(data.gaps).toBeInstanceOf(Array);
321 |     });
322 |   });
323 | 
324 |   describe("code-based gap detection", () => {
325 |     it("should detect missing API documentation when endpoints exist", async () => {
326 |       // Create docs directory without API documentation
327 |       const docsDir = path.join(testRepoDir, "docs");
328 |       await fs.mkdir(docsDir);
329 |       await createTestFile(path.join(docsDir, "index.md"), "# Documentation");
330 | 
331 |       // Mock CodeScanner to return API endpoints
332 |       const { CodeScanner } = require("../../src/utils/code-scanner.js");
333 |       CodeScanner.mockImplementationOnce(() => ({
334 |         analyzeRepository: jest.fn().mockResolvedValue({
335 |           summary: {
336 |             totalFiles: 5,
337 |             parsedFiles: 3,
338 |             functions: 10,
339 |             classes: 2,
340 |             interfaces: 3,
341 |             types: 1,
342 |             constants: 2,
343 |             apiEndpoints: 3,
344 |           },
345 |           files: ["src/api.ts", "src/routes.ts"],
346 |           functions: [],
347 |           classes: [],
348 |           interfaces: [],
349 |           types: [],
350 |           constants: [],
351 |           apiEndpoints: [
352 |             {
353 |               method: "GET",
354 |               path: "/api/users",
355 |               filePath: "src/api.ts",
356 |               line: 10,
357 |               hasDocumentation: true,
358 |             },
359 |             {
360 |               method: "POST",
361 |               path: "/api/users",
362 |               filePath: "src/api.ts",
363 |               line: 20,
364 |               hasDocumentation: true,
365 |             },
366 |             {
367 |               method: "DELETE",
368 |               path: "/api/users/:id",
369 |               filePath: "src/routes.ts",
370 |               line: 5,
371 |               hasDocumentation: true,
372 |             },
373 |           ],
374 |           imports: [],
375 |           exports: [],
376 |           frameworks: [],
377 |         }),
378 |       }));
379 | 
380 |       const result = await detectDocumentationGaps({
381 |         repositoryPath: testRepoDir,
382 |         documentationPath: docsDir,
383 |         depth: "comprehensive",
384 |       });
385 | 
386 |       const data = JSON.parse(result.content[0].text);
387 | 
388 |       // Should detect missing API documentation section
389 |       expect(data.gaps).toContainEqual(
390 |         expect.objectContaining({
391 |           category: "reference",
392 |           gapType: "missing_section",
393 |           description: expect.stringContaining("API endpoints"),
394 |           priority: "critical",
395 |         }),
396 |       );
397 |     });
398 | 
399 |     it("should detect undocumented API endpoints", async () => {
400 |       // Create docs directory with API section
401 |       const docsDir = path.join(testRepoDir, "docs");
402 |       await fs.mkdir(docsDir);
403 |       await fs.mkdir(path.join(docsDir, "reference"));
404 |       await createTestFile(
405 |         path.join(docsDir, "reference", "api.md"),
406 |         "# API Reference",
407 |       );
408 | 
409 |       // Mock CodeScanner to return endpoints without documentation
410 |       const { CodeScanner } = require("../../src/utils/code-scanner.js");
411 |       CodeScanner.mockImplementationOnce(() => ({
412 |         analyzeRepository: jest.fn().mockResolvedValue({
413 |           summary: {
414 |             totalFiles: 5,
415 |             parsedFiles: 3,
416 |             functions: 10,
417 |             classes: 2,
418 |             interfaces: 3,
419 |             types: 1,
420 |             constants: 2,
421 |             apiEndpoints: 2,
422 |           },
423 |           files: ["src/api.ts"],
424 |           functions: [],
425 |           classes: [],
426 |           interfaces: [],
427 |           types: [],
428 |           constants: [],
429 |           apiEndpoints: [
430 |             {
431 |               method: "GET",
432 |               path: "/api/data",
433 |               filePath: "src/api.ts",
434 |               line: 15,
435 |               hasDocumentation: false, // No JSDoc
436 |             },
437 |             {
438 |               method: "POST",
439 |               path: "/api/data",
440 |               filePath: "src/api.ts",
441 |               line: 25,
442 |               hasDocumentation: false, // No JSDoc
443 |             },
444 |           ],
445 |           imports: [],
446 |           exports: [],
447 |           frameworks: [],
448 |         }),
449 |       }));
450 | 
451 |       const result = await detectDocumentationGaps({
452 |         repositoryPath: testRepoDir,
453 |         documentationPath: docsDir,
454 |         depth: "comprehensive",
455 |       });
456 | 
457 |       const data = JSON.parse(result.content[0].text);
458 | 
459 |       // Should detect undocumented endpoints
460 |       expect(data.gaps).toContainEqual(
461 |         expect.objectContaining({
462 |           category: "reference",
463 |           gapType: "missing_examples",
464 |           description: expect.stringContaining("2 API endpoints lack"),
465 |           priority: "high",
466 |         }),
467 |       );
468 |     });
469 | 
470 |     it("should detect undocumented exported classes", async () => {
471 |       const docsDir = path.join(testRepoDir, "docs");
472 |       await fs.mkdir(docsDir);
473 |       await createTestFile(path.join(docsDir, "index.md"), "# Documentation");
474 | 
475 |       // Mock CodeScanner to return undocumented classes
476 |       const { CodeScanner } = require("../../src/utils/code-scanner.js");
477 |       CodeScanner.mockImplementationOnce(() => ({
478 |         analyzeRepository: jest.fn().mockResolvedValue({
479 |           summary: {
480 |             totalFiles: 5,
481 |             parsedFiles: 3,
482 |             functions: 10,
483 |             classes: 3,
484 |             interfaces: 2,
485 |             types: 1,
486 |             constants: 2,
487 |             apiEndpoints: 0,
488 |           },
489 |           files: ["src/models.ts"],
490 |           functions: [],
491 |           classes: [
492 |             {
493 |               name: "UserModel",
494 |               filePath: "src/models.ts",
495 |               line: 10,
496 |               exported: true,
497 |               hasJSDoc: false,
498 |             },
499 |             {
500 |               name: "PostModel",
501 |               filePath: "src/models.ts",
502 |               line: 30,
503 |               exported: true,
504 |               hasJSDoc: false,
505 |             },
506 |             {
507 |               name: "InternalHelper",
508 |               filePath: "src/models.ts",
509 |               line: 50,
510 |               exported: false, // Not exported, should be ignored
511 |               hasJSDoc: false,
512 |             },
513 |           ],
514 |           interfaces: [],
515 |           types: [],
516 |           constants: [],
517 |           apiEndpoints: [],
518 |           imports: [],
519 |           exports: [],
520 |           frameworks: [],
521 |         }),
522 |       }));
523 | 
524 |       const result = await detectDocumentationGaps({
525 |         repositoryPath: testRepoDir,
526 |         documentationPath: docsDir,
527 |         depth: "comprehensive",
528 |       });
529 | 
530 |       const data = JSON.parse(result.content[0].text);
531 | 
532 |       // Should detect undocumented exported classes (only 2, not the non-exported one)
533 |       expect(data.gaps).toContainEqual(
534 |         expect.objectContaining({
535 |           category: "reference",
536 |           gapType: "incomplete_content",
537 |           description: expect.stringContaining("2 exported classes lack"),
538 |           priority: "medium",
539 |         }),
540 |       );
541 |     });
542 | 
543 |     it("should detect undocumented exported interfaces", async () => {
544 |       const docsDir = path.join(testRepoDir, "docs");
545 |       await fs.mkdir(docsDir);
546 |       await createTestFile(path.join(docsDir, "index.md"), "# Documentation");
547 | 
548 |       // Mock CodeScanner to return undocumented interfaces
549 |       const { CodeScanner } = require("../../src/utils/code-scanner.js");
550 |       CodeScanner.mockImplementationOnce(() => ({
551 |         analyzeRepository: jest.fn().mockResolvedValue({
552 |           summary: {
553 |             totalFiles: 5,
554 |             parsedFiles: 3,
555 |             functions: 10,
556 |             classes: 2,
557 |             interfaces: 3,
558 |             types: 1,
559 |             constants: 2,
560 |             apiEndpoints: 0,
561 |           },
562 |           files: ["src/types.ts"],
563 |           functions: [],
564 |           classes: [],
565 |           interfaces: [
566 |             {
567 |               name: "IUser",
568 |               filePath: "src/types.ts",
569 |               line: 5,
570 |               exported: true,
571 |               hasJSDoc: false,
572 |             },
573 |             {
574 |               name: "IConfig",
575 |               filePath: "src/types.ts",
576 |               line: 15,
577 |               exported: true,
578 |               hasJSDoc: false,
579 |             },
580 |             {
581 |               name: "IInternalState",
582 |               filePath: "src/types.ts",
583 |               line: 25,
584 |               exported: false, // Not exported
585 |               hasJSDoc: false,
586 |             },
587 |           ],
588 |           types: [],
589 |           constants: [],
590 |           apiEndpoints: [],
591 |           imports: [],
592 |           exports: [],
593 |           frameworks: [],
594 |         }),
595 |       }));
596 | 
597 |       const result = await detectDocumentationGaps({
598 |         repositoryPath: testRepoDir,
599 |         documentationPath: docsDir,
600 |         depth: "comprehensive",
601 |       });
602 | 
603 |       const data = JSON.parse(result.content[0].text);
604 | 
605 |       // Should detect undocumented exported interfaces
606 |       expect(data.gaps).toContainEqual(
607 |         expect.objectContaining({
608 |           category: "reference",
609 |           gapType: "incomplete_content",
610 |           description: expect.stringContaining("2 exported interfaces lack"),
611 |           priority: "medium",
612 |         }),
613 |       );
614 |     });
615 | 
616 |     it("should handle validation errors gracefully", async () => {
617 |       const docsDir = path.join(testRepoDir, "docs");
618 |       await fs.mkdir(docsDir);
619 |       await createTestFile(path.join(docsDir, "index.md"), "# Documentation");
620 | 
621 |       // Mock validation to throw an error
622 |       mockValidateContent.mockRejectedValueOnce(
623 |         new Error("Validation service unavailable"),
624 |       );
625 | 
626 |       const result = await detectDocumentationGaps({
627 |         repositoryPath: testRepoDir,
628 |         documentationPath: docsDir,
629 |         depth: "comprehensive",
630 |       });
631 | 
632 |       const data = JSON.parse(result.content[0].text);
633 | 
634 |       // Should still succeed without validation data
635 |       expect(data.analysisId).toBe("analysis_123");
636 |       expect(data.gaps).toBeInstanceOf(Array);
637 |       expect(data.repositoryPath).toBe(testRepoDir);
638 |     });
639 | 
640 |     it("should handle empty repository analysis result", async () => {
641 |       // Mock analyze_repository to return empty/no content
642 |       mockAnalyzeRepository.mockResolvedValueOnce({
643 |         content: [], // Empty content array
644 |       });
645 | 
646 |       const result = await detectDocumentationGaps({
647 |         repositoryPath: testRepoDir,
648 |         depth: "quick",
649 |       });
650 | 
651 |       // Should return error about failed analysis
652 |       expect(result.content[0].text).toContain("GAP_DETECTION_FAILED");
653 |       expect(result.content[0].text).toContain("Repository analysis failed");
654 |     });
655 | 
656 |     it("should detect missing React framework documentation", async () => {
657 |       const docsDir = path.join(testRepoDir, "docs");
658 |       await fs.mkdir(docsDir);
659 |       await createTestFile(path.join(docsDir, "index.md"), "# Documentation");
660 | 
661 |       // Mock CodeScanner to return React framework
662 |       const { CodeScanner } = require("../../src/utils/code-scanner.js");
663 |       CodeScanner.mockImplementationOnce(() => ({
664 |         analyzeRepository: jest.fn().mockResolvedValue({
665 |           summary: {
666 |             totalFiles: 5,
667 |             parsedFiles: 3,
668 |             functions: 10,
669 |             classes: 2,
670 |             interfaces: 3,
671 |             types: 1,
672 |             constants: 2,
673 |             apiEndpoints: 0,
674 |           },
675 |           files: ["src/App.tsx"],
676 |           functions: [],
677 |           classes: [],
678 |           interfaces: [],
679 |           types: [],
680 |           constants: [],
681 |           apiEndpoints: [],
682 |           imports: [],
683 |           exports: [],
684 |           frameworks: ["React"], // Indicate React is used
685 |         }),
686 |       }));
687 | 
688 |       const result = await detectDocumentationGaps({
689 |         repositoryPath: testRepoDir,
690 |         documentationPath: docsDir,
691 |         depth: "comprehensive",
692 |       });
693 | 
694 |       const data = JSON.parse(result.content[0].text);
695 | 
696 |       // Should detect missing React documentation
697 |       expect(data.gaps).toContainEqual(
698 |         expect.objectContaining({
699 |           category: "how-to",
700 |           gapType: "missing_section",
701 |           description: expect.stringContaining("React framework detected"),
702 |           priority: "medium",
703 |         }),
704 |       );
705 |     });
706 | 
707 |     it("should detect missing Express framework documentation", async () => {
708 |       const docsDir = path.join(testRepoDir, "docs");
709 |       await fs.mkdir(docsDir);
710 |       await createTestFile(path.join(docsDir, "index.md"), "# Documentation");
711 | 
712 |       // Mock CodeScanner to return Express framework
713 |       const { CodeScanner } = require("../../src/utils/code-scanner.js");
714 |       CodeScanner.mockImplementationOnce(() => ({
715 |         analyzeRepository: jest.fn().mockResolvedValue({
716 |           summary: {
717 |             totalFiles: 5,
718 |             parsedFiles: 3,
719 |             functions: 10,
720 |             classes: 2,
721 |             interfaces: 3,
722 |             types: 1,
723 |             constants: 2,
724 |             apiEndpoints: 0,
725 |           },
726 |           files: ["src/server.ts"],
727 |           functions: [],
728 |           classes: [],
729 |           interfaces: [],
730 |           types: [],
731 |           constants: [],
732 |           apiEndpoints: [],
733 |           imports: [],
734 |           exports: [],
735 |           frameworks: ["Express"], // Indicate Express is used
736 |         }),
737 |       }));
738 | 
739 |       const result = await detectDocumentationGaps({
740 |         repositoryPath: testRepoDir,
741 |         documentationPath: docsDir,
742 |         depth: "comprehensive",
743 |       });
744 | 
745 |       const data = JSON.parse(result.content[0].text);
746 | 
747 |       // Should detect missing Express documentation
748 |       expect(data.gaps).toContainEqual(
749 |         expect.objectContaining({
750 |           category: "how-to",
751 |           gapType: "missing_section",
752 |           description: expect.stringContaining("Express framework detected"),
753 |           priority: "medium",
754 |         }),
755 |       );
756 |     });
757 |   });
758 | 
759 |   describe("input validation", () => {
760 |     it("should require repositoryPath", async () => {
761 |       await expect(detectDocumentationGaps({} as any)).rejects.toThrow();
762 |     });
763 | 
764 |     it("should handle invalid depth parameter", async () => {
765 |       await expect(
766 |         detectDocumentationGaps({
767 |           repositoryPath: testRepoDir,
768 |           depth: "invalid" as any,
769 |         }),
770 |       ).rejects.toThrow();
771 |     });
772 |   });
773 | });
774 | 
```

--------------------------------------------------------------------------------
/src/memory/index.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Memory System for DocuMCP
  3 |  * Provides persistent memory and learning capabilities
  4 |  */
  5 | 
  6 | export { JSONLStorage, type MemoryEntry } from "./storage.js";
  7 | export {
  8 |   MemoryManager,
  9 |   type MemoryContext,
 10 |   type MemorySearchOptions,
 11 | } from "./manager.js";
 12 | export {
 13 |   EnhancedMemoryManager,
 14 |   type EnhancedRecommendation,
 15 |   type IntelligentAnalysis,
 16 | } from "./enhanced-manager.js";
 17 | export {
 18 |   IncrementalLearningSystem,
 19 |   type ProjectFeatures,
 20 |   type LearningPattern,
 21 |   type LearningInsight,
 22 | } from "./learning.js";
 23 | export {
 24 |   KnowledgeGraph,
 25 |   type GraphNode,
 26 |   type GraphEdge,
 27 |   type GraphPath,
 28 |   type RecommendationPath,
 29 | } from "./knowledge-graph.js";
 30 | export {
 31 |   ContextualMemoryRetrieval,
 32 |   type RetrievalContext,
 33 |   type ContextualMatch,
 34 |   type RetrievalResult,
 35 | } from "./contextual-retrieval.js";
 36 | export {
 37 |   MultiAgentMemorySharing,
 38 |   type AgentIdentity,
 39 |   type SharedMemory,
 40 |   type CollaborativeInsight,
 41 | } from "./multi-agent-sharing.js";
 42 | export {
 43 |   MemoryPruningSystem,
 44 |   type PruningPolicy,
 45 |   type OptimizationMetrics,
 46 |   type PruningResult,
 47 | } from "./pruning.js";
 48 | export {
 49 |   TemporalMemoryAnalysis,
 50 |   type TemporalPattern,
 51 |   type TemporalMetrics,
 52 |   type PredictionResult,
 53 |   type TemporalInsight,
 54 | } from "./temporal-analysis.js";
 55 | export {
 56 |   MemoryVisualizationSystem,
 57 |   type VisualizationConfig,
 58 |   type ChartData,
 59 |   type DashboardData,
 60 |   type NetworkVisualization,
 61 | } from "./visualization.js";
 62 | export {
 63 |   MemoryExportImportSystem,
 64 |   type ExportOptions,
 65 |   type ImportOptions,
 66 |   type ExportResult,
 67 |   type ImportResult,
 68 |   type MigrationPlan,
 69 | } from "./export-import.js";
 70 | export {
 71 |   initializeMemory,
 72 |   rememberAnalysis,
 73 |   rememberRecommendation,
 74 |   rememberDeployment,
 75 |   rememberConfiguration,
 76 |   recallProjectHistory,
 77 |   getProjectInsights,
 78 |   getSimilarProjects,
 79 |   cleanupOldMemories,
 80 |   exportMemories,
 81 |   importMemories,
 82 |   getMemoryStatistics,
 83 |   getMemoryManager,
 84 |   handleMemoryRecall,
 85 |   handleMemoryIntelligentAnalysis,
 86 |   handleMemoryEnhancedRecommendation,
 87 | } from "./integration.js";
 88 | 
 89 | // Memory Tools for MCP
 90 | export const memoryTools = [
 91 |   {
 92 |     name: "memory_recall",
 93 |     description: "Recall memories about a project or topic",
 94 |     inputSchema: {
 95 |       type: "object",
 96 |       properties: {
 97 |         query: {
 98 |           type: "string",
 99 |           description: "Search query or project ID",
100 |         },
101 |         type: {
102 |           type: "string",
103 |           enum: [
104 |             "analysis",
105 |             "recommendation",
106 |             "deployment",
107 |             "configuration",
108 |             "interaction",
109 |             "all",
110 |           ],
111 |           description: "Type of memory to recall",
112 |         },
113 |         limit: {
114 |           type: "number",
115 |           description: "Maximum number of memories to return",
116 |           default: 10,
117 |         },
118 |       },
119 |       required: ["query"],
120 |     },
121 |   },
122 |   {
123 |     name: "memory_intelligent_analysis",
124 |     description:
125 |       "Get intelligent analysis with patterns, predictions, and recommendations",
126 |     inputSchema: {
127 |       type: "object",
128 |       properties: {
129 |         projectPath: {
130 |           type: "string",
131 |           description: "Path to the project for analysis",
132 |         },
133 |         baseAnalysis: {
134 |           type: "object",
135 |           description: "Base analysis data to enhance",
136 |         },
137 |       },
138 |       required: ["projectPath", "baseAnalysis"],
139 |     },
140 |   },
141 |   {
142 |     name: "memory_enhanced_recommendation",
143 |     description:
144 |       "Get enhanced recommendations using learning and knowledge graph",
145 |     inputSchema: {
146 |       type: "object",
147 |       properties: {
148 |         projectPath: {
149 |           type: "string",
150 |           description: "Path to the project",
151 |         },
152 |         baseRecommendation: {
153 |           type: "object",
154 |           description: "Base recommendation to enhance",
155 |         },
156 |         projectFeatures: {
157 |           type: "object",
158 |           properties: {
159 |             language: { type: "string" },
160 |             framework: { type: "string" },
161 |             size: { type: "string", enum: ["small", "medium", "large"] },
162 |             complexity: {
163 |               type: "string",
164 |               enum: ["simple", "moderate", "complex"],
165 |             },
166 |             hasTests: { type: "boolean" },
167 |             hasCI: { type: "boolean" },
168 |             hasDocs: { type: "boolean" },
169 |             isOpenSource: { type: "boolean" },
170 |           },
171 |           required: ["language"],
172 |         },
173 |       },
174 |       required: ["projectPath", "baseRecommendation", "projectFeatures"],
175 |     },
176 |   },
177 |   {
178 |     name: "memory_learning_stats",
179 |     description: "Get comprehensive learning and knowledge graph statistics",
180 |     inputSchema: {
181 |       type: "object",
182 |       properties: {
183 |         includeDetails: {
184 |           type: "boolean",
185 |           description: "Include detailed statistics",
186 |           default: true,
187 |         },
188 |       },
189 |     },
190 |   },
191 |   {
192 |     name: "memory_knowledge_graph",
193 |     description: "Query the knowledge graph for relationships and paths",
194 |     inputSchema: {
195 |       type: "object",
196 |       properties: {
197 |         query: {
198 |           type: "object",
199 |           properties: {
200 |             nodeTypes: {
201 |               type: "array",
202 |               items: { type: "string" },
203 |               description: "Filter by node types",
204 |             },
205 |             edgeTypes: {
206 |               type: "array",
207 |               items: { type: "string" },
208 |               description: "Filter by edge types",
209 |             },
210 |             startNode: {
211 |               type: "string",
212 |               description: "Starting node for path queries",
213 |             },
214 |             maxDepth: {
215 |               type: "number",
216 |               description: "Maximum path depth",
217 |               default: 3,
218 |             },
219 |           },
220 |         },
221 |       },
222 |       required: ["query"],
223 |     },
224 |   },
225 |   {
226 |     name: "memory_contextual_search",
227 |     description: "Perform contextual memory retrieval with intelligent ranking",
228 |     inputSchema: {
229 |       type: "object",
230 |       properties: {
231 |         query: {
232 |           type: "string",
233 |           description: "Search query",
234 |         },
235 |         context: {
236 |           type: "object",
237 |           properties: {
238 |             currentProject: {
239 |               type: "object",
240 |               properties: {
241 |                 path: { type: "string" },
242 |                 language: { type: "string" },
243 |                 framework: { type: "string" },
244 |                 size: { type: "string", enum: ["small", "medium", "large"] },
245 |               },
246 |             },
247 |             userIntent: {
248 |               type: "object",
249 |               properties: {
250 |                 action: {
251 |                   type: "string",
252 |                   enum: [
253 |                     "analyze",
254 |                     "recommend",
255 |                     "deploy",
256 |                     "troubleshoot",
257 |                     "learn",
258 |                   ],
259 |                 },
260 |                 urgency: { type: "string", enum: ["low", "medium", "high"] },
261 |                 experience: {
262 |                   type: "string",
263 |                   enum: ["novice", "intermediate", "expert"],
264 |                 },
265 |               },
266 |             },
267 |             temporalContext: {
268 |               type: "object",
269 |               properties: {
270 |                 recency: {
271 |                   type: "string",
272 |                   enum: ["recent", "all", "historical"],
273 |                 },
274 |                 timeRange: {
275 |                   type: "object",
276 |                   properties: {
277 |                     start: { type: "string" },
278 |                     end: { type: "string" },
279 |                   },
280 |                 },
281 |               },
282 |             },
283 |           },
284 |         },
285 |         options: {
286 |           type: "object",
287 |           properties: {
288 |             maxResults: { type: "number", default: 10 },
289 |             minRelevance: { type: "number", default: 0.3 },
290 |             includeReasoning: { type: "boolean", default: true },
291 |           },
292 |         },
293 |       },
294 |       required: ["query", "context"],
295 |     },
296 |   },
297 |   {
298 |     name: "memory_agent_network",
299 |     description: "Manage multi-agent memory sharing and collaboration",
300 |     inputSchema: {
301 |       type: "object",
302 |       properties: {
303 |         action: {
304 |           type: "string",
305 |           enum: [
306 |             "register_agent",
307 |             "share_memory",
308 |             "sync_request",
309 |             "get_insights",
310 |             "network_status",
311 |           ],
312 |           description: "Action to perform",
313 |         },
314 |         agentInfo: {
315 |           type: "object",
316 |           properties: {
317 |             name: { type: "string" },
318 |             capabilities: { type: "array", items: { type: "string" } },
319 |             specializations: { type: "array", items: { type: "string" } },
320 |             trustLevel: {
321 |               type: "string",
322 |               enum: ["low", "medium", "high", "trusted"],
323 |             },
324 |           },
325 |         },
326 |         memoryId: {
327 |           type: "string",
328 |           description: "Memory ID for sharing operations",
329 |         },
330 |         targetAgent: {
331 |           type: "string",
332 |           description: "Target agent for sync operations",
333 |         },
334 |         options: {
335 |           type: "object",
336 |           properties: {
337 |             anonymize: { type: "boolean", default: false },
338 |             requireValidation: { type: "boolean", default: false },
339 |           },
340 |         },
341 |       },
342 |       required: ["action"],
343 |     },
344 |   },
345 |   {
346 |     name: "memory_insights",
347 |     description: "Get insights and patterns from memory",
348 |     inputSchema: {
349 |       type: "object",
350 |       properties: {
351 |         projectId: {
352 |           type: "string",
353 |           description: "Project ID to analyze",
354 |         },
355 |         timeRange: {
356 |           type: "object",
357 |           properties: {
358 |             start: { type: "string", format: "date-time" },
359 |             end: { type: "string", format: "date-time" },
360 |           },
361 |           description: "Time range for analysis",
362 |         },
363 |       },
364 |     },
365 |   },
366 |   {
367 |     name: "memory_similar",
368 |     description: "Find similar projects from memory",
369 |     inputSchema: {
370 |       type: "object",
371 |       properties: {
372 |         analysisId: {
373 |           type: "string",
374 |           description: "Analysis ID to find similar projects for",
375 |         },
376 |         limit: {
377 |           type: "number",
378 |           description: "Maximum number of similar projects",
379 |           default: 5,
380 |         },
381 |       },
382 |       required: ["analysisId"],
383 |     },
384 |   },
385 |   {
386 |     name: "memory_export",
387 |     description: "Export memories to JSON or CSV",
388 |     inputSchema: {
389 |       type: "object",
390 |       properties: {
391 |         format: {
392 |           type: "string",
393 |           enum: ["json", "csv"],
394 |           description: "Export format",
395 |           default: "json",
396 |         },
397 |         filter: {
398 |           type: "object",
399 |           properties: {
400 |             type: { type: "string" },
401 |             projectId: { type: "string" },
402 |             startDate: { type: "string", format: "date-time" },
403 |             endDate: { type: "string", format: "date-time" },
404 |           },
405 |           description: "Filter memories to export",
406 |         },
407 |       },
408 |     },
409 |   },
410 |   {
411 |     name: "memory_cleanup",
412 |     description: "Clean up old memories",
413 |     inputSchema: {
414 |       type: "object",
415 |       properties: {
416 |         daysToKeep: {
417 |           type: "number",
418 |           description: "Number of days of memories to keep",
419 |           default: 30,
420 |         },
421 |         dryRun: {
422 |           type: "boolean",
423 |           description:
424 |             "Preview what would be deleted without actually deleting",
425 |           default: false,
426 |         },
427 |       },
428 |     },
429 |   },
430 |   {
431 |     name: "memory_pruning",
432 |     description: "Intelligent memory pruning and optimization",
433 |     inputSchema: {
434 |       type: "object",
435 |       properties: {
436 |         policy: {
437 |           type: "object",
438 |           properties: {
439 |             maxAge: {
440 |               type: "number",
441 |               description: "Maximum age in days",
442 |               default: 180,
443 |             },
444 |             maxSize: {
445 |               type: "number",
446 |               description: "Maximum storage size in MB",
447 |               default: 500,
448 |             },
449 |             maxEntries: {
450 |               type: "number",
451 |               description: "Maximum number of entries",
452 |               default: 50000,
453 |             },
454 |             preservePatterns: {
455 |               type: "array",
456 |               items: { type: "string" },
457 |               description: "Pattern types to preserve",
458 |             },
459 |             compressionThreshold: {
460 |               type: "number",
461 |               description: "Compress entries older than X days",
462 |               default: 30,
463 |             },
464 |             redundancyThreshold: {
465 |               type: "number",
466 |               description: "Remove similar entries with similarity > X",
467 |               default: 0.85,
468 |             },
469 |           },
470 |         },
471 |         dryRun: {
472 |           type: "boolean",
473 |           description: "Preview pruning without executing",
474 |           default: false,
475 |         },
476 |       },
477 |     },
478 |   },
479 |   {
480 |     name: "memory_temporal_analysis",
481 |     description: "Analyze temporal patterns and trends in memory data",
482 |     inputSchema: {
483 |       type: "object",
484 |       properties: {
485 |         query: {
486 |           type: "object",
487 |           properties: {
488 |             timeRange: {
489 |               type: "object",
490 |               properties: {
491 |                 start: { type: "string", format: "date-time" },
492 |                 end: { type: "string", format: "date-time" },
493 |               },
494 |             },
495 |             granularity: {
496 |               type: "string",
497 |               enum: ["hour", "day", "week", "month", "year"],
498 |               default: "day",
499 |             },
500 |             aggregation: {
501 |               type: "string",
502 |               enum: ["count", "success_rate", "activity_level", "diversity"],
503 |               default: "count",
504 |             },
505 |             filters: {
506 |               type: "object",
507 |               properties: {
508 |                 types: { type: "array", items: { type: "string" } },
509 |                 projects: { type: "array", items: { type: "string" } },
510 |                 outcomes: { type: "array", items: { type: "string" } },
511 |                 tags: { type: "array", items: { type: "string" } },
512 |               },
513 |             },
514 |           },
515 |         },
516 |         analysisType: {
517 |           type: "string",
518 |           enum: ["patterns", "metrics", "predictions", "insights"],
519 |           default: "patterns",
520 |         },
521 |       },
522 |     },
523 |   },
524 |   {
525 |     name: "memory_visualization",
526 |     description: "Generate visual representations of memory data",
527 |     inputSchema: {
528 |       type: "object",
529 |       properties: {
530 |         visualizationType: {
531 |           type: "string",
532 |           enum: [
533 |             "dashboard",
534 |             "timeline",
535 |             "network",
536 |             "heatmap",
537 |             "distribution",
538 |             "trends",
539 |             "custom",
540 |           ],
541 |           default: "dashboard",
542 |         },
543 |         options: {
544 |           type: "object",
545 |           properties: {
546 |             timeRange: {
547 |               type: "object",
548 |               properties: {
549 |                 start: { type: "string", format: "date-time" },
550 |                 end: { type: "string", format: "date-time" },
551 |               },
552 |             },
553 |             includeCharts: { type: "array", items: { type: "string" } },
554 |             config: {
555 |               type: "object",
556 |               properties: {
557 |                 width: { type: "number", default: 800 },
558 |                 height: { type: "number", default: 600 },
559 |                 theme: {
560 |                   type: "string",
561 |                   enum: ["light", "dark", "auto"],
562 |                   default: "light",
563 |                 },
564 |                 exportFormat: {
565 |                   type: "string",
566 |                   enum: ["svg", "png", "json", "html"],
567 |                   default: "svg",
568 |                 },
569 |                 interactive: { type: "boolean", default: true },
570 |               },
571 |             },
572 |           },
573 |         },
574 |         customVisualization: {
575 |           type: "object",
576 |           properties: {
577 |             type: {
578 |               type: "string",
579 |               enum: [
580 |                 "line",
581 |                 "bar",
582 |                 "scatter",
583 |                 "heatmap",
584 |                 "network",
585 |                 "sankey",
586 |                 "treemap",
587 |                 "timeline",
588 |               ],
589 |             },
590 |             query: {
591 |               type: "object",
592 |               properties: {
593 |                 filters: { type: "object" },
594 |                 groupBy: { type: "string" },
595 |                 aggregation: { type: "string" },
596 |               },
597 |             },
598 |           },
599 |         },
600 |       },
601 |     },
602 |   },
603 |   {
604 |     name: "memory_export_advanced",
605 |     description: "Advanced memory export with multiple formats and options",
606 |     inputSchema: {
607 |       type: "object",
608 |       properties: {
609 |         outputPath: { type: "string", description: "Output file path" },
610 |         options: {
611 |           type: "object",
612 |           properties: {
613 |             format: {
614 |               type: "string",
615 |               enum: [
616 |                 "json",
617 |                 "jsonl",
618 |                 "csv",
619 |                 "xml",
620 |                 "yaml",
621 |                 "sqlite",
622 |                 "archive",
623 |               ],
624 |               default: "json",
625 |             },
626 |             compression: {
627 |               type: "string",
628 |               enum: ["gzip", "zip", "none"],
629 |               default: "none",
630 |             },
631 |             includeMetadata: { type: "boolean", default: true },
632 |             includeLearning: { type: "boolean", default: true },
633 |             includeKnowledgeGraph: { type: "boolean", default: true },
634 |             filters: {
635 |               type: "object",
636 |               properties: {
637 |                 types: { type: "array", items: { type: "string" } },
638 |                 dateRange: {
639 |                   type: "object",
640 |                   properties: {
641 |                     start: { type: "string", format: "date-time" },
642 |                     end: { type: "string", format: "date-time" },
643 |                   },
644 |                 },
645 |                 projects: { type: "array", items: { type: "string" } },
646 |                 tags: { type: "array", items: { type: "string" } },
647 |                 outcomes: { type: "array", items: { type: "string" } },
648 |               },
649 |             },
650 |             anonymize: {
651 |               type: "object",
652 |               properties: {
653 |                 enabled: { type: "boolean", default: false },
654 |                 fields: { type: "array", items: { type: "string" } },
655 |                 method: {
656 |                   type: "string",
657 |                   enum: ["hash", "remove", "pseudonymize"],
658 |                   default: "hash",
659 |                 },
660 |               },
661 |             },
662 |             encryption: {
663 |               type: "object",
664 |               properties: {
665 |                 enabled: { type: "boolean", default: false },
666 |                 algorithm: {
667 |                   type: "string",
668 |                   enum: ["aes-256-gcm", "aes-192-gcm", "aes-128-gcm"],
669 |                   default: "aes-256-gcm",
670 |                 },
671 |                 password: { type: "string" },
672 |               },
673 |             },
674 |           },
675 |         },
676 |       },
677 |       required: ["outputPath"],
678 |     },
679 |   },
680 |   {
681 |     name: "memory_import_advanced",
682 |     description:
683 |       "Advanced memory import with validation and conflict resolution",
684 |     inputSchema: {
685 |       type: "object",
686 |       properties: {
687 |         inputPath: { type: "string", description: "Input file path" },
688 |         options: {
689 |           type: "object",
690 |           properties: {
691 |             format: {
692 |               type: "string",
693 |               enum: [
694 |                 "json",
695 |                 "jsonl",
696 |                 "csv",
697 |                 "xml",
698 |                 "yaml",
699 |                 "sqlite",
700 |                 "archive",
701 |               ],
702 |               default: "json",
703 |             },
704 |             mode: {
705 |               type: "string",
706 |               enum: ["merge", "replace", "append", "update"],
707 |               default: "merge",
708 |             },
709 |             validation: {
710 |               type: "string",
711 |               enum: ["strict", "loose", "none"],
712 |               default: "strict",
713 |             },
714 |             conflictResolution: {
715 |               type: "string",
716 |               enum: ["skip", "overwrite", "merge", "rename"],
717 |               default: "skip",
718 |             },
719 |             backup: { type: "boolean", default: true },
720 |             dryRun: { type: "boolean", default: false },
721 |             mapping: {
722 |               type: "object",
723 |               description: "Field mapping for different schemas",
724 |             },
725 |             transformation: {
726 |               type: "object",
727 |               properties: {
728 |                 enabled: { type: "boolean", default: false },
729 |                 rules: {
730 |                   type: "array",
731 |                   items: {
732 |                     type: "object",
733 |                     properties: {
734 |                       field: { type: "string" },
735 |                       operation: {
736 |                         type: "string",
737 |                         enum: ["convert", "transform", "validate"],
738 |                       },
739 |                       params: { type: "object" },
740 |                     },
741 |                   },
742 |                 },
743 |               },
744 |             },
745 |           },
746 |         },
747 |       },
748 |       required: ["inputPath"],
749 |     },
750 |   },
751 |   {
752 |     name: "memory_migration",
753 |     description:
754 |       "Create and execute migration plans between different memory systems",
755 |     inputSchema: {
756 |       type: "object",
757 |       properties: {
758 |         action: {
759 |           type: "string",
760 |           enum: ["create_plan", "execute_migration", "validate_compatibility"],
761 |           default: "create_plan",
762 |         },
763 |         sourcePath: { type: "string", description: "Source data path" },
764 |         migrationPlan: {
765 |           type: "object",
766 |           properties: {
767 |             sourceSystem: { type: "string" },
768 |             targetSystem: { type: "string", default: "DocuMCP" },
769 |             mapping: { type: "object" },
770 |             transformations: { type: "array" },
771 |             validation: { type: "array" },
772 |             postProcessing: { type: "array", items: { type: "string" } },
773 |           },
774 |         },
775 |         sourceSchema: { type: "object", description: "Source system schema" },
776 |         targetSchema: { type: "object", description: "Target system schema" },
777 |         options: {
778 |           type: "object",
779 |           properties: {
780 |             autoMap: { type: "boolean", default: true },
781 |             preserveStructure: { type: "boolean", default: true },
782 |             customMappings: { type: "object" },
783 |           },
784 |         },
785 |       },
786 |     },
787 |   },
788 |   {
789 |     name: "memory_optimization_metrics",
790 |     description: "Get comprehensive optimization metrics and recommendations",
791 |     inputSchema: {
792 |       type: "object",
793 |       properties: {
794 |         includeRecommendations: { type: "boolean", default: true },
795 |         timeRange: {
796 |           type: "object",
797 |           properties: {
798 |             start: { type: "string", format: "date-time" },
799 |             end: { type: "string", format: "date-time" },
800 |           },
801 |         },
802 |       },
803 |     },
804 |   },
805 | ];
806 | 
```
Page 18/33 · First · Prev · Next · Last