# documcp — codebase export (page 13 of 29)

Tokens: 45159/50000 · Files: 7/274 on this page.

This is page 13 of 29. Use http://codebase.md/tosin2013/documcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│   ├── agents
│   │   ├── documcp-ast.md
│   │   ├── documcp-deploy.md
│   │   ├── documcp-memory.md
│   │   ├── documcp-test.md
│   │   └── documcp-tool.md
│   ├── copilot-instructions.md
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── automated-changelog.md
│   │   ├── bug_report.md
│   │   ├── bug_report.yml
│   │   ├── documentation_issue.md
│   │   ├── feature_request.md
│   │   ├── feature_request.yml
│   │   ├── npm-publishing-fix.md
│   │   └── release_improvements.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── release-drafter.yml
│   └── workflows
│       ├── auto-merge.yml
│       ├── ci.yml
│       ├── codeql.yml
│       ├── dependency-review.yml
│       ├── deploy-docs.yml
│       ├── README.md
│       ├── release-drafter.yml
│       └── release.yml
├── .gitignore
├── .husky
│   ├── commit-msg
│   └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│   ├── .docusaurus
│   │   ├── docusaurus-plugin-content-docs
│   │   │   └── default
│   │   │       └── __mdx-loader-dependency.json
│   │   └── docusaurus-plugin-content-pages
│   │       └── default
│   │           └── __plugin.json
│   ├── adrs
│   │   ├── 001-mcp-server-architecture.md
│   │   ├── 002-repository-analysis-engine.md
│   │   ├── 003-static-site-generator-recommendation-engine.md
│   │   ├── 004-diataxis-framework-integration.md
│   │   ├── 005-github-pages-deployment-automation.md
│   │   ├── 006-mcp-tools-api-design.md
│   │   ├── 007-mcp-prompts-and-resources-integration.md
│   │   ├── 008-intelligent-content-population-engine.md
│   │   ├── 009-content-accuracy-validation-framework.md
│   │   ├── 010-mcp-resource-pattern-redesign.md
│   │   └── README.md
│   ├── api
│   │   ├── .nojekyll
│   │   ├── assets
│   │   │   ├── hierarchy.js
│   │   │   ├── highlight.css
│   │   │   ├── icons.js
│   │   │   ├── icons.svg
│   │   │   ├── main.js
│   │   │   ├── navigation.js
│   │   │   ├── search.js
│   │   │   └── style.css
│   │   ├── hierarchy.html
│   │   ├── index.html
│   │   ├── modules.html
│   │   └── variables
│   │       └── TOOLS.html
│   ├── assets
│   │   └── logo.svg
│   ├── development
│   │   └── MCP_INSPECTOR_TESTING.md
│   ├── docusaurus.config.js
│   ├── explanation
│   │   ├── architecture.md
│   │   └── index.md
│   ├── guides
│   │   ├── link-validation.md
│   │   ├── playwright-integration.md
│   │   └── playwright-testing-workflow.md
│   ├── how-to
│   │   ├── analytics-setup.md
│   │   ├── custom-domains.md
│   │   ├── documentation-freshness-tracking.md
│   │   ├── github-pages-deployment.md
│   │   ├── index.md
│   │   ├── local-testing.md
│   │   ├── performance-optimization.md
│   │   ├── prompting-guide.md
│   │   ├── repository-analysis.md
│   │   ├── seo-optimization.md
│   │   ├── site-monitoring.md
│   │   ├── troubleshooting.md
│   │   └── usage-examples.md
│   ├── index.md
│   ├── knowledge-graph.md
│   ├── package-lock.json
│   ├── package.json
│   ├── phase-2-intelligence.md
│   ├── reference
│   │   ├── api-overview.md
│   │   ├── cli.md
│   │   ├── configuration.md
│   │   ├── deploy-pages.md
│   │   ├── index.md
│   │   ├── mcp-tools.md
│   │   └── prompt-templates.md
│   ├── research
│   │   ├── cross-domain-integration
│   │   │   └── README.md
│   │   ├── domain-1-mcp-architecture
│   │   │   ├── index.md
│   │   │   └── mcp-performance-research.md
│   │   ├── domain-2-repository-analysis
│   │   │   └── README.md
│   │   ├── domain-3-ssg-recommendation
│   │   │   ├── index.md
│   │   │   └── ssg-performance-analysis.md
│   │   ├── domain-4-diataxis-integration
│   │   │   └── README.md
│   │   ├── domain-5-github-deployment
│   │   │   ├── github-pages-security-analysis.md
│   │   │   └── index.md
│   │   ├── domain-6-api-design
│   │   │   └── README.md
│   │   ├── README.md
│   │   ├── research-integration-summary-2025-01-14.md
│   │   ├── research-progress-template.md
│   │   └── research-questions-2025-01-14.md
│   ├── robots.txt
│   ├── sidebars.js
│   ├── sitemap.xml
│   ├── src
│   │   └── css
│   │       └── custom.css
│   └── tutorials
│       ├── development-setup.md
│       ├── environment-setup.md
│       ├── first-deployment.md
│       ├── getting-started.md
│       ├── index.md
│       ├── memory-workflows.md
│       └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│   └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│   ├── benchmarks
│   │   └── performance.ts
│   ├── index.ts
│   ├── memory
│   │   ├── contextual-retrieval.ts
│   │   ├── deployment-analytics.ts
│   │   ├── enhanced-manager.ts
│   │   ├── export-import.ts
│   │   ├── freshness-kg-integration.ts
│   │   ├── index.ts
│   │   ├── integration.ts
│   │   ├── kg-code-integration.ts
│   │   ├── kg-health.ts
│   │   ├── kg-integration.ts
│   │   ├── kg-link-validator.ts
│   │   ├── kg-storage.ts
│   │   ├── knowledge-graph.ts
│   │   ├── learning.ts
│   │   ├── manager.ts
│   │   ├── multi-agent-sharing.ts
│   │   ├── pruning.ts
│   │   ├── schemas.ts
│   │   ├── storage.ts
│   │   ├── temporal-analysis.ts
│   │   ├── user-preferences.ts
│   │   └── visualization.ts
│   ├── prompts
│   │   └── technical-writer-prompts.ts
│   ├── scripts
│   │   └── benchmark.ts
│   ├── templates
│   │   └── playwright
│   │       ├── accessibility.spec.template.ts
│   │       ├── Dockerfile.template
│   │       ├── docs-e2e.workflow.template.yml
│   │       ├── link-validation.spec.template.ts
│   │       └── playwright.config.template.ts
│   ├── tools
│   │   ├── analyze-deployments.ts
│   │   ├── analyze-readme.ts
│   │   ├── analyze-repository.ts
│   │   ├── check-documentation-links.ts
│   │   ├── deploy-pages.ts
│   │   ├── detect-gaps.ts
│   │   ├── evaluate-readme-health.ts
│   │   ├── generate-config.ts
│   │   ├── generate-contextual-content.ts
│   │   ├── generate-llm-context.ts
│   │   ├── generate-readme-template.ts
│   │   ├── generate-technical-writer-prompts.ts
│   │   ├── kg-health-check.ts
│   │   ├── manage-preferences.ts
│   │   ├── manage-sitemap.ts
│   │   ├── optimize-readme.ts
│   │   ├── populate-content.ts
│   │   ├── readme-best-practices.ts
│   │   ├── recommend-ssg.ts
│   │   ├── setup-playwright-tests.ts
│   │   ├── setup-structure.ts
│   │   ├── sync-code-to-docs.ts
│   │   ├── test-local-deployment.ts
│   │   ├── track-documentation-freshness.ts
│   │   ├── update-existing-documentation.ts
│   │   ├── validate-content.ts
│   │   ├── validate-documentation-freshness.ts
│   │   ├── validate-readme-checklist.ts
│   │   └── verify-deployment.ts
│   ├── types
│   │   └── api.ts
│   ├── utils
│   │   ├── ast-analyzer.ts
│   │   ├── code-scanner.ts
│   │   ├── content-extractor.ts
│   │   ├── drift-detector.ts
│   │   ├── freshness-tracker.ts
│   │   ├── language-parsers-simple.ts
│   │   ├── permission-checker.ts
│   │   └── sitemap-generator.ts
│   └── workflows
│       └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│   ├── api
│   │   └── mcp-responses.test.ts
│   ├── benchmarks
│   │   └── performance.test.ts
│   ├── edge-cases
│   │   └── error-handling.test.ts
│   ├── functional
│   │   └── tools.test.ts
│   ├── integration
│   │   ├── kg-documentation-workflow.test.ts
│   │   ├── knowledge-graph-workflow.test.ts
│   │   ├── mcp-readme-tools.test.ts
│   │   ├── memory-mcp-tools.test.ts
│   │   ├── readme-technical-writer.test.ts
│   │   └── workflow.test.ts
│   ├── memory
│   │   ├── contextual-retrieval.test.ts
│   │   ├── enhanced-manager.test.ts
│   │   ├── export-import.test.ts
│   │   ├── freshness-kg-integration.test.ts
│   │   ├── kg-code-integration.test.ts
│   │   ├── kg-health.test.ts
│   │   ├── kg-link-validator.test.ts
│   │   ├── kg-storage-validation.test.ts
│   │   ├── kg-storage.test.ts
│   │   ├── knowledge-graph-enhanced.test.ts
│   │   ├── knowledge-graph.test.ts
│   │   ├── learning.test.ts
│   │   ├── manager-advanced.test.ts
│   │   ├── manager.test.ts
│   │   ├── mcp-resource-integration.test.ts
│   │   ├── mcp-tool-persistence.test.ts
│   │   ├── schemas.test.ts
│   │   ├── storage.test.ts
│   │   ├── temporal-analysis.test.ts
│   │   └── user-preferences.test.ts
│   ├── performance
│   │   ├── memory-load-testing.test.ts
│   │   └── memory-stress-testing.test.ts
│   ├── prompts
│   │   ├── guided-workflow-prompts.test.ts
│   │   └── technical-writer-prompts.test.ts
│   ├── server.test.ts
│   ├── setup.ts
│   ├── tools
│   │   ├── all-tools.test.ts
│   │   ├── analyze-coverage.test.ts
│   │   ├── analyze-deployments.test.ts
│   │   ├── analyze-readme.test.ts
│   │   ├── analyze-repository.test.ts
│   │   ├── check-documentation-links.test.ts
│   │   ├── deploy-pages-kg-retrieval.test.ts
│   │   ├── deploy-pages-tracking.test.ts
│   │   ├── deploy-pages.test.ts
│   │   ├── detect-gaps.test.ts
│   │   ├── evaluate-readme-health.test.ts
│   │   ├── generate-contextual-content.test.ts
│   │   ├── generate-llm-context.test.ts
│   │   ├── generate-readme-template.test.ts
│   │   ├── generate-technical-writer-prompts.test.ts
│   │   ├── kg-health-check.test.ts
│   │   ├── manage-sitemap.test.ts
│   │   ├── optimize-readme.test.ts
│   │   ├── readme-best-practices.test.ts
│   │   ├── recommend-ssg-historical.test.ts
│   │   ├── recommend-ssg-preferences.test.ts
│   │   ├── recommend-ssg.test.ts
│   │   ├── simple-coverage.test.ts
│   │   ├── sync-code-to-docs.test.ts
│   │   ├── test-local-deployment.test.ts
│   │   ├── tool-error-handling.test.ts
│   │   ├── track-documentation-freshness.test.ts
│   │   ├── validate-content.test.ts
│   │   ├── validate-documentation-freshness.test.ts
│   │   └── validate-readme-checklist.test.ts
│   ├── types
│   │   └── type-safety.test.ts
│   └── utils
│       ├── ast-analyzer.test.ts
│       ├── content-extractor.test.ts
│       ├── drift-detector.test.ts
│       ├── freshness-tracker.test.ts
│       └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```

# Files

--------------------------------------------------------------------------------
/tests/tools/track-documentation-freshness.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Integration Tests for track_documentation_freshness Tool
  3 |  */
  4 | 
  5 | import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
  6 | import fs from "fs/promises";
  7 | import path from "path";
  8 | import os from "os";
  9 | import {
 10 |   trackDocumentationFreshness,
 11 |   type TrackDocumentationFreshnessInput,
 12 | } from "../../src/tools/track-documentation-freshness.js";
 13 | 
 14 | // Example git SHA for testing
 15 | const SHA_EXAMPLE = "a1b2c3d4e5f6g7h8i9j0k1l2m3n4o5p6q7r8s9t0";
 16 | 
 17 | describe("track_documentation_freshness Tool", () => {
 18 |   let tempDir: string;
 19 | 
 20 |   beforeEach(async () => {
 21 |     tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "track-freshness-test-"));
 22 |   });
 23 | 
 24 |   afterEach(async () => {
 25 |     await fs.rm(tempDir, { recursive: true, force: true });
 26 |   });
 27 | 
 28 |   describe("Basic Functionality", () => {
 29 |     it("should track freshness with preset thresholds", async () => {
 30 |       const docsPath = path.join(tempDir, "docs");
 31 |       await fs.mkdir(docsPath);
 32 | 
 33 |       // Create test files
 34 |       const now = Date.now();
 35 |       await fs.writeFile(
 36 |         path.join(docsPath, "fresh.md"),
 37 |         `---
 38 | documcp:
 39 |   last_updated: "${new Date(now - 1 * 24 * 60 * 60 * 1000).toISOString()}"
 40 | ---
 41 | # Fresh Doc`,
 42 |       );
 43 | 
 44 |       await fs.writeFile(
 45 |         path.join(docsPath, "old.md"),
 46 |         `---
 47 | documcp:
 48 |   last_updated: "${new Date(now - 60 * 24 * 60 * 60 * 1000).toISOString()}"
 49 | ---
 50 | # Old Doc`,
 51 |       );
 52 | 
 53 |       const input: TrackDocumentationFreshnessInput = {
 54 |         docsPath,
 55 |         preset: "monthly",
 56 |       };
 57 | 
 58 |       const result = await trackDocumentationFreshness(input);
 59 | 
 60 |       expect(result.success).toBe(true);
 61 |       expect(result.data).toBeDefined();
 62 |       expect(result.data.report.totalFiles).toBe(2);
 63 |       expect(result.data.report.freshFiles).toBeGreaterThan(0);
 64 |       expect(result.metadata.executionTime).toBeGreaterThan(0);
 65 |     });
 66 | 
 67 |     it("should track freshness with custom thresholds", async () => {
 68 |       const docsPath = path.join(tempDir, "docs");
 69 |       await fs.mkdir(docsPath);
 70 | 
 71 |       await fs.writeFile(
 72 |         path.join(docsPath, "test.md"),
 73 |         `---
 74 | documcp:
 75 |   last_updated: "${new Date(Date.now() - 45 * 60 * 1000).toISOString()}"
 76 | ---
 77 | # Test`,
 78 |       );
 79 | 
 80 |       const input: TrackDocumentationFreshnessInput = {
 81 |         docsPath,
 82 |         warningThreshold: { value: 30, unit: "minutes" },
 83 |         staleThreshold: { value: 1, unit: "hours" },
 84 |         criticalThreshold: { value: 2, unit: "hours" },
 85 |       };
 86 | 
 87 |       const result = await trackDocumentationFreshness(input);
 88 | 
 89 |       expect(result.success).toBe(true);
 90 |       expect(result.data.report).toBeDefined();
 91 |       expect(result.data.thresholds.warning).toEqual({
 92 |         value: 30,
 93 |         unit: "minutes",
 94 |       });
 95 |     });
 96 | 
 97 |     it("should identify files without metadata", async () => {
 98 |       const docsPath = path.join(tempDir, "docs");
 99 |       await fs.mkdir(docsPath);
100 | 
101 |       await fs.writeFile(
102 |         path.join(docsPath, "no-metadata.md"),
103 |         "# No Metadata",
104 |       );
105 | 
106 |       const input: TrackDocumentationFreshnessInput = {
107 |         docsPath,
108 |         preset: "monthly",
109 |       };
110 | 
111 |       const result = await trackDocumentationFreshness(input);
112 | 
113 |       expect(result.success).toBe(true);
114 |       expect(result.data.report.filesWithoutMetadata).toBe(1);
115 |       expect(result.data.report.totalFiles).toBe(1);
116 |     });
117 |   });
118 | 
119 |   describe("Staleness Levels", () => {
120 |     it("should correctly categorize fresh files", async () => {
121 |       const docsPath = path.join(tempDir, "docs");
122 |       await fs.mkdir(docsPath);
123 | 
124 |       await fs.writeFile(
125 |         path.join(docsPath, "fresh.md"),
126 |         `---
127 | documcp:
128 |   last_updated: "${new Date(
129 |     Date.now() - 2 * 24 * 60 * 60 * 1000,
130 |   ).toISOString()}"
131 | ---
132 | # Fresh`,
133 |       );
134 | 
135 |       const input: TrackDocumentationFreshnessInput = {
136 |         docsPath,
137 |         preset: "monthly",
138 |       };
139 | 
140 |       const result = await trackDocumentationFreshness(input);
141 | 
142 |       expect(result.data.report.freshFiles).toBe(1);
143 |       expect(result.data.report.staleFiles).toBe(0);
144 |       expect(result.data.report.criticalFiles).toBe(0);
145 |     });
146 | 
147 |     it("should correctly categorize stale files", async () => {
148 |       const docsPath = path.join(tempDir, "docs");
149 |       await fs.mkdir(docsPath);
150 | 
151 |       await fs.writeFile(
152 |         path.join(docsPath, "stale.md"),
153 |         `---
154 | documcp:
155 |   last_updated: "${new Date(
156 |     Date.now() - 70 * 24 * 60 * 60 * 1000,
157 |   ).toISOString()}"
158 | ---
159 | # Stale`,
160 |       );
161 | 
162 |       const input: TrackDocumentationFreshnessInput = {
163 |         docsPath,
164 |         preset: "monthly",
165 |       };
166 | 
167 |       const result = await trackDocumentationFreshness(input);
168 | 
169 |       expect(result.data.report.staleFiles).toBeGreaterThan(0);
170 |     });
171 | 
172 |     it("should correctly categorize critical files", async () => {
173 |       const docsPath = path.join(tempDir, "docs");
174 |       await fs.mkdir(docsPath);
175 | 
176 |       await fs.writeFile(
177 |         path.join(docsPath, "critical.md"),
178 |         `---
179 | documcp:
180 |   last_updated: "${new Date(
181 |     Date.now() - 100 * 24 * 60 * 60 * 1000,
182 |   ).toISOString()}"
183 | ---
184 | # Critical`,
185 |       );
186 | 
187 |       const input: TrackDocumentationFreshnessInput = {
188 |         docsPath,
189 |         preset: "monthly",
190 |       };
191 | 
192 |       const result = await trackDocumentationFreshness(input);
193 | 
194 |       expect(result.data.report.criticalFiles).toBe(1);
195 |     });
196 |   });
197 | 
198 |   describe("File Listing Options", () => {
199 |     it("should include file list when requested", async () => {
200 |       const docsPath = path.join(tempDir, "docs");
201 |       await fs.mkdir(docsPath);
202 | 
203 |       await fs.writeFile(path.join(docsPath, "test.md"), "# Test");
204 | 
205 |       const input: TrackDocumentationFreshnessInput = {
206 |         docsPath,
207 |         preset: "monthly",
208 |         includeFileList: true,
209 |       };
210 | 
211 |       const result = await trackDocumentationFreshness(input);
212 | 
213 |       expect(result.data.report.files).toBeDefined();
214 |       expect(result.data.report.files.length).toBe(1);
215 |       expect(result.data.formattedReport).toContain("File Details");
216 |     });
217 | 
218 |     it("should exclude file list when not requested", async () => {
219 |       const docsPath = path.join(tempDir, "docs");
220 |       await fs.mkdir(docsPath);
221 | 
222 |       await fs.writeFile(path.join(docsPath, "test.md"), "# Test");
223 | 
224 |       const input: TrackDocumentationFreshnessInput = {
225 |         docsPath,
226 |         preset: "monthly",
227 |         includeFileList: false,
228 |       };
229 | 
230 |       const result = await trackDocumentationFreshness(input);
231 | 
232 |       expect(result.data.formattedReport).not.toContain("File Details");
233 |     });
234 |   });
235 | 
236 |   describe("Sorting Options", () => {
237 |     it("should sort files by staleness", async () => {
238 |       const docsPath = path.join(tempDir, "docs");
239 |       await fs.mkdir(docsPath);
240 | 
241 |       const now = Date.now();
242 |       await fs.writeFile(
243 |         path.join(docsPath, "fresh.md"),
244 |         `---
245 | documcp:
246 |   last_updated: "${new Date(now - 1 * 24 * 60 * 60 * 1000).toISOString()}"
247 | ---
248 | # Fresh`,
249 |       );
250 | 
251 |       await fs.writeFile(
252 |         path.join(docsPath, "stale.md"),
253 |         `---
254 | documcp:
255 |   last_updated: "${new Date(now - 60 * 24 * 60 * 60 * 1000).toISOString()}"
256 | ---
257 | # Stale`,
258 |       );
259 | 
260 |       const input: TrackDocumentationFreshnessInput = {
261 |         docsPath,
262 |         preset: "monthly",
263 |         sortBy: "staleness",
264 |       };
265 | 
266 |       const result = await trackDocumentationFreshness(input);
267 | 
268 |       expect(result.success).toBe(true);
269 |       // Stale files should appear first when sorted by staleness
270 |       const formattedReport = result.data.formattedReport;
271 |       const staleIndex = formattedReport.indexOf("stale.md");
272 |       const freshIndex = formattedReport.indexOf("fresh.md");
273 | 
274 |       if (staleIndex !== -1 && freshIndex !== -1) {
275 |         expect(staleIndex).toBeLessThan(freshIndex);
276 |       }
277 |     });
278 | 
279 |     it("should sort files by age", async () => {
280 |       const docsPath = path.join(tempDir, "docs");
281 |       await fs.mkdir(docsPath);
282 | 
283 |       const now = Date.now();
284 |       await fs.writeFile(
285 |         path.join(docsPath, "newer.md"),
286 |         `---
287 | documcp:
288 |   last_updated: "${new Date(now - 10 * 24 * 60 * 60 * 1000).toISOString()}"
289 | ---
290 | # Newer`,
291 |       );
292 | 
293 |       await fs.writeFile(
294 |         path.join(docsPath, "older.md"),
295 |         `---
296 | documcp:
297 |   last_updated: "${new Date(now - 50 * 24 * 60 * 60 * 1000).toISOString()}"
298 | ---
299 | # Older`,
300 |       );
301 | 
302 |       const input: TrackDocumentationFreshnessInput = {
303 |         docsPath,
304 |         preset: "monthly",
305 |         sortBy: "age",
306 |       };
307 | 
308 |       const result = await trackDocumentationFreshness(input);
309 | 
310 |       expect(result.success).toBe(true);
311 |     });
312 |   });
313 | 
314 |   describe("Nested Directories", () => {
315 |     it("should scan nested directories recursively", async () => {
316 |       const docsPath = path.join(tempDir, "docs");
317 |       await fs.mkdir(docsPath);
318 |       await fs.mkdir(path.join(docsPath, "api"));
319 |       await fs.mkdir(path.join(docsPath, "guides"));
320 | 
321 |       await fs.writeFile(path.join(docsPath, "index.md"), "# Index");
322 |       await fs.writeFile(path.join(docsPath, "api", "endpoints.md"), "# API");
323 |       await fs.writeFile(
324 |         path.join(docsPath, "guides", "tutorial.md"),
325 |         "# Guide",
326 |       );
327 | 
328 |       const input: TrackDocumentationFreshnessInput = {
329 |         docsPath,
330 |         preset: "monthly",
331 |       };
332 | 
333 |       const result = await trackDocumentationFreshness(input);
334 | 
335 |       expect(result.data.report.totalFiles).toBe(3);
336 |     });
337 | 
338 |     it("should skip common ignored directories", async () => {
339 |       const docsPath = path.join(tempDir, "docs");
340 |       await fs.mkdir(docsPath);
341 |       await fs.mkdir(path.join(docsPath, "node_modules"));
342 |       await fs.mkdir(path.join(docsPath, ".git"));
343 | 
344 |       await fs.writeFile(path.join(docsPath, "index.md"), "# Index");
345 |       await fs.writeFile(
346 |         path.join(docsPath, "node_modules", "skip.md"),
347 |         "# Skip",
348 |       );
349 |       await fs.writeFile(path.join(docsPath, ".git", "skip.md"), "# Skip");
350 | 
351 |       const input: TrackDocumentationFreshnessInput = {
352 |         docsPath,
353 |         preset: "monthly",
354 |       };
355 | 
356 |       const result = await trackDocumentationFreshness(input);
357 | 
358 |       expect(result.data.report.totalFiles).toBe(1);
359 |     });
360 |   });
361 | 
362 |   describe("Error Handling", () => {
363 |     it("should handle non-existent directory", async () => {
364 |       const input: TrackDocumentationFreshnessInput = {
365 |         docsPath: "/nonexistent/path",
366 |         preset: "monthly",
367 |       };
368 | 
369 |       const result = await trackDocumentationFreshness(input);
370 | 
371 |       expect(result.success).toBe(false);
372 |       expect(result.error).toBeDefined();
373 |       expect(result.error?.code).toBe("FRESHNESS_TRACKING_FAILED");
374 |     });
375 | 
376 |     it("should handle empty directory", async () => {
377 |       const docsPath = path.join(tempDir, "empty-docs");
378 |       await fs.mkdir(docsPath);
379 | 
380 |       const input: TrackDocumentationFreshnessInput = {
381 |         docsPath,
382 |         preset: "monthly",
383 |       };
384 | 
385 |       const result = await trackDocumentationFreshness(input);
386 | 
387 |       expect(result.success).toBe(true);
388 |       expect(result.data.report.totalFiles).toBe(0);
389 |     });
390 |   });
391 | 
392 |   describe("Preset Thresholds", () => {
393 |     const presets: Array<
394 |       keyof typeof import("../../src/utils/freshness-tracker.js").STALENESS_PRESETS
395 |     > = ["realtime", "active", "recent", "weekly", "monthly", "quarterly"];
396 | 
397 |     presets.forEach((preset) => {
398 |       it(`should work with ${preset} preset`, async () => {
399 |         const docsPath = path.join(tempDir, `docs-${preset}`);
400 |         await fs.mkdir(docsPath);
401 | 
402 |         await fs.writeFile(path.join(docsPath, "test.md"), "# Test");
403 | 
404 |         const input: TrackDocumentationFreshnessInput = {
405 |           docsPath,
406 |           preset,
407 |         };
408 | 
409 |         const result = await trackDocumentationFreshness(input);
410 | 
411 |         expect(result.success).toBe(true);
412 |         expect(result.data.thresholds).toBeDefined();
413 |       });
414 |     });
415 |   });
416 | 
417 |   describe("Output Format", () => {
418 |     it("should include formatted report in response", async () => {
419 |       const docsPath = path.join(tempDir, "docs");
420 |       await fs.mkdir(docsPath);
421 | 
422 |       await fs.writeFile(path.join(docsPath, "test.md"), "# Test");
423 | 
424 |       const input: TrackDocumentationFreshnessInput = {
425 |         docsPath,
426 |         preset: "monthly",
427 |       };
428 | 
429 |       const result = await trackDocumentationFreshness(input);
430 | 
431 |       expect(result.data.formattedReport).toBeDefined();
432 |       expect(result.data.formattedReport).toContain(
433 |         "Documentation Freshness Report",
434 |       );
435 |       expect(result.data.formattedReport).toContain("Summary Statistics");
436 |       expect(result.data.formattedReport).toContain("Freshness Breakdown");
437 |     });
438 | 
439 |     it("should include summary in response", async () => {
440 |       const docsPath = path.join(tempDir, "docs");
441 |       await fs.mkdir(docsPath);
442 | 
443 |       await fs.writeFile(path.join(docsPath, "test.md"), "# Test");
444 | 
445 |       const input: TrackDocumentationFreshnessInput = {
446 |         docsPath,
447 |         preset: "monthly",
448 |       };
449 | 
450 |       const result = await trackDocumentationFreshness(input);
451 | 
452 |       expect(result.data.summary).toBeDefined();
453 |       expect(result.data.summary).toContain("Scanned");
454 |       expect(result.data.summary).toContain("files");
455 |     });
456 | 
457 |     it("should include metadata in response", async () => {
458 |       const docsPath = path.join(tempDir, "docs");
459 |       await fs.mkdir(docsPath);
460 | 
461 |       await fs.writeFile(path.join(docsPath, "test.md"), "# Test");
462 | 
463 |       const input: TrackDocumentationFreshnessInput = {
464 |         docsPath,
465 |         preset: "monthly",
466 |       };
467 | 
468 |       const result = await trackDocumentationFreshness(input);
469 | 
470 |       expect(result.metadata).toBeDefined();
471 |       expect(result.metadata.toolVersion).toBe("1.0.0");
472 |       expect(result.metadata.timestamp).toBeDefined();
473 |       expect(result.metadata.executionTime).toBeGreaterThanOrEqual(0);
474 |     });
475 | 
476 |     it("should handle KG storage disabled", async () => {
477 |       const docsPath = path.join(tempDir, "docs");
478 |       const projectPath = tempDir;
479 |       await fs.mkdir(docsPath);
480 | 
481 |       await fs.writeFile(path.join(docsPath, "test.md"), "# Test");
482 | 
483 |       const input: TrackDocumentationFreshnessInput = {
484 |         docsPath,
485 |         projectPath,
486 |         preset: "monthly",
487 |         storeInKG: false,
488 |       };
489 | 
490 |       const result = await trackDocumentationFreshness(input);
491 | 
492 |       expect(result.success).toBe(true);
493 |       expect(result.data.kgInsights).toBeUndefined();
494 |     });
495 | 
496 |     it("should handle projectPath without KG storage", async () => {
497 |       const docsPath = path.join(tempDir, "docs");
498 |       await fs.mkdir(docsPath);
499 | 
500 |       await fs.writeFile(path.join(docsPath, "test.md"), "# Test");
501 | 
502 |       const input: TrackDocumentationFreshnessInput = {
503 |         docsPath,
504 |         // No projectPath provided
505 |         preset: "monthly",
506 |         storeInKG: true, // Won't store because projectPath is missing
507 |       };
508 | 
509 |       const result = await trackDocumentationFreshness(input);
510 | 
511 |       expect(result.success).toBe(true);
512 |     });
513 | 
514 |     it("should handle error gracefully", async () => {
515 |       const input: TrackDocumentationFreshnessInput = {
516 |         docsPath: "/nonexistent/path/that/does/not/exist",
517 |         preset: "monthly",
518 |       };
519 | 
520 |       const result = await trackDocumentationFreshness(input);
521 | 
522 |       expect(result.success).toBe(false);
523 |       expect(result.error).toBeDefined();
524 |       expect(result.error?.code).toBe("FRESHNESS_TRACKING_FAILED");
525 |       expect(result.metadata).toBeDefined();
526 |     });
527 | 
528 |     it("should sort files by age", async () => {
529 |       const docsPath = path.join(tempDir, "docs");
530 |       await fs.mkdir(docsPath);
531 | 
532 |       const now = Date.now();
533 |       await fs.writeFile(
534 |         path.join(docsPath, "newer.md"),
535 |         `---
536 | documcp:
537 |   last_updated: "${new Date(now - 1 * 24 * 60 * 60 * 1000).toISOString()}"
538 | ---
539 | # Newer`,
540 |       );
541 | 
542 |       await fs.writeFile(
543 |         path.join(docsPath, "older.md"),
544 |         `---
545 | documcp:
546 |   last_updated: "${new Date(now - 10 * 24 * 60 * 60 * 1000).toISOString()}"
547 | ---
548 | # Older`,
549 |       );
550 | 
551 |       const input: TrackDocumentationFreshnessInput = {
552 |         docsPath,
553 |         preset: "monthly",
554 |         sortBy: "age",
555 |       };
556 | 
557 |       const result = await trackDocumentationFreshness(input);
558 | 
559 |       expect(result.success).toBe(true);
560 |       expect(result.data.report.files.length).toBe(2);
561 |     });
562 | 
563 |     it("should sort files by path", async () => {
564 |       const docsPath = path.join(tempDir, "docs");
565 |       await fs.mkdir(docsPath);
566 | 
567 |       await fs.writeFile(path.join(docsPath, "z.md"), "# Z");
568 |       await fs.writeFile(path.join(docsPath, "a.md"), "# A");
569 | 
570 |       const input: TrackDocumentationFreshnessInput = {
571 |         docsPath,
572 |         preset: "monthly",
573 |         sortBy: "path",
574 |       };
575 | 
576 |       const result = await trackDocumentationFreshness(input);
577 | 
578 |       expect(result.success).toBe(true);
579 |     });
580 | 
581 |     it("should display commit hash for files validated against commits", async () => {
582 |       const docsPath = path.join(tempDir, "docs");
583 |       const projectPath = tempDir;
584 |       await fs.mkdir(docsPath);
585 | 
586 |       // Create file with validated_against_commit metadata
587 |       const fileContent = `---
588 | documcp:
589 |   last_updated: ${new Date().toISOString()}
590 |   last_validated: ${new Date().toISOString()}
591 |   validated_against_commit: ${SHA_EXAMPLE}
592 | ---
593 | # Test Document
594 | Content`;
595 | 
596 |       await fs.writeFile(path.join(docsPath, "test.md"), fileContent);
597 | 
598 |       const input: TrackDocumentationFreshnessInput = {
599 |         docsPath,
600 |         projectPath,
601 |         preset: "monthly",
602 |         includeFileList: true,
603 |       };
604 | 
605 |       const result = await trackDocumentationFreshness(input);
606 |       expect(result.success).toBe(true);
607 |       expect(result.data.formattedReport).toContain(
608 |         SHA_EXAMPLE.substring(0, 7),
609 |       );
610 |     });
611 | 
612 |     it("should format warning recommendations correctly", async () => {
613 |       const docsPath = path.join(tempDir, "docs");
614 |       const projectPath = tempDir;
615 |       await fs.mkdir(docsPath);
616 | 
617 |       // Create a file with warning-level staleness
618 |       const warnDate = new Date();
619 |       warnDate.setDate(warnDate.getDate() - 45); // 45 days ago (monthly preset: warning=30d, stale=60d, critical=90d)
620 | 
621 |       const fileContent = `---
622 | documcp:
623 |   last_updated: ${warnDate.toISOString()}
624 |   last_validated: ${warnDate.toISOString()}
625 | ---
626 | # Test Document`;
627 | 
628 |       await fs.writeFile(path.join(docsPath, "warn.md"), fileContent);
629 | 
630 |       const input: TrackDocumentationFreshnessInput = {
631 |         docsPath,
632 |         projectPath,
633 |         preset: "monthly",
634 |         storeInKG: true,
635 |       };
636 | 
637 |       const result = await trackDocumentationFreshness(input);
638 |       expect(result.success).toBe(true);
639 |       expect(result.data.report.warningFiles).toBeGreaterThan(0);
640 |     });
641 | 
642 |     it("should format critical recommendations correctly", async () => {
643 |       const docsPath = path.join(tempDir, "docs");
644 |       const projectPath = tempDir;
645 |       await fs.mkdir(docsPath);
646 | 
647 |       // Create a file with critical-level staleness
648 |       const criticalDate = new Date();
649 |       criticalDate.setDate(criticalDate.getDate() - 100); // 100 days ago (critical for monthly preset)
650 | 
651 |       const fileContent = `---
652 | documcp:
653 |   last_updated: ${criticalDate.toISOString()}
654 |   last_validated: ${criticalDate.toISOString()}
655 | ---
656 | # Old Document`;
657 | 
658 |       await fs.writeFile(path.join(docsPath, "critical.md"), fileContent);
659 | 
660 |       const input: TrackDocumentationFreshnessInput = {
661 |         docsPath,
662 |         projectPath,
663 |         preset: "monthly",
664 |         storeInKG: true,
665 |       };
666 | 
667 |       const result = await trackDocumentationFreshness(input);
668 |       expect(result.success).toBe(true);
669 |       expect(result.data.report.criticalFiles).toBeGreaterThan(0);
670 |     });
671 |   });
672 | });
673 | 
```

--------------------------------------------------------------------------------
/tests/utils/ast-analyzer.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * AST Analyzer Tests (Phase 3)
  3 |  */
  4 | 
  5 | import {
  6 |   ASTAnalyzer,
  7 |   FunctionSignature,
  8 |   ClassInfo,
  9 | } from "../../src/utils/ast-analyzer.js";
 10 | import { promises as fs } from "fs";
 11 | import { tmpdir } from "os";
 12 | import { join } from "path";
 13 | import { mkdtemp, rm } from "fs/promises";
 14 | 
 15 | describe("ASTAnalyzer", () => {
 16 |   let analyzer: ASTAnalyzer;
 17 |   let tempDir: string;
 18 | 
 19 |   beforeAll(async () => {
 20 |     analyzer = new ASTAnalyzer();
 21 |     await analyzer.initialize();
 22 |     tempDir = await mkdtemp(join(tmpdir(), "ast-test-"));
 23 |   });
 24 | 
 25 |   afterAll(async () => {
 26 |     await rm(tempDir, { recursive: true, force: true });
 27 |   });
 28 | 
 29 |   describe("TypeScript/JavaScript Analysis", () => {
 30 |     test("should extract function declarations", async () => {
 31 |       const code = `
 32 | export async function testFunction(param1: string, param2: number): Promise<void> {
 33 |   console.log(param1, param2);
 34 | }
 35 | 
 36 | export function syncFunction(name: string): string {
 37 |   return name.toUpperCase();
 38 | }
 39 |       `.trim();
 40 | 
 41 |       const filePath = join(tempDir, "test-functions.ts");
 42 |       await fs.writeFile(filePath, code);
 43 | 
 44 |       const result = await analyzer.analyzeFile(filePath);
 45 | 
 46 |       expect(result).not.toBeNull();
 47 |       expect(result?.functions).toHaveLength(2);
 48 | 
 49 |       const asyncFunc = result?.functions.find(
 50 |         (f) => f.name === "testFunction",
 51 |       );
 52 |       expect(asyncFunc).toBeDefined();
 53 |       expect(asyncFunc?.isAsync).toBe(true);
 54 |       expect(asyncFunc?.isExported).toBe(true);
 55 |       expect(asyncFunc?.parameters).toHaveLength(2);
 56 |       expect(asyncFunc?.returnType).toBe("Promise");
 57 | 
 58 |       const syncFunc = result?.functions.find((f) => f.name === "syncFunction");
 59 |       expect(syncFunc).toBeDefined();
 60 |       expect(syncFunc?.isAsync).toBe(false);
 61 |       expect(syncFunc?.returnType).toBe("string");
 62 |     });
 63 | 
 64 |     test("should extract arrow function declarations", async () => {
 65 |       const code = `
 66 | export const arrowFunc = async (x: number, y: number): Promise<number> => {
 67 |   return x + y;
 68 | };
 69 | 
 70 | const privateFunc = (name: string) => {
 71 |   return name.toLowerCase();
 72 | };
 73 |       `.trim();
 74 | 
 75 |       const filePath = join(tempDir, "test-arrow.ts");
 76 |       await fs.writeFile(filePath, code);
 77 | 
 78 |       const result = await analyzer.analyzeFile(filePath);
 79 | 
 80 |       expect(result).not.toBeNull();
 81 |       expect(result?.functions).toHaveLength(2);
 82 | 
 83 |       const exportedArrow = result?.functions.find(
 84 |         (f) => f.name === "arrowFunc",
 85 |       );
 86 |       expect(exportedArrow).toBeDefined();
 87 |       expect(exportedArrow?.isAsync).toBe(true);
 88 |       expect(exportedArrow?.parameters).toHaveLength(2);
 89 |     });
 90 | 
 91 |     test("should extract class information", async () => {
 92 |       const code = `
 93 | /**
 94 |  * Test class documentation
 95 |  */
 96 | export class TestClass extends BaseClass {
 97 |   private value: number;
 98 |   public readonly name: string;
 99 | 
100 |   constructor(name: string) {
101 |     super();
102 |     this.name = name;
103 |     this.value = 0;
104 |   }
105 | 
106 |   /**
107 |    * Public method
108 |    */
109 |   public async getValue(): Promise<number> {
110 |     return this.value;
111 |   }
112 | 
113 |   private setValue(val: number): void {
114 |     this.value = val;
115 |   }
116 | }
117 |       `.trim();
118 | 
119 |       const filePath = join(tempDir, "test-class.ts");
120 |       await fs.writeFile(filePath, code);
121 | 
122 |       const result = await analyzer.analyzeFile(filePath);
123 | 
124 |       expect(result).not.toBeNull();
125 |       expect(result?.classes).toHaveLength(1);
126 | 
127 |       const testClass = result?.classes[0];
128 |       expect(testClass?.name).toBe("TestClass");
129 |       expect(testClass?.isExported).toBe(true);
130 |       expect(testClass?.extends).toBe("BaseClass");
131 |       expect(testClass?.properties).toHaveLength(2);
132 |       expect(testClass?.methods.length).toBeGreaterThan(0);
133 | 
134 |       const publicMethod = testClass?.methods.find(
135 |         (m) => m.name === "getValue",
136 |       );
137 |       expect(publicMethod).toBeDefined();
138 |       expect(publicMethod?.isAsync).toBe(true);
139 |       expect(publicMethod?.isPublic).toBe(true);
140 |     });
141 | 
142 |     test("should extract interface information", async () => {
143 |       const code = `
144 | /**
145 |  * User interface
146 |  */
147 | export interface User {
148 |   id: string;
149 |   name: string;
150 |   age: number;
151 |   readonly email: string;
152 |   getProfile(): Promise<Profile>;
153 | }
154 | 
155 | interface Profile {
156 |   bio: string;
157 | }
158 |       `.trim();
159 | 
160 |       const filePath = join(tempDir, "test-interface.ts");
161 |       await fs.writeFile(filePath, code);
162 | 
163 |       const result = await analyzer.analyzeFile(filePath);
164 | 
165 |       expect(result).not.toBeNull();
166 |       expect(result?.interfaces).toHaveLength(2);
167 | 
168 |       const userInterface = result?.interfaces.find((i) => i.name === "User");
169 |       expect(userInterface).toBeDefined();
170 |       expect(userInterface?.isExported).toBe(true);
171 |       expect(userInterface?.properties).toHaveLength(4);
172 |       expect(userInterface?.methods).toHaveLength(1);
173 | 
174 |       const emailProp = userInterface?.properties.find(
175 |         (p) => p.name === "email",
176 |       );
177 |       expect(emailProp?.isReadonly).toBe(true);
178 |     });
179 | 
180 |     test("should extract type aliases", async () => {
181 |       const code = `
182 | export type ID = string | number;
183 | export type Status = "pending" | "active" | "inactive";
184 | type PrivateType = { x: number; y: number };
185 |       `.trim();
186 | 
187 |       const filePath = join(tempDir, "test-types.ts");
188 |       await fs.writeFile(filePath, code);
189 | 
190 |       const result = await analyzer.analyzeFile(filePath);
191 | 
192 |       expect(result).not.toBeNull();
193 |       expect(result?.types).toHaveLength(3);
194 | 
195 |       const idType = result?.types.find((t) => t.name === "ID");
196 |       expect(idType?.isExported).toBe(true);
197 |     });
198 | 
199 |     test("should extract imports and exports", async () => {
200 |       const code = `
201 | import { func1, func2 } from "./module1";
202 | import type { Type1 } from "./types";
203 | import defaultExport from "./default";
204 | 
205 | export { func1, func2 };
206 | export default class MyClass {}
207 |       `.trim();
208 | 
209 |       const filePath = join(tempDir, "test-imports.ts");
210 |       await fs.writeFile(filePath, code);
211 | 
212 |       const result = await analyzer.analyzeFile(filePath);
213 | 
214 |       expect(result).not.toBeNull();
215 |       expect(result?.imports.length).toBeGreaterThan(0);
216 |       expect(result?.exports).toContain("func1");
217 |       expect(result?.exports).toContain("func2");
218 |     });
219 | 
220 |     test("should calculate complexity metrics", async () => {
221 |       const code = `
222 | export function complexFunction(x: number): number {
223 |   if (x > 10) {
224 |     for (let i = 0; i < x; i++) {
225 |       if (i % 2 === 0) {
226 |         try {
227 |           return i;
228 |         } catch (error) {
229 |           continue;
230 |         }
231 |       }
232 |     }
233 |   } else {
234 |     return 0;
235 |   }
236 |   return -1;
237 | }
238 |       `.trim();
239 | 
240 |       const filePath = join(tempDir, "test-complexity.ts");
241 |       await fs.writeFile(filePath, code);
242 | 
243 |       const result = await analyzer.analyzeFile(filePath);
244 | 
245 |       expect(result).not.toBeNull();
246 |       const func = result?.functions[0];
247 |       expect(func?.complexity).toBeGreaterThan(1);
248 |     });
249 | 
250 |     test("should extract JSDoc comments", async () => {
251 |       const code = `
252 | /**
253 |  * This function adds two numbers
254 |  * @param a First number
255 |  * @param b Second number
256 |  * @returns The sum
257 |  */
258 | export function add(a: number, b: number): number {
259 |   return a + b;
260 | }
261 |       `.trim();
262 | 
263 |       const filePath = join(tempDir, "test-jsdoc.ts");
264 |       await fs.writeFile(filePath, code);
265 | 
266 |       const result = await analyzer.analyzeFile(filePath);
267 | 
268 |       expect(result).not.toBeNull();
269 |       const func = result?.functions[0];
270 |       expect(func?.docComment).toBeTruthy();
271 |       expect(func?.docComment).toContain("adds two numbers");
272 |     });
273 |   });
274 | 
275 |   describe("Drift Detection", () => {
276 |     test("should detect function signature changes", async () => {
277 |       const oldCode = `
278 | export function processData(data: string): void {
279 |   console.log(data);
280 | }
281 |       `.trim();
282 | 
283 |       const newCode = `
284 | export function processData(data: string, options: object): Promise<string> {
285 |   console.log(data, options);
286 |   return Promise.resolve("done");
287 | }
288 |       `.trim();
289 | 
290 |       const oldFile = join(tempDir, "old-file.ts");
291 |       const newFile = join(tempDir, "new-file.ts");
292 | 
293 |       await fs.writeFile(oldFile, oldCode);
294 |       await fs.writeFile(newFile, newCode);
295 | 
296 |       const oldAnalysis = await analyzer.analyzeFile(oldFile);
297 |       const newAnalysis = await analyzer.analyzeFile(newFile);
298 | 
299 |       expect(oldAnalysis).not.toBeNull();
300 |       expect(newAnalysis).not.toBeNull();
301 | 
302 |       const diffs = await analyzer.detectDrift(oldAnalysis!, newAnalysis!);
303 | 
304 |       expect(diffs.length).toBeGreaterThan(0);
305 |       const funcDiff = diffs.find(
306 |         (d) => d.category === "function" && d.name === "processData",
307 |       );
308 |       expect(funcDiff).toBeDefined();
309 |       expect(funcDiff?.type).toBe("modified");
310 |       expect(funcDiff?.impactLevel).toBe("breaking");
311 |     });
312 | 
313 |     test("should detect removed functions", async () => {
314 |       const oldCode = `
315 | export function oldFunction(): void {}
316 | export function keepFunction(): void {}
317 |       `.trim();
318 | 
319 |       const newCode = `
320 | export function keepFunction(): void {}
321 |       `.trim();
322 | 
323 |       const oldFile = join(tempDir, "old-removed.ts");
324 |       const newFile = join(tempDir, "new-removed.ts");
325 | 
326 |       await fs.writeFile(oldFile, oldCode);
327 |       await fs.writeFile(newFile, newCode);
328 | 
329 |       const oldAnalysis = await analyzer.analyzeFile(oldFile);
330 |       const newAnalysis = await analyzer.analyzeFile(newFile);
331 | 
332 |       const diffs = await analyzer.detectDrift(oldAnalysis!, newAnalysis!);
333 | 
334 |       const removedDiff = diffs.find((d) => d.name === "oldFunction");
335 |       expect(removedDiff).toBeDefined();
336 |       expect(removedDiff?.type).toBe("removed");
337 |       expect(removedDiff?.impactLevel).toBe("breaking");
338 |     });
339 | 
340 |     test("should detect added functions", async () => {
341 |       const oldCode = `
342 | export function existingFunction(): void {}
343 |       `.trim();
344 | 
345 |       const newCode = `
346 | export function existingFunction(): void {}
347 | export function newFunction(): void {}
348 |       `.trim();
349 | 
350 |       const oldFile = join(tempDir, "old-added.ts");
351 |       const newFile = join(tempDir, "new-added.ts");
352 | 
353 |       await fs.writeFile(oldFile, oldCode);
354 |       await fs.writeFile(newFile, newCode);
355 | 
356 |       const oldAnalysis = await analyzer.analyzeFile(oldFile);
357 |       const newAnalysis = await analyzer.analyzeFile(newFile);
358 | 
359 |       const diffs = await analyzer.detectDrift(oldAnalysis!, newAnalysis!);
360 | 
361 |       const addedDiff = diffs.find((d) => d.name === "newFunction");
362 |       expect(addedDiff).toBeDefined();
363 |       expect(addedDiff?.type).toBe("added");
364 |       expect(addedDiff?.impactLevel).toBe("patch");
365 |     });
366 | 
367 |     test("should detect minor changes", async () => {
368 |       const oldCode = `
369 | function internalFunction(): void {}
370 |       `.trim();
371 | 
372 |       const newCode = `
373 | export function internalFunction(): void {}
374 |       `.trim();
375 | 
376 |       const oldFile = join(tempDir, "old-minor.ts");
377 |       const newFile = join(tempDir, "new-minor.ts");
378 | 
379 |       await fs.writeFile(oldFile, oldCode);
380 |       await fs.writeFile(newFile, newCode);
381 | 
382 |       const oldAnalysis = await analyzer.analyzeFile(oldFile);
383 |       const newAnalysis = await analyzer.analyzeFile(newFile);
384 | 
385 |       const diffs = await analyzer.detectDrift(oldAnalysis!, newAnalysis!);
386 | 
387 |       const minorDiff = diffs.find((d) => d.name === "internalFunction");
388 |       expect(minorDiff).toBeDefined();
389 |       expect(minorDiff?.type).toBe("modified");
390 |       expect(minorDiff?.impactLevel).toBe("minor");
391 |     });
392 |   });
393 | 
394 |   describe("Edge Cases", () => {
395 |     test("should handle empty files", async () => {
396 |       const filePath = join(tempDir, "empty.ts");
397 |       await fs.writeFile(filePath, "");
398 | 
399 |       const result = await analyzer.analyzeFile(filePath);
400 | 
401 |       expect(result).not.toBeNull();
402 |       expect(result?.functions).toHaveLength(0);
403 |       expect(result?.classes).toHaveLength(0);
404 |     });
405 | 
406 |     test("should handle files with only comments", async () => {
407 |       const code = `
408 | // This is a comment
409 | /* Multi-line
410 |    comment */
411 |       `.trim();
412 | 
413 |       const filePath = join(tempDir, "comments-only.ts");
414 |       await fs.writeFile(filePath, code);
415 | 
416 |       const result = await analyzer.analyzeFile(filePath);
417 | 
418 |       expect(result).not.toBeNull();
419 |       expect(result?.functions).toHaveLength(0);
420 |     });
421 | 
422 |     test("should handle syntax errors gracefully", async () => {
423 |       const code = `
424 | export function broken(
425 |   // Missing closing paren and body
426 |       `.trim();
427 | 
428 |       const filePath = join(tempDir, "syntax-error.ts");
429 |       await fs.writeFile(filePath, code);
430 | 
431 |       const result = await analyzer.analyzeFile(filePath);
432 | 
433 |       // Should still return a result, even if incomplete
434 |       expect(result).not.toBeNull();
435 |     });
436 | 
437 |     test("should return null for unsupported file types", async () => {
438 |       const filePath = join(tempDir, "test.txt");
439 |       await fs.writeFile(filePath, "Some text content");
440 | 
441 |       const result = await analyzer.analyzeFile(filePath);
442 | 
443 |       expect(result).toBeNull();
444 |     });
445 |   });
446 | 
447 |   describe("Content Hashing", () => {
448 |     test("should generate consistent content hashes", async () => {
449 |       const code = `export function test(): void {}`;
450 | 
451 |       const file1 = join(tempDir, "hash1.ts");
452 |       const file2 = join(tempDir, "hash2.ts");
453 | 
454 |       await fs.writeFile(file1, code);
455 |       await fs.writeFile(file2, code);
456 | 
457 |       const result1 = await analyzer.analyzeFile(file1);
458 |       const result2 = await analyzer.analyzeFile(file2);
459 | 
460 |       expect(result1?.contentHash).toBe(result2?.contentHash);
461 |     });
462 | 
463 |     test("should generate different hashes for different content", async () => {
464 |       const code1 = `export function test1(): void {}`;
465 |       const code2 = `export function test2(): void {}`;
466 | 
467 |       const file1 = join(tempDir, "diff1.ts");
468 |       const file2 = join(tempDir, "diff2.ts");
469 | 
470 |       await fs.writeFile(file1, code1);
471 |       await fs.writeFile(file2, code2);
472 | 
473 |       const result1 = await analyzer.analyzeFile(file1);
474 |       const result2 = await analyzer.analyzeFile(file2);
475 | 
476 |       expect(result1?.contentHash).not.toBe(result2?.contentHash);
477 |     });
478 |   });
479 | 
480 |   describe("Multi-Language Support", () => {
481 |     test("should handle Python files with tree-sitter", async () => {
482 |       const pythonCode = `
483 | def hello_world():
484 |     print("Hello, World!")
485 | 
486 | class MyClass:
487 |     def __init__(self):
488 |         self.value = 42
489 |       `.trim();
490 | 
491 |       const filePath = join(tempDir, "test.py");
492 |       await fs.writeFile(filePath, pythonCode);
493 | 
494 |       const result = await analyzer.analyzeFile(filePath);
495 | 
496 |       expect(result).toBeDefined();
497 |       expect(result?.language).toBe("python");
498 |       expect(result?.filePath).toBe(filePath);
499 |       expect(result?.linesOfCode).toBeGreaterThan(0);
500 |     });
501 | 
502 |     test("should handle Go files with tree-sitter", async () => {
503 |       const goCode = `
504 | package main
505 | 
506 | func main() {
507 |     println("Hello, World!")
508 | }
509 |       `.trim();
510 | 
511 |       const filePath = join(tempDir, "test.go");
512 |       await fs.writeFile(filePath, goCode);
513 | 
514 |       const result = await analyzer.analyzeFile(filePath);
515 | 
516 |       expect(result).toBeDefined();
517 |       expect(result?.language).toBe("go");
518 |     });
519 | 
520 |     test("should handle Rust files with tree-sitter", async () => {
521 |       const rustCode = `
522 | fn main() {
523 |     println!("Hello, World!");
524 | }
525 |       `.trim();
526 | 
527 |       const filePath = join(tempDir, "test.rs");
528 |       await fs.writeFile(filePath, rustCode);
529 | 
530 |       const result = await analyzer.analyzeFile(filePath);
531 | 
532 |       expect(result).toBeDefined();
533 |       expect(result?.language).toBe("rust");
534 |     });
535 |   });
536 | 
537 |   describe("Advanced TypeScript Features", () => {
538 |     test("should extract default values from parameters", async () => {
539 |       const code = `
540 | export function withDefaults(
541 |   name: string = "default",
542 |   count: number = 42,
543 |   flag: boolean = true
544 | ): void {
545 |   console.log(name, count, flag);
546 | }
547 |       `.trim();
548 | 
549 |       const filePath = join(tempDir, "defaults.ts");
550 |       await fs.writeFile(filePath, code);
551 | 
552 |       const result = await analyzer.analyzeFile(filePath);
553 | 
554 |       expect(result).not.toBeNull();
555 |       const func = result?.functions.find((f) => f.name === "withDefaults");
556 |       expect(func).toBeDefined();
557 |       expect(func?.parameters.length).toBe(3);
558 | 
559 |       const nameParam = func?.parameters.find((p) => p.name === "name");
560 |       expect(nameParam?.defaultValue).toBeTruthy();
561 |     });
562 | 
563 |     test("should detect private methods with underscore prefix", async () => {
564 |       const code = `
565 | export class TestClass {
566 |   public publicMethod(): void {}
567 | 
568 |   private _privateMethod(): void {}
569 | 
570 |   #reallyPrivate(): void {}
571 | }
572 |       `.trim();
573 | 
574 |       const filePath = join(tempDir, "private-methods.ts");
575 |       await fs.writeFile(filePath, code);
576 | 
577 |       const result = await analyzer.analyzeFile(filePath);
578 | 
579 |       expect(result).not.toBeNull();
580 |       const testClass = result?.classes[0];
581 |       expect(testClass).toBeDefined();
582 |       expect(testClass?.methods.length).toBeGreaterThanOrEqual(1);
583 |     });
584 | 
585 |     test("should detect exported declarations correctly", async () => {
586 |       const code = `
587 | export function exportedFunc(): void {}
588 | 
589 | function nonExportedFunc(): void {}
590 | 
591 | export const exportedConst = () => {};
592 | 
593 | const nonExportedConst = () => {};
594 |       `.trim();
595 | 
596 |       const filePath = join(tempDir, "exports.ts");
597 |       await fs.writeFile(filePath, code);
598 | 
599 |       const result = await analyzer.analyzeFile(filePath);
600 | 
601 |       expect(result).not.toBeNull();
602 | 
603 |       const exportedFunc = result?.functions.find(
604 |         (f) => f.name === "exportedFunc",
605 |       );
606 |       expect(exportedFunc?.isExported).toBe(true);
607 | 
608 |       const exportedArrow = result?.functions.find(
609 |         (f) => f.name === "exportedConst",
610 |       );
611 |       expect(exportedArrow?.isExported).toBe(true);
612 |     });
613 | 
614 |     test("should handle files without initialization", async () => {
615 |       const newAnalyzer = new ASTAnalyzer();
616 |       // Don't call initialize() - should auto-initialize
617 | 
618 |       const code = `export function test(): void {}`;
619 |       const filePath = join(tempDir, "auto-init.ts");
620 |       await fs.writeFile(filePath, code);
621 | 
622 |       const result = await newAnalyzer.analyzeFile(filePath);
623 | 
624 |       expect(result).not.toBeNull();
625 |       expect(result?.functions.length).toBeGreaterThan(0);
626 |     });
627 |   });
628 | 
629 |   describe("Interface and Type Detection", () => {
630 |     test("should detect interface vs type differences", async () => {
631 |       const code = `
632 | export interface UserInterface {
633 |   id: string;
634 |   name: string;
635 | }
636 | 
637 | export type UserType = {
638 |   id: string;
639 |   name: string;
640 | };
641 | 
642 | export type StatusType = "active" | "inactive";
643 |       `.trim();
644 | 
645 |       const filePath = join(tempDir, "types-vs-interfaces.ts");
646 |       await fs.writeFile(filePath, code);
647 | 
648 |       const result = await analyzer.analyzeFile(filePath);
649 | 
650 |       expect(result).not.toBeNull();
651 |       expect(result?.interfaces.length).toBe(1);
652 |       expect(result?.types.length).toBe(2);
653 | 
654 |       const userInterface = result?.interfaces.find(
655 |         (i) => i.name === "UserInterface",
656 |       );
657 |       expect(userInterface?.isExported).toBe(true);
658 | 
659 |       const statusType = result?.types.find((t) => t.name === "StatusType");
660 |       expect(statusType?.isExported).toBe(true);
661 |     });
662 | 
663 |     test("should handle interface methods", async () => {
664 |       const code = `
665 | export interface Repository {
666 |   save(data: string): Promise<void>;
667 |   load(): Promise<string>;
668 |   delete(id: string): boolean;
669 | }
670 |       `.trim();
671 | 
672 |       const filePath = join(tempDir, "interface-methods.ts");
673 |       await fs.writeFile(filePath, code);
674 | 
675 |       const result = await analyzer.analyzeFile(filePath);
676 | 
677 |       expect(result).not.toBeNull();
678 |       const repo = result?.interfaces.find((i) => i.name === "Repository");
679 |       expect(repo?.methods.length).toBe(3);
680 |     });
681 |   });
682 | });
683 | 
```

--------------------------------------------------------------------------------
/src/tools/validate-readme-checklist.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { z } from "zod";
  2 | import { promises as fs } from "fs";
  3 | 
// Input schema for the validate-readme-checklist tool.
export const ValidateReadmeChecklistSchema = z.object({
  readmePath: z.string().min(1, "README path is required"),
  // Optional repo root; when given, project files are listed so checks can
  // look for companion files (CONTRIBUTING.md etc.).
  projectPath: z.string().optional(),
  // NOTE(review): `strict` is accepted but its effect is not visible in
  // this chunk (validateItem ignores its input arg) — confirm it is used.
  strict: z.boolean().default(false),
  outputFormat: z.enum(["json", "markdown", "console"]).default("console"),
});

// Input type derived from the schema (defaults applied).
export type ValidateReadmeChecklistInput = z.infer<
  typeof ValidateReadmeChecklistSchema
>;
 15 | 
// A single checklist rule to evaluate against the README.
interface ChecklistItem {
  id: string; // stable identifier used in the validateItem switch
  category: string; // grouping label (e.g. "Essential Sections")
  name: string;
  description: string;
  required: boolean; // required items presumably count against strict mode — verify
  weight: number; // relative contribution to the overall score
}

// Outcome of evaluating one ChecklistItem.
interface ValidationResult {
  item: ChecklistItem;
  passed: boolean;
  details: string;
  suggestions?: string[]; // actionable fixes when the item failed
}

// Aggregated report returned by validateReadme.
interface ChecklistReport {
  overallScore: number;
  totalItems: number;
  passedItems: number;
  failedItems: number;
  categories: {
    [category: string]: {
      score: number;
      passed: number;
      total: number;
      results: ValidationResult[];
    };
  };
  recommendations: string[];
  estimatedReadTime: number; // minutes, derived from word count — TODO confirm units
  wordCount: number;
}
 49 | 
 50 | export class ReadmeChecklistValidator {
 51 |   private checklist: ChecklistItem[] = [];
 52 | 
  // Populate the static checklist once at construction time.
  constructor() {
    this.initializeChecklist();
  }
 56 | 
 57 |   private initializeChecklist(): void {
 58 |     this.checklist = [
 59 |       // Essential Sections
 60 |       {
 61 |         id: "title",
 62 |         category: "Essential Sections",
 63 |         name: "Project Title",
 64 |         description: "Clear, descriptive project title as main heading",
 65 |         required: true,
 66 |         weight: 10,
 67 |       },
 68 |       {
 69 |         id: "description",
 70 |         category: "Essential Sections",
 71 |         name: "Project Description",
 72 |         description: "Brief one-liner describing what the project does",
 73 |         required: true,
 74 |         weight: 10,
 75 |       },
 76 |       {
 77 |         id: "tldr",
 78 |         category: "Essential Sections",
 79 |         name: "TL;DR Section",
 80 |         description: "2-3 sentence summary of the project",
 81 |         required: true,
 82 |         weight: 8,
 83 |       },
 84 |       {
 85 |         id: "quickstart",
 86 |         category: "Essential Sections",
 87 |         name: "Quick Start Guide",
 88 |         description: "Instructions to get running in under 5 minutes",
 89 |         required: true,
 90 |         weight: 10,
 91 |       },
 92 |       {
 93 |         id: "installation",
 94 |         category: "Essential Sections",
 95 |         name: "Installation Instructions",
 96 |         description: "Clear installation steps with code examples",
 97 |         required: true,
 98 |         weight: 9,
 99 |       },
100 |       {
101 |         id: "usage",
102 |         category: "Essential Sections",
103 |         name: "Basic Usage Examples",
104 |         description: "Simple working code examples",
105 |         required: true,
106 |         weight: 9,
107 |       },
108 |       {
109 |         id: "license",
110 |         category: "Essential Sections",
111 |         name: "License Information",
112 |         description: "Clear license information",
113 |         required: true,
114 |         weight: 7,
115 |       },
116 | 
117 |       // Community Health
118 |       {
119 |         id: "contributing",
120 |         category: "Community Health",
121 |         name: "Contributing Guidelines",
122 |         description: "Link to CONTRIBUTING.md or inline guidelines",
123 |         required: false,
124 |         weight: 6,
125 |       },
126 |       {
127 |         id: "code-of-conduct",
128 |         category: "Community Health",
129 |         name: "Code of Conduct",
130 |         description: "Link to CODE_OF_CONDUCT.md",
131 |         required: false,
132 |         weight: 4,
133 |       },
134 |       {
135 |         id: "security",
136 |         category: "Community Health",
137 |         name: "Security Policy",
138 |         description: "Link to SECURITY.md or security reporting info",
139 |         required: false,
140 |         weight: 4,
141 |       },
142 | 
143 |       // Visual Elements
144 |       {
145 |         id: "badges",
146 |         category: "Visual Elements",
147 |         name: "Status Badges",
148 |         description: "Build status, version, license badges",
149 |         required: false,
150 |         weight: 3,
151 |       },
152 |       {
153 |         id: "screenshots",
154 |         category: "Visual Elements",
155 |         name: "Screenshots/Demos",
156 |         description: "Visual representation for applications/tools",
157 |         required: false,
158 |         weight: 5,
159 |       },
160 |       {
161 |         id: "formatting",
162 |         category: "Visual Elements",
163 |         name: "Consistent Formatting",
164 |         description: "Proper markdown formatting and structure",
165 |         required: true,
166 |         weight: 6,
167 |       },
168 | 
169 |       // Content Quality
170 |       {
171 |         id: "working-examples",
172 |         category: "Content Quality",
173 |         name: "Working Code Examples",
174 |         description: "All code examples are functional and tested",
175 |         required: true,
176 |         weight: 8,
177 |       },
178 |       {
179 |         id: "external-links",
180 |         category: "Content Quality",
181 |         name: "Functional External Links",
182 |         description: "All external links are working",
183 |         required: true,
184 |         weight: 5,
185 |       },
186 |       {
187 |         id: "appropriate-length",
188 |         category: "Content Quality",
189 |         name: "Appropriate Length",
190 |         description: "README under 300 lines for community projects",
191 |         required: false,
192 |         weight: 4,
193 |       },
194 |       {
195 |         id: "scannable-structure",
196 |         category: "Content Quality",
197 |         name: "Scannable Structure",
198 |         description: "Good heading hierarchy and organization",
199 |         required: true,
200 |         weight: 7,
201 |       },
202 |     ];
203 |   }
204 | 
205 |   async validateReadme(
206 |     input: ValidateReadmeChecklistInput,
207 |   ): Promise<ChecklistReport> {
208 |     const readmeContent = await fs.readFile(input.readmePath, "utf-8");
209 |     const projectFiles = input.projectPath
210 |       ? await this.getProjectFiles(input.projectPath)
211 |       : [];
212 | 
213 |     const results: ValidationResult[] = [];
214 |     const categories: { [key: string]: ValidationResult[] } = {};
215 | 
216 |     // Run validation for each checklist item
217 |     for (const item of this.checklist) {
218 |       const result = await this.validateItem(
219 |         item,
220 |         readmeContent,
221 |         projectFiles,
222 |         input,
223 |       );
224 |       results.push(result);
225 | 
226 |       if (!categories[item.category]) {
227 |         categories[item.category] = [];
228 |       }
229 |       categories[item.category].push(result);
230 |     }
231 | 
232 |     return this.generateReport(results, readmeContent);
233 |   }
234 | 
235 |   private async validateItem(
236 |     item: ChecklistItem,
237 |     content: string,
238 |     projectFiles: string[],
239 |     _input: ValidateReadmeChecklistInput,
240 |   ): Promise<ValidationResult> {
241 |     let passed = false;
242 |     let details = "";
243 |     const suggestions: string[] = [];
244 | 
245 |     switch (item.id) {
246 |       case "title": {
247 |         const titleRegex = /^#\s+.+/m;
248 |         const hasTitle = titleRegex.test(content);
249 |         passed = hasTitle;
250 |         details = passed
251 |           ? "Project title found"
252 |           : "No main heading (# Title) found";
253 |         if (!passed)
254 |           suggestions.push(
255 |             "Add a clear project title as the first heading: # Your Project Name",
256 |           );
257 |         break;
258 |       }
259 | 
260 |       case "description": {
261 |         const descRegex = /(^>\s+.+|^[^#\n].{20,})/m;
262 |         const hasDesc = descRegex.test(content);
263 |         passed = hasDesc;
264 |         details = passed
265 |           ? "Project description found"
266 |           : "Missing project description";
267 |         if (!passed)
268 |           suggestions.push(
269 |             "Add a brief description using > quote syntax or paragraph after title",
270 |           );
271 |         break;
272 |       }
273 | 
274 |       case "tldr": {
275 |         const tldrRegex = /##?\s*(tl;?dr|quick start|at a glance)/i;
276 |         const hasTldr = tldrRegex.test(content);
277 |         passed = hasTldr;
278 |         details = passed
279 |           ? "TL;DR section found"
280 |           : "Missing TL;DR or quick overview";
281 |         if (!passed)
282 |           suggestions.push(
283 |             "Add a ## TL;DR section with 2-3 sentences explaining what your project does",
284 |           );
285 |         break;
286 |       }
287 | 
288 |       case "quickstart":
289 |         passed = /##\s*(Quick\s*Start|Getting\s*Started|Installation)/i.test(
290 |           content,
291 |         );
292 |         details = passed
293 |           ? "Quick start section found"
294 |           : "No quick start section found";
295 |         if (!passed)
296 |           suggestions.push(
297 |             "Add a ## Quick Start section with immediate setup instructions",
298 |           );
299 |         break;
300 | 
301 |       case "installation": {
302 |         const installRegex = /##?\s*(install|installation|setup)/i;
303 |         const hasInstall = installRegex.test(content);
304 |         passed = hasInstall;
305 |         details = passed
306 |           ? "Installation instructions found"
307 |           : "Missing installation instructions";
308 |         if (!passed)
309 |           suggestions.push(
310 |             "Add installation instructions with code blocks showing exact commands",
311 |           );
312 |         break;
313 |       }
314 | 
315 |       case "usage": {
316 |         const usageRegex = /##?\s*(usage|example|getting started)/i;
317 |         const hasUsage = usageRegex.test(content);
318 |         passed = hasUsage;
319 |         details = passed ? "Usage examples found" : "Missing usage examples";
320 |         if (!passed)
321 |           suggestions.push("Add usage examples with working code snippets");
322 |         break;
323 |       }
324 | 
325 |       case "license": {
326 |         const licenseRegex = /##?\s*license/i;
327 |         const hasLicense = licenseRegex.test(content);
328 |         const hasLicenseFile =
329 |           projectFiles.includes("LICENSE") ||
330 |           projectFiles.includes("LICENSE.md");
331 |         passed = hasLicense || hasLicenseFile;
332 |         details = passed
333 |           ? "License information found"
334 |           : "Missing license information";
335 |         if (!passed)
336 |           suggestions.push("Add a ## License section or LICENSE file");
337 |         break;
338 |       }
339 | 
340 |       case "contributing": {
341 |         const hasContributing = /##\s*Contribut/i.test(content);
342 |         const hasContributingFile = projectFiles.includes("CONTRIBUTING.md");
343 |         passed = hasContributing || hasContributingFile;
344 |         details = passed
345 |           ? "Contributing guidelines found"
346 |           : "No contributing guidelines found";
347 |         if (!passed)
348 |           suggestions.push(
349 |             "Add contributing guidelines or link to CONTRIBUTING.md",
350 |           );
351 |         break;
352 |       }
353 | 
354 |       case "code-of-conduct": {
355 |         const hasCodeOfConduct = /code.of.conduct/i.test(content);
356 |         const hasCodeFile = projectFiles.includes("CODE_OF_CONDUCT.md");
357 |         passed = hasCodeOfConduct || hasCodeFile;
358 |         details = passed ? "Code of conduct found" : "No code of conduct found";
359 |         break;
360 |       }
361 | 
362 |       case "security": {
363 |         const hasSecurity = /security/i.test(content);
364 |         const hasSecurityFile = projectFiles.includes("SECURITY.md");
365 |         passed = hasSecurity || hasSecurityFile;
366 |         details = passed
367 |           ? "Security information found"
368 |           : "No security policy found";
369 |         break;
370 |       }
371 | 
372 |       case "badges": {
373 |         const badgeRegex =
374 |           /\[!\[.*?\]\(.*?\)\]\(.*?\)|!\[.*?\]\(.*?badge.*?\)/i;
375 |         const hasBadges = badgeRegex.test(content);
376 |         passed = hasBadges;
377 |         details = passed
378 |           ? "Status badges found"
379 |           : "Consider adding status badges";
380 |         if (!passed)
381 |           suggestions.push(
382 |             "Consider adding badges for build status, version, license",
383 |           );
384 |         break;
385 |       }
386 | 
387 |       case "screenshots": {
388 |         const imageRegex = /!\[.*?\]\(.*?\.(png|jpg|jpeg|gif|svg).*?\)/i;
389 |         const hasImages = imageRegex.test(content);
390 |         passed = hasImages;
391 |         details = passed
392 |           ? "Screenshots/images found"
393 |           : "Consider adding screenshots or images";
394 |         if (!passed && content.includes("application")) {
395 |           suggestions.push(
396 |             "Consider adding screenshots or demo GIFs for visual applications",
397 |           );
398 |         }
399 |         break;
400 |       }
401 | 
402 |       case "formatting": {
403 |         const hasHeaders = (content.match(/^##?\s+/gm) || []).length >= 3;
404 |         const hasProperSpacing = !/#{1,6}\s*\n\s*#{1,6}/.test(content);
405 |         passed = hasHeaders && hasProperSpacing;
406 |         details = passed
407 |           ? "Good markdown formatting"
408 |           : "Improve markdown formatting and structure";
409 |         if (!passed)
410 |           suggestions.push(
411 |             "Improve markdown formatting with proper heading hierarchy and spacing",
412 |           );
413 |         break;
414 |       }
415 | 
416 |       case "working-examples": {
417 |         const codeRegex = /```[\s\S]*?```/g;
418 |         const codeBlocks = content.match(codeRegex) || [];
419 |         passed = codeBlocks.length > 0;
420 |         details = `${codeBlocks.length} code examples found`;
421 |         if (!passed)
422 |           suggestions.push("Add working code examples to demonstrate usage");
423 |         break;
424 |       }
425 | 
426 |       case "external-links": {
427 |         const links = content.match(/\[.*?\]\((https?:\/\/.*?)\)/g) || [];
428 |         passed = true; // Assume links work unless we can verify
429 |         details = `${links.length} external links found`;
430 |         break;
431 |       }
432 | 
433 |       case "appropriate-length": {
434 |         const wordCount = content.split(/\s+/).length;
435 |         passed = wordCount <= 300;
436 |         details = `${wordCount} words (target: ≤300)`;
437 |         if (!passed)
438 |           suggestions.push(
439 |             "Consider shortening README or moving detailed content to separate docs",
440 |           );
441 |         break;
442 |       }
443 | 
444 |       case "scannable-structure": {
445 |         const sections = (content.match(/^##?\s+/gm) || []).length;
446 |         const lists = (content.match(/^\s*[-*+]\s+/gm) || []).length;
447 |         passed = sections >= 3 && lists >= 2;
448 |         details = passed
449 |           ? "Good scannable structure"
450 |           : "Improve structure with more sections and lists";
451 |         if (!passed)
452 |           suggestions.push(
453 |             "Improve heading structure with logical hierarchy (H1 → H2 → H3)",
454 |           );
455 |         break;
456 |       }
457 | 
458 |       default:
459 |         passed = false;
460 |         details = "Validation not implemented";
461 |     }
462 | 
463 |     return {
464 |       item,
465 |       passed,
466 |       details,
467 |       suggestions: suggestions.length > 0 ? suggestions : undefined,
468 |     };
469 |   }
470 | 
471 |   private async getProjectFiles(projectPath: string): Promise<string[]> {
472 |     try {
473 |       const files = await fs.readdir(projectPath);
474 |       return files;
475 |     } catch {
476 |       return [];
477 |     }
478 |   }
479 | 
480 |   private generateReport(
481 |     results: ValidationResult[],
482 |     content: string,
483 |   ): ChecklistReport {
484 |     const categories: {
485 |       [category: string]: {
486 |         score: number;
487 |         passed: number;
488 |         total: number;
489 |         results: ValidationResult[];
490 |       };
491 |     } = {};
492 | 
493 |     let totalWeight = 0;
494 |     let passedWeight = 0;
495 |     let passedItems = 0;
496 |     const totalItems = results.length;
497 | 
498 |     // Group results by category and calculate scores
499 |     for (const result of results) {
500 |       const category = result.item.category;
501 |       if (!categories[category]) {
502 |         categories[category] = { score: 0, passed: 0, total: 0, results: [] };
503 |       }
504 | 
505 |       categories[category].results.push(result);
506 |       categories[category].total++;
507 | 
508 |       totalWeight += result.item.weight;
509 | 
510 |       if (result.passed) {
511 |         categories[category].passed++;
512 |         passedWeight += result.item.weight;
513 |         passedItems++;
514 |       }
515 |     }
516 | 
517 |     // Calculate category scores
518 |     for (const category in categories) {
519 |       const cat = categories[category];
520 |       cat.score = Math.round((cat.passed / cat.total) * 100);
521 |     }
522 | 
523 |     const overallScore = Math.round((passedWeight / totalWeight) * 100);
524 |     const wordCount = content.split(/\s+/).length;
525 |     const estimatedReadTime = Math.ceil(wordCount / 200); // 200 words per minute
526 | 
527 |     // Generate recommendations
528 |     const recommendations: string[] = [];
529 |     if (overallScore < 70) {
530 |       recommendations.push(
531 |         "README needs significant improvement to meet community standards",
532 |       );
533 |     }
534 |     if (categories["Essential Sections"]?.score < 80) {
535 |       recommendations.push("Focus on completing essential sections first");
536 |     }
537 |     if (wordCount > 2000) {
538 |       recommendations.push(
539 |         "Consider breaking up content into separate documentation files",
540 |       );
541 |     }
542 |     if (!results.find((r) => r.item.id === "badges")?.passed) {
543 |       recommendations.push("Add status badges to improve project credibility");
544 |     }
545 | 
546 |     return {
547 |       overallScore,
548 |       totalItems,
549 |       passedItems,
550 |       failedItems: totalItems - passedItems,
551 |       categories,
552 |       recommendations,
553 |       estimatedReadTime,
554 |       wordCount,
555 |     };
556 |   }
557 | 
558 |   formatReport(
559 |     report: ChecklistReport,
560 |     format: "json" | "markdown" | "console",
561 |   ): string {
562 |     switch (format) {
563 |       case "json":
564 |         return JSON.stringify(report, null, 2);
565 | 
566 |       case "markdown":
567 |         return this.formatMarkdownReport(report);
568 | 
569 |       case "console":
570 |       default:
571 |         return this.formatConsoleReport(report);
572 |     }
573 |   }
574 | 
575 |   private formatMarkdownReport(report: ChecklistReport): string {
576 |     let output = "# README Checklist Report\n\n";
577 | 
578 |     output += `## Overall Score: ${report.overallScore}%\n\n`;
579 |     output += `- **Passed**: ${report.passedItems}/${report.totalItems} items\n`;
580 |     output += `- **Word Count**: ${report.wordCount} words\n`;
581 |     output += `- **Estimated Read Time**: ${report.estimatedReadTime} minutes\n\n`;
582 | 
583 |     output += "## Category Breakdown\n\n";
584 |     for (const [categoryName, category] of Object.entries(report.categories)) {
585 |       output += `### ${categoryName} (${category.score}%)\n\n`;
586 | 
587 |       for (const result of category.results) {
588 |         const status = result.passed ? "✅" : "❌";
589 |         output += `- ${status} **${result.item.name}**: ${result.details}\n`;
590 | 
591 |         if (result.suggestions) {
592 |           for (const suggestion of result.suggestions) {
593 |             output += `  - 💡 ${suggestion}\n`;
594 |           }
595 |         }
596 |       }
597 |       output += "\n";
598 |     }
599 | 
600 |     if (report.recommendations.length > 0) {
601 |       output += "## Recommendations\n\n";
602 |       for (const rec of report.recommendations) {
603 |         output += `- ${rec}\n`;
604 |       }
605 |     }
606 | 
607 |     return output;
608 |   }
609 | 
610 |   private formatConsoleReport(report: ChecklistReport): string {
611 |     let output = "\n📋 README Checklist Report\n";
612 |     output += "=".repeat(50) + "\n";
613 | 
614 |     const scoreColor =
615 |       report.overallScore >= 80
616 |         ? "🟢"
617 |         : report.overallScore >= 60
618 |           ? "🟡"
619 |           : "🔴";
620 |     output += `${scoreColor} Overall Score: ${report.overallScore}%\n`;
621 |     output += `✅ Passed: ${report.passedItems}/${report.totalItems} items\n`;
622 |     output += `📄 Word Count: ${report.wordCount} words\n`;
623 |     output += `⏱️  Read Time: ${report.estimatedReadTime} minutes\n\n`;
624 | 
625 |     for (const [categoryName, category] of Object.entries(report.categories)) {
626 |       const catColor =
627 |         category.score >= 80 ? "🟢" : category.score >= 60 ? "🟡" : "🔴";
628 |       output += `${catColor} ${categoryName} (${category.score}%)\n`;
629 |       output += "-".repeat(30) + "\n";
630 | 
631 |       for (const result of category.results) {
632 |         const status = result.passed ? "✅" : "❌";
633 |         output += `${status} ${result.item.name}: ${result.details}\n`;
634 | 
635 |         if (result.suggestions) {
636 |           for (const suggestion of result.suggestions) {
637 |             output += `   💡 ${suggestion}\n`;
638 |           }
639 |         }
640 |       }
641 |       output += "\n";
642 |     }
643 | 
644 |     if (report.recommendations.length > 0) {
645 |       output += "🎯 Recommendations:\n";
646 |       for (const rec of report.recommendations) {
647 |         output += `• ${rec}\n`;
648 |       }
649 |     }
650 | 
651 |     return output;
652 |   }
653 | }
654 | 
655 | export async function validateReadmeChecklist(
656 |   input: ValidateReadmeChecklistInput,
657 | ): Promise<ChecklistReport> {
658 |   const validatedInput = ValidateReadmeChecklistSchema.parse(input);
659 |   const validator = new ReadmeChecklistValidator();
660 | 
661 |   return await validator.validateReadme(validatedInput);
662 | }
663 | 
```

--------------------------------------------------------------------------------
/docs/adrs/009-content-accuracy-validation-framework.md:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | id: 009-content-accuracy-validation-framework
  3 | title: "ADR-009: Content Accuracy Validation Framework"
  4 | sidebar_label: "ADR-9: Content Accuracy Validation Framework"
  5 | sidebar_position: 9
  6 | documcp:
  7 |   last_updated: "2025-11-20T00:46:21.943Z"
  8 |   last_validated: "2025-11-20T00:46:21.943Z"
  9 |   auto_updated: false
 10 |   update_frequency: monthly
 11 | ---
 12 | 
 13 | # ADR-009: Content Accuracy and Validation Framework for Generated Documentation
 14 | 
 15 | ## Status
 16 | 
 17 | Accepted
 18 | 
 19 | ## Context
 20 | 
 21 | The Intelligent Content Population Engine (ADR-008) introduces sophisticated content generation capabilities, but with this power comes the critical challenge of ensuring content accuracy and handling scenarios where generated documentation is incorrect, outdated, or missing crucial context. This represents a fundamental risk to user trust and system adoption.
 22 | 
 23 | **Core Problem**: Automated content generation can fail in multiple ways:
 24 | 
 25 | - **Analysis Misinterpretation**: Repository analysis detects Express.js but project primarily uses GraphQL
 26 | - **Outdated Patterns**: Generated content assumes current best practices for deprecated framework versions
 27 | - **Missing Context**: Analysis cannot understand business domain, team conventions, or architectural constraints
 28 | - **Code Reality Mismatch**: Generated examples don't work with actual project structure
 29 | - **Confidence Overstatement**: System appears confident about uncertain conclusions
 30 | 
 31 | **Real-World Scenarios**:
 32 | 
 33 | 1. Analysis detects PostgreSQL in docker-compose but app actually uses MongoDB in production
 34 | 2. TypeScript project generates JavaScript examples due to build artifact analysis
 35 | 3. Monorepo analysis sees partial picture, generating incomplete architectural guidance
 36 | 4. Custom framework wrappers confuse standard pattern detection
 37 | 5. Legacy code patterns generate deprecated recommendation content
 38 | 
 39 | **Current State**: ADR-008 includes basic content validation but lacks comprehensive accuracy assurance, user correction workflows, and systematic approaches to handling uncertainty and missing information.
 40 | 
 41 | **Strategic Importance**: Content accuracy directly impacts:
 42 | 
 43 | - User trust and adoption rates
 44 | - Time savings vs. time wasted on incorrect guidance
 45 | - System credibility in professional development environments
 46 | - Long-term viability as intelligent documentation assistant
 47 | 
 48 | ## Decision
 49 | 
 50 | We will implement a comprehensive Content Accuracy and Validation Framework that treats content correctness as a first-class architectural concern, with systematic approaches to uncertainty management, reality verification, and continuous accuracy improvement.
 51 | 
 52 | ### Framework Architecture:
 53 | 
 54 | #### 1. Multi-Layer Validation System
 55 | 
 56 | **Purpose**: Systematic verification at multiple stages of content generation
 57 | **Layers**:
 58 | 
 59 | - **Pre-Generation Validation**: Verify analysis accuracy before content creation
 60 | - **Generation-Time Validation**: Real-time checks during content assembly
 61 | - **Post-Generation Validation**: Comprehensive verification against project reality
 62 | - **User-Guided Validation**: Interactive accuracy confirmation and correction
 63 | 
 64 | #### 2. Confidence-Aware Content Generation
 65 | 
 66 | **Purpose**: Explicit uncertainty management and confidence scoring
 67 | **Capabilities**:
 68 | 
 69 | - Granular confidence metrics for different content aspects
 70 | - Uncertainty flagging for areas requiring user verification
 71 | - Content degradation strategies when confidence is insufficient
 72 | - Alternative content paths for ambiguous scenarios
 73 | 
 74 | #### 3. Reality-Check Validation Engine
 75 | 
 76 | **Purpose**: Verify generated content against actual project characteristics
 77 | **Verification Types**:
 78 | 
 79 | - Code example compilation and execution validation
 80 | - Pattern existence verification in actual codebase
 81 | - Dependency version compatibility checking
 82 | - Framework usage pattern matching
 83 | 
 84 | #### 4. Interactive Accuracy Workflow
 85 | 
 86 | **Purpose**: User-guided accuracy improvement and correction
 87 | **Components**:
 88 | 
 89 | - Pre-generation clarification requests for uncertain areas
 90 | - Inline content correction and improvement interfaces
 91 | - Accuracy feedback collection and learning system
 92 | - Project-specific accuracy profile building
 93 | 
 94 | ### Implementation Details:
 95 | 
 96 | #### Confidence-Aware Generation System
 97 | 
 98 | ```typescript
 99 | interface ConfidenceAwareGenerator {
100 |   generateWithConfidence(
101 |     contentRequest: ContentRequest,
102 |     projectContext: ProjectContext,
103 |   ): ConfidenceAwareContent;
104 | 
105 |   handleUncertainty(
106 |     uncertainty: UncertaintyArea,
107 |     alternatives: ContentAlternative[],
108 |   ): UncertaintyHandlingStrategy;
109 | 
110 |   degradeContentSafely(
111 |     highRiskContent: GeneratedContent,
112 |     safetyThreshold: number,
113 |   ): SaferContent;
114 | }
115 | 
116 | interface ConfidenceAwareContent {
117 |   content: GeneratedContent;
118 |   confidence: ConfidenceMetrics;
119 |   uncertainties: UncertaintyFlag[];
120 |   validationRequests: ValidationRequest[];
121 |   alternatives: ContentAlternative[];
122 | }
123 | 
124 | interface ConfidenceMetrics {
125 |   overall: number; // 0-100
126 |   breakdown: {
127 |     technologyDetection: number;
128 |     frameworkVersionAccuracy: number;
129 |     codeExampleRelevance: number;
130 |     architecturalAssumptions: number;
131 |     businessContextAlignment: number;
132 |   };
133 |   riskFactors: RiskFactor[];
134 | }
135 | 
136 | interface UncertaintyFlag {
137 |   area: UncertaintyArea;
138 |   severity: "low" | "medium" | "high" | "critical";
139 |   description: string;
140 |   potentialImpact: string;
141 |   clarificationNeeded: string;
142 |   fallbackStrategy: string;
143 | }
144 | ```
145 | 
146 | #### Reality-Check Validation Engine
147 | 
148 | ```typescript
149 | interface RealityCheckValidator {
150 |   // Validate against actual project structure and code
151 |   validateAgainstCodebase(
152 |     content: GeneratedContent,
153 |     projectPath: string,
154 |   ): Promise<ValidationResult>;
155 | 
156 |   // Check if generated code examples actually work
157 |   validateCodeExamples(
158 |     examples: CodeExample[],
159 |     projectContext: ProjectContext,
160 |   ): Promise<CodeValidationResult>;
161 | 
162 |   // Verify framework patterns exist in project
163 |   verifyFrameworkPatterns(
164 |     patterns: FrameworkPattern[],
165 |     projectFiles: ProjectFile[],
166 |   ): PatternValidationResult;
167 | 
168 |   // Check dependency compatibility
169 |   validateDependencyCompatibility(
170 |     suggestions: DependencySuggestion[],
171 |     projectManifest: ProjectManifest,
172 |   ): CompatibilityResult;
173 | }
174 | 
175 | interface ValidationResult {
176 |   isValid: boolean;
177 |   confidence: number;
178 |   issues: ValidationIssue[];
179 |   suggestions: ImprovementSuggestion[];
180 |   corrections: AutomaticCorrection[];
181 | }
182 | 
183 | interface ValidationIssue {
184 |   type: IssueType;
185 |   severity: "error" | "warning" | "info";
186 |   location: ContentLocation;
187 |   description: string;
188 |   evidence: Evidence[];
189 |   suggestedFix: string;
190 |   confidence: number;
191 | }
192 | 
193 | class TypeScriptRealityChecker implements RealityCheckValidator {
194 |   async validateCodeExamples(
195 |     examples: CodeExample[],
196 |     projectContext: ProjectContext,
197 |   ): Promise<CodeValidationResult> {
198 |     const results: ExampleValidation[] = [];
199 | 
200 |     for (const example of examples) {
201 |       try {
202 |         // Create temporary test file
203 |         const testFile = await this.createTestFile(example, projectContext);
204 | 
205 |         // Attempt TypeScript compilation
206 |         const compileResult = await this.compileTypeScript(testFile);
207 | 
208 |         // Run basic execution test if compilation succeeds
209 |         const executionResult = compileResult.success
210 |           ? await this.testExecution(testFile)
211 |           : null;
212 | 
213 |         results.push({
214 |           example: example.id,
215 |           compilationSuccess: compileResult.success,
216 |           executionSuccess: executionResult?.success ?? false,
217 |           issues: [...compileResult.errors, ...(executionResult?.errors ?? [])],
218 |           confidence: this.calculateExampleConfidence(
219 |             compileResult,
220 |             executionResult,
221 |           ),
222 |         });
223 |       } catch (error) {
224 |         results.push({
225 |           example: example.id,
226 |           compilationSuccess: false,
227 |           executionSuccess: false,
228 |           issues: [{ type: "validation_error", message: error.message }],
229 |           confidence: 0,
230 |         });
231 |       }
232 |     }
233 | 
234 |     return {
235 |       overallSuccess: results.every((r) => r.compilationSuccess),
236 |       exampleResults: results,
237 |       confidence: this.calculateOverallConfidence(results),
238 |     };
239 |   }
240 | }
241 | ```
242 | 
243 | #### Interactive Accuracy Workflow
244 | 
245 | ```typescript
246 | interface InteractiveAccuracyWorkflow {
247 |   // Pre-generation clarification
248 |   requestClarification(
249 |     uncertainties: UncertaintyFlag[],
250 |     analysisContext: AnalysisContext,
251 |   ): Promise<UserClarification>;
252 | 
253 |   // Real-time accuracy feedback during generation
254 |   enableRealTimeFeedback(
255 |     generationSession: GenerationSession,
256 |   ): AccuracyFeedbackInterface;
257 | 
258 |   // Post-generation correction and improvement
259 |   facilitateCorrections(
260 |     generatedContent: GeneratedContent,
261 |     userContext: UserContext,
262 |   ): CorrectionInterface;
263 | 
264 |   // Learning from corrections
265 |   recordAccuracyLearning(
266 |     original: GeneratedContent,
267 |     corrected: GeneratedContent,
268 |     userFeedback: UserFeedback,
269 |   ): AccuracyLearning;
270 | }
271 | 
272 | interface UserClarification {
273 |   uncertaintyArea: UncertaintyArea;
274 |   userResponse: string;
275 |   confidence: number;
276 |   additionalContext?: string;
277 | }
278 | 
279 | interface CorrectionInterface {
280 |   // Inline editing capabilities
281 |   enableInlineEditing(content: GeneratedContent): EditableContent;
282 | 
283 |   // Structured feedback collection
284 |   collectStructuredFeedback(
285 |     content: GeneratedContent,
286 |   ): Promise<StructuredFeedback>;
287 | 
288 |   // Quick accuracy rating
289 |   requestAccuracyRating(
290 |     contentSection: ContentSection,
291 |   ): Promise<AccuracyRating>;
292 | 
293 |   // Pattern correction learning
294 |   identifyPatternCorrections(
295 |     corrections: ContentCorrection[],
296 |   ): PatternLearning[];
297 | }
298 | ```
299 | 
300 | #### Fallback and Recovery Strategies
301 | 
302 | ```typescript
303 | interface ContentFallbackStrategy {
304 |   // Progressive content degradation
305 |   degradeToSaferContent(
306 |     failedContent: GeneratedContent,
307 |     validationFailures: ValidationFailure[],
308 |   ): SaferContent;
309 | 
310 |   // Multiple alternative generation
311 |   generateAlternatives(
312 |     contentRequest: ContentRequest,
313 |     primaryFailure: GenerationFailure,
314 |   ): ContentAlternative[];
315 | 
316 |   // Graceful uncertainty handling
317 |   handleInsufficientInformation(
318 |     analysisGaps: AnalysisGap[],
319 |     contentRequirements: ContentRequirement[],
320 |   ): PartialContent;
321 | 
322 |   // Safe default content
323 |   provideSafeDefaults(
324 |     projectType: ProjectType,
325 |     framework: Framework,
326 |     confidence: number,
327 |   ): DefaultContent;
328 | }
329 | 
330 | interface SafetyThresholds {
331 |   minimumConfidenceForCodeExamples: 85;
332 |   minimumConfidenceForArchitecturalAdvice: 75;
333 |   minimumConfidenceForProductionGuidance: 90;
334 |   uncertaintyThresholdForUserConfirmation: 70;
335 | }
336 | 
337 | const fallbackHierarchy = [
338 |   {
339 |     level: "project-specific-optimized",
340 |     confidence: 85,
341 |     description: "Highly confident project-specific content",
342 |   },
343 |   {
344 |     level: "framework-specific-validated",
345 |     confidence: 95,
346 |     description: "Framework patterns validated against project",
347 |   },
348 |   {
349 |     level: "technology-generic-safe",
350 |     confidence: 98,
351 |     description: "Generic patterns known to work",
352 |   },
353 |   {
354 |     level: "diataxis-structure-only",
355 |     confidence: 100,
356 |     description: "Structure with clear placeholders for manual completion",
357 |   },
358 | ];
359 | ```
360 | 
361 | ## Alternatives Considered
362 | 
363 | ### Trust-But-Verify Approach (Basic Validation Only)
364 | 
365 | - **Pros**: Simpler implementation, faster content generation, less user friction
366 | - **Cons**: High risk of incorrect content, potential user frustration, system credibility damage
367 | - **Decision**: Rejected - accuracy is fundamental to system value proposition
368 | 
369 | ### AI-Only Validation (External LLM Review)
370 | 
371 | - **Pros**: Advanced natural language understanding, sophisticated error detection
372 | - **Cons**: External dependencies, costs, latency, inconsistent results, black box validation
373 | - **Decision**: Rejected for primary validation - may integrate as supplementary check
374 | 
375 | ### Manual Review Required (Human-in-the-Loop Always)
376 | 
377 | - **Pros**: Maximum accuracy assurance, user control, learning opportunities
378 | - **Cons**: Eliminates automation benefits, slows workflow, high user burden
379 | - **Decision**: Rejected as default - integrate as optional high-accuracy mode
380 | 
381 | ### Static Analysis Only (No Dynamic Validation)
382 | 
383 | - **Pros**: Fast execution, no code execution risks, consistent results
384 | - **Cons**: Misses runtime issues, limited pattern verification, poor accuracy detection
385 | - **Decision**: Rejected as sole approach - integrate as first-pass validation
386 | 
387 | ### Crowdsourced Accuracy (Community Validation)
388 | 
389 | - **Pros**: Diverse perspectives, real-world validation, community engagement
390 | - **Cons**: Inconsistent quality, coordination complexity, slow feedback loops
391 | - **Decision**: Deferred to future enhancement - focus on systematic validation first
392 | 
393 | ## Consequences
394 | 
395 | ### Positive
396 | 
397 | - **Trust and Credibility**: Systematic accuracy assurance builds user confidence
398 | - **Reduced Risk**: Explicit uncertainty handling prevents misleading guidance
399 | - **Continuous Improvement**: Learning from corrections improves future accuracy
400 | - **Professional Reliability**: Reality-check validation ensures professional-grade output
401 | - **User Empowerment**: Interactive workflows give users control over accuracy
402 | 
403 | ### Negative
404 | 
405 | - **Implementation Complexity**: Multi-layer validation requires significant engineering effort
406 | - **Performance Impact**: Validation processes may slow content generation
407 | - **User Experience Friction**: Clarification requests may interrupt workflow
408 | - **Maintenance Overhead**: Validation rules require updates as technologies evolve
409 | 
410 | ### Risks and Mitigations
411 | 
412 | - **Validation Accuracy**: Validate the validators through comprehensive testing
413 | - **Performance Impact**: Implement parallel validation and smart caching
414 | - **User Fatigue**: Balance accuracy requests with workflow efficiency
415 | - **Technology Coverage**: Start with well-known patterns, expand methodically
416 | 
417 | ## Integration Points
418 | 
419 | ### Repository Analysis Integration (ADR-002)
420 | 
421 | - Use analysis confidence metrics to inform content generation confidence
422 | - Validate analysis assumptions against actual project characteristics
423 | - Identify analysis gaps that require user clarification
424 | 
425 | ### Content Population Integration (ADR-008)
426 | 
427 | - Integrate validation framework into content generation pipeline
428 | - Use confidence metrics to guide content generation strategies
429 | - Apply reality-check validation to all generated content
430 | 
431 | ### MCP Tools API Integration (ADR-006)
432 | 
433 | - Add validation results to MCP tool responses
434 | - Provide user interfaces for accuracy feedback and correction
435 | - Maintain consistency with existing error handling patterns
436 | 
437 | ### Diataxis Framework Integration (ADR-004)
438 | 
439 | - Ensure validation preserves Diataxis category integrity
440 | - Validate content type appropriateness within framework
441 | - Maintain cross-reference accuracy across content categories
442 | 
443 | ## Implementation Roadmap
444 | 
445 | ### Phase 1: Core Validation Infrastructure (High Priority)
446 | 
447 | - Confidence scoring system implementation
448 | - Basic reality-check validation for common patterns
449 | - User clarification workflow for high-uncertainty areas
450 | - Fallback content generation strategies
451 | 
452 | ### Phase 2: Advanced Validation (Medium Priority)
453 | 
454 | - Code example compilation and execution testing
455 | - Framework pattern existence verification
456 | - Interactive correction interfaces
457 | - Accuracy learning and improvement systems
458 | 
459 | ### Phase 3: Intelligent Accuracy Features (Future)
460 | 
461 | - Machine learning-based accuracy prediction
462 | - Community-driven validation and improvement
463 | - Advanced uncertainty reasoning and handling
464 | - Personalized accuracy preferences and thresholds
465 | 
466 | ## Quality Assurance
467 | 
468 | ### Validation Testing Framework
469 | 
470 | ```typescript
471 | describe("ContentAccuracyFramework", () => {
472 |   describe("Confidence Scoring", () => {
473 |     it("should correctly identify low-confidence scenarios");
474 |     it("should provide appropriate uncertainty flags");
475 |     it("should degrade content safely when confidence is insufficient");
476 |   });
477 | 
478 |   describe("Reality-Check Validation", () => {
479 |     it("should detect when generated code examples fail compilation");
480 |     it("should identify pattern mismatches with actual codebase");
481 |     it("should validate dependency compatibility accurately");
482 |   });
483 | 
484 |   describe("Interactive Workflows", () => {
485 |     it("should request clarification for appropriate uncertainty levels");
486 |     it("should enable effective user corrections and learning");
487 |     it("should maintain accuracy improvements across sessions");
488 |   });
489 | });
490 | ```
491 | 
492 | ### Accuracy Metrics and Monitoring
493 | 
494 | - **Content Accuracy Rate**: Percentage of generated content validated as correct
495 | - **User Correction Rate**: Frequency of user corrections per content section
496 | - **Confidence Calibration**: Alignment between confidence scores and actual accuracy
497 | - **Validation Performance**: Speed and accuracy of validation processes
498 | 
499 | ### Continuous Improvement Process
500 | 
501 | - Regular validation of validation systems (meta-validation)
502 | - User feedback analysis and pattern identification
503 | - Technology pattern database updates and maintenance
504 | - Accuracy threshold tuning based on real-world usage
505 | 
506 | ## Success Metrics
507 | 
508 | ### Accuracy Metrics
509 | 
510 | - **Content Accuracy Rate**: 85%+ technical accuracy for generated content
511 | - **Confidence Calibration**: ±10% alignment between confidence and actual accuracy
512 | - **False Positive Rate**: <5% validation failures for actually correct content
513 | - **User Correction Rate**: <20% of content sections require user correction
514 | 
515 | ### User Experience Metrics
516 | 
517 | - **Trust Score**: 90%+ user confidence in system accuracy
518 | - **Workflow Efficiency**: Validation processes add <15% to generation time
519 | - **Clarification Effectiveness**: 80%+ of clarification requests improve accuracy
520 | - **Learning Effectiveness**: 70% reduction in repeat accuracy issues
521 | 
522 | ### System Reliability Metrics
523 | 
524 | - **Validation Coverage**: 95%+ of generated content passes through validation
525 | - **Fallback Effectiveness**: 100% of failed generations provide safe alternatives
526 | - **Error Recovery**: 90%+ of validation failures result in improved content
527 | - **Performance Impact**: <30 seconds total for accuracy-validated content generation
528 | 
529 | ## Future Enhancements
530 | 
531 | ### Advanced Validation Technologies
532 | 
533 | - **Static Analysis Integration**: Deeper code analysis for pattern verification
534 | - **Dynamic Testing**: Automated testing of generated examples in project context
535 | - **Semantic Validation**: AI-powered understanding of content meaning and correctness
536 | - **Cross-Project Learning**: Accuracy improvements shared across similar projects
537 | 
538 | ### User Experience Improvements
539 | 
540 | - **Accuracy Preferences**: User-configurable accuracy vs. speed trade-offs
541 | - **Domain-Specific Validation**: Specialized validation for different technical domains
542 | - **Real-Time Collaboration**: Team-based accuracy review and improvement workflows
543 | - **Accuracy Analytics**: Detailed insights into content accuracy patterns and trends
544 | 
545 | ### Integration Expansions
546 | 
547 | - **IDE Integration**: Real-time accuracy feedback in development environments
548 | - **CI/CD Integration**: Accuracy validation as part of documentation deployment
549 | - **Documentation Management**: Integration with existing documentation systems
550 | - **Quality Metrics**: Accuracy tracking as part of documentation quality scoring
551 | 
552 | ## References
553 | 
554 | - [ADR-002: Multi-Layered Repository Analysis Engine Design](002-repository-analysis-engine.md)
555 | - [ADR-008: Intelligent Content Population Engine](008-intelligent-content-population-engine.md)
556 | - [Software Verification and Validation](https://en.wikipedia.org/wiki/Software_verification_and_validation)
557 | - [Web Content Accessibility Guidelines](https://www.w3.org/WAI/WCAG21/quickref/)
558 | - [AI Documentation Best Practices](https://developers.google.com/machine-learning/guides/rules-of-ml)
559 | 
```

--------------------------------------------------------------------------------
/src/utils/code-scanner.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { parse } from "@typescript-eslint/typescript-estree";
  2 | import { globby } from "globby";
  3 | import { promises as fs } from "fs";
  4 | import path from "path";
  5 | import {
  6 |   MultiLanguageCodeScanner,
  7 |   LanguageParseResult,
  8 | } from "./language-parsers-simple.js";
  9 | 
 10 | export interface CodeElement {
 11 |   name: string;
 12 |   type:
 13 |     | "function"
 14 |     | "class"
 15 |     | "interface"
 16 |     | "type"
 17 |     | "enum"
 18 |     | "variable"
 19 |     | "export"
 20 |     | "import";
 21 |   filePath: string;
 22 |   line: number;
 23 |   column: number;
 24 |   exported: boolean;
 25 |   isAsync?: boolean;
 26 |   isPrivate?: boolean;
 27 |   hasJSDoc?: boolean;
 28 |   jsDocDescription?: string;
 29 |   parameters?: string[];
 30 |   returnType?: string;
 31 |   decorators?: string[];
 32 | }
 33 | 
 34 | export interface APIEndpoint {
 35 |   method: "GET" | "POST" | "PUT" | "DELETE" | "PATCH" | "ALL";
 36 |   path: string;
 37 |   filePath: string;
 38 |   line: number;
 39 |   handlerName?: string;
 40 |   hasDocumentation?: boolean;
 41 | }
 42 | 
 43 | export interface CodeAnalysisResult {
 44 |   functions: CodeElement[];
 45 |   classes: CodeElement[];
 46 |   interfaces: CodeElement[];
 47 |   types: CodeElement[];
 48 |   enums: CodeElement[];
 49 |   exports: CodeElement[];
 50 |   imports: CodeElement[];
 51 |   apiEndpoints: APIEndpoint[];
 52 |   constants: CodeElement[];
 53 |   variables: CodeElement[];
 54 |   hasTests: boolean;
 55 |   testFiles: string[];
 56 |   configFiles: string[];
 57 |   packageManagers: string[];
 58 |   frameworks: string[];
 59 |   dependencies: string[];
 60 |   devDependencies: string[];
 61 |   supportedLanguages: string[];
 62 | }
 63 | 
 64 | export class CodeScanner {
 65 |   private rootPath: string;
 66 |   private multiLanguageScanner: MultiLanguageCodeScanner;
 67 | 
 68 |   constructor(rootPath: string) {
 69 |     this.rootPath = rootPath;
 70 |     this.multiLanguageScanner = new MultiLanguageCodeScanner();
 71 |   }
 72 | 
 73 |   /**
 74 |    * Performs comprehensive code analysis of the repository.
 75 |    *
 76 |    * Analyzes all source files in the repository to extract code elements including
 77 |    * functions, classes, interfaces, API endpoints, and framework detection. Uses
 78 |    * multi-language parsing to support various programming languages and provides
 79 |    * detailed metadata about each code element including documentation status.
 80 |    *
 81 |    * @returns Promise resolving to comprehensive code analysis results
 82 |    * @returns functions - Array of discovered functions with metadata
 83 |    * @returns classes - Array of discovered classes with metadata
 84 |    * @returns interfaces - Array of discovered interfaces with metadata
 85 |    * @returns apiEndpoints - Array of discovered API endpoints
 86 |    * @returns frameworks - Array of detected frameworks and libraries
 87 |    * @returns hasTests - Boolean indicating if test files are present
 88 |    *
 89 |    * @throws {Error} When repository analysis fails
 90 |    * @throws {Error} When file system access is denied
 91 |    *
 92 |    * @example
 93 |    * ```typescript
 94 |    * const scanner = new CodeScanner("/path/to/repository");
 95 |    * const analysis = await scanner.analyzeRepository();
 96 |    * console.log(`Found ${analysis.functions.length} functions`);
 97 |    * console.log(`Detected frameworks: ${analysis.frameworks.join(', ')}`);
 98 |    * ```
 99 |    *
100 |    * @since 1.0.0
101 |    */
102 |   async analyzeRepository(): Promise<CodeAnalysisResult> {
103 |     const result: CodeAnalysisResult = {
104 |       functions: [],
105 |       classes: [],
106 |       interfaces: [],
107 |       types: [],
108 |       enums: [],
109 |       exports: [],
110 |       imports: [],
111 |       apiEndpoints: [],
112 |       constants: [],
113 |       variables: [],
114 |       hasTests: false,
115 |       testFiles: [],
116 |       configFiles: [],
117 |       packageManagers: [],
118 |       frameworks: [],
119 |       dependencies: [],
120 |       devDependencies: [],
121 |       supportedLanguages: [],
122 |     };
123 | 
124 |     // Find all relevant files (now including Python, Go, YAML, Bash)
125 |     const codeFiles = await this.findCodeFiles();
126 |     const configFiles = await this.findConfigFiles();
127 |     const testFiles = await this.findTestFiles();
128 | 
129 |     result.configFiles = configFiles;
130 |     result.testFiles = testFiles;
131 |     result.hasTests = testFiles.length > 0;
132 |     result.supportedLanguages =
133 |       this.multiLanguageScanner.getSupportedExtensions();
134 | 
135 |     // Analyze package.json for dependencies and frameworks
136 |     await this.analyzePackageJson(result);
137 | 
138 |     // Analyze code files with multi-language support
139 |     for (const filePath of codeFiles) {
140 |       try {
141 |         await this.analyzeFile(filePath, result);
142 |       } catch (error) {
143 |         // Skip files that can't be parsed (e.g., invalid syntax)
144 |         console.warn(`Failed to analyze ${filePath}:`, error);
145 |       }
146 |     }
147 | 
148 |     return result;
149 |   }
150 | 
151 |   private async findCodeFiles(): Promise<string[]> {
152 |     const patterns = [
153 |       // TypeScript/JavaScript
154 |       "**/*.ts",
155 |       "**/*.tsx",
156 |       "**/*.js",
157 |       "**/*.jsx",
158 |       "**/*.mjs",
159 |       "**/*.cjs",
160 |       // Python
161 |       "**/*.py",
162 |       "**/*.pyi",
163 |       "**/*.pyx",
164 |       "**/*.pxd",
165 |       // Go
166 |       "**/*.go",
167 |       // YAML/Config
168 |       "**/*.yml",
169 |       "**/*.yaml",
170 |       // Shell scripts
171 |       "**/*.sh",
172 |       "**/*.bash",
173 |       "**/*.zsh",
174 |       // Exclusions
175 |       "!**/node_modules/**",
176 |       "!**/dist/**",
177 |       "!**/build/**",
178 |       "!**/.git/**",
179 |       "!**/coverage/**",
180 |       "!**/*.min.js",
181 |       "!**/venv/**",
182 |       "!**/__pycache__/**",
183 |       "!**/vendor/**",
184 |     ];
185 | 
186 |     return await globby(patterns, { cwd: this.rootPath, absolute: true });
187 |   }
188 | 
189 |   private async findConfigFiles(): Promise<string[]> {
190 |     const patterns = [
191 |       // JavaScript/Node.js configs
192 |       "package.json",
193 |       "tsconfig.json",
194 |       "jsconfig.json",
195 |       ".eslintrc*",
196 |       "prettier.config.*",
197 |       "webpack.config.*",
198 |       "vite.config.*",
199 |       "rollup.config.*",
200 |       "babel.config.*",
201 |       "next.config.*",
202 |       "nuxt.config.*",
203 |       "vue.config.*",
204 |       "svelte.config.*",
205 |       "tailwind.config.*",
206 |       "jest.config.*",
207 |       "vitest.config.*",
208 |       // Python configs
209 |       "setup.py",
210 |       "setup.cfg",
211 |       "pyproject.toml",
212 |       "requirements*.txt",
213 |       "Pipfile",
214 |       "poetry.lock",
215 |       "tox.ini",
216 |       "pytest.ini",
217 |       ".flake8",
218 |       "mypy.ini",
219 |       // Go configs
220 |       "go.mod",
221 |       "go.sum",
222 |       "Makefile",
223 |       // Container configs
224 |       "dockerfile*",
225 |       "docker-compose*",
226 |       ".dockerignore",
227 |       "Containerfile*",
228 |       // Kubernetes configs
229 |       "k8s/**/*.yml",
230 |       "k8s/**/*.yaml",
231 |       "kubernetes/**/*.yml",
232 |       "kubernetes/**/*.yaml",
233 |       "manifests/**/*.yml",
234 |       "manifests/**/*.yaml",
235 |       // Terraform configs
236 |       "**/*.tf",
237 |       "**/*.tfvars",
238 |       "terraform.tfstate*",
239 |       // CI/CD configs
240 |       ".github/workflows/*.yml",
241 |       ".github/workflows/*.yaml",
242 |       ".gitlab-ci.yml",
243 |       "Jenkinsfile",
244 |       ".circleci/config.yml",
245 |       // Ansible configs
246 |       "ansible.cfg",
247 |       "playbook*.yml",
248 |       "inventory*",
249 |       // Cloud configs
250 |       "serverless.yml",
251 |       "sam.yml",
252 |       "template.yml",
253 |       "cloudformation*.yml",
254 |       "pulumi*.yml",
255 |     ];
256 | 
257 |     return await globby(patterns, {
258 |       cwd: this.rootPath,
259 |       absolute: true,
260 |       caseSensitiveMatch: false,
261 |     });
262 |   }
263 | 
264 |   private async findTestFiles(): Promise<string[]> {
265 |     const patterns = [
266 |       // JavaScript/TypeScript tests
267 |       "**/*.test.ts",
268 |       "**/*.test.tsx",
269 |       "**/*.test.js",
270 |       "**/*.test.jsx",
271 |       "**/*.spec.ts",
272 |       "**/*.spec.tsx",
273 |       "**/*.spec.js",
274 |       "**/*.spec.jsx",
275 |       "**/test/**/*.ts",
276 |       "**/test/**/*.js",
277 |       "**/tests/**/*.ts",
278 |       "**/tests/**/*.js",
279 |       "**/__tests__/**/*.ts",
280 |       "**/__tests__/**/*.js",
281 |       // Python tests
282 |       "**/*_test.py",
283 |       "**/test_*.py",
284 |       "**/tests/**/*.py",
285 |       "**/test/**/*.py",
286 |       // Go tests
287 |       "**/*_test.go",
288 |       // Shell script tests
289 |       "**/test*.sh",
290 |       "**/tests/**/*.sh",
291 |       "!**/node_modules/**",
292 |       "!**/venv/**",
293 |       "!**/vendor/**",
294 |     ];
295 | 
296 |     return await globby(patterns, { cwd: this.rootPath, absolute: true });
297 |   }
298 | 
299 |   private async analyzePackageJson(result: CodeAnalysisResult): Promise<void> {
300 |     try {
301 |       const packageJsonPath = path.join(this.rootPath, "package.json");
302 |       const packageJsonContent = await fs.readFile(packageJsonPath, "utf-8");
303 |       const packageJson = JSON.parse(packageJsonContent);
304 | 
305 |       // Extract dependencies
306 |       if (packageJson.dependencies) {
307 |         result.dependencies = Object.keys(packageJson.dependencies);
308 |       }
309 |       if (packageJson.devDependencies) {
310 |         result.devDependencies = Object.keys(packageJson.devDependencies);
311 |       }
312 | 
313 |       // Detect package managers
314 |       const allDeps = [...result.dependencies, ...result.devDependencies];
315 |       if (allDeps.some((dep) => dep.startsWith("@npm/")))
316 |         result.packageManagers.push("npm");
317 |       if (allDeps.some((dep) => dep.includes("yarn")))
318 |         result.packageManagers.push("yarn");
319 |       if (allDeps.some((dep) => dep.includes("pnpm")))
320 |         result.packageManagers.push("pnpm");
321 | 
322 |       // Check for scripts that might indicate package managers
323 |       if (packageJson.scripts) {
324 |         const scripts = Object.keys(packageJson.scripts).join(" ");
325 |         if (scripts.includes("yarn")) result.packageManagers.push("yarn");
326 |         if (scripts.includes("pnpm")) result.packageManagers.push("pnpm");
327 |       }
328 | 
329 |       // Detect frameworks (expanded for your DevOps/Cloud stack)
330 |       const frameworkMap: Record<string, string[]> = {
331 |         // Web Frameworks
332 |         React: ["react", "@types/react"],
333 |         Vue: ["vue", "@vue/core"],
334 |         Angular: ["@angular/core"],
335 |         Svelte: ["svelte"],
336 |         "Next.js": ["next"],
337 |         Nuxt: ["nuxt"],
338 |         Express: ["express"],
339 |         Fastify: ["fastify"],
340 |         Koa: ["koa"],
341 |         NestJS: ["@nestjs/core"],
342 |         // Build Tools
343 |         Vite: ["vite"],
344 |         Webpack: ["webpack"],
345 |         Rollup: ["rollup"],
346 |         // Testing
347 |         Jest: ["jest"],
348 |         Vitest: ["vitest"],
349 |         Playwright: ["@playwright/test"],
350 |         Cypress: ["cypress"],
351 |         // Cloud/DevOps (Python)
352 |         Flask: ["flask"],
353 |         Django: ["django"],
354 |         FastAPI: ["fastapi"],
355 |         Ansible: ["ansible"],
356 |         Boto3: ["boto3"],
357 |         // Infrastructure
358 |         "AWS CDK": ["aws-cdk-lib", "@aws-cdk/core"],
359 |         Pulumi: ["@pulumi/pulumi"],
360 |         "Terraform CDK": ["cdktf"],
361 |       };
362 | 
363 |       for (const [framework, deps] of Object.entries(frameworkMap)) {
364 |         if (deps.some((dep) => allDeps.includes(dep))) {
365 |           result.frameworks.push(framework);
366 |         }
367 |       }
368 |     } catch (error) {
369 |       // package.json not found or invalid
370 |     }
371 |   }
372 | 
373 |   private async analyzeFile(
374 |     filePath: string,
375 |     result: CodeAnalysisResult,
376 |   ): Promise<void> {
377 |     try {
378 |       const content = await fs.readFile(filePath, "utf-8");
379 |       const relativePath = path.relative(this.rootPath, filePath);
380 |       const extension = path.extname(filePath).slice(1).toLowerCase();
381 | 
382 |       // Try multi-language parsing first
383 |       if (
384 |         this.multiLanguageScanner.getSupportedExtensions().includes(extension)
385 |       ) {
386 |         const parseResult = await this.multiLanguageScanner.parseFile(
387 |           content,
388 |           filePath,
389 |         );
390 |         this.mergeParseResults(parseResult, result);
391 |       }
392 |       // Fall back to TypeScript/JavaScript parsing for .ts/.js files
393 |       else if (["ts", "tsx", "js", "jsx", "mjs", "cjs"].includes(extension)) {
394 |         await this.analyzeTypeScriptFile(content, relativePath, result);
395 |       }
396 |       // Otherwise skip or warn
397 |       else {
398 |         console.warn(`Unsupported file type: ${filePath}`);
399 |       }
400 |     } catch (error) {
401 |       // File parsing failed - could be due to syntax errors or unsupported syntax
402 |       console.warn(`Failed to parse file ${filePath}:`, error);
403 |     }
404 |   }
405 | 
406 |   private async analyzeTypeScriptFile(
407 |     content: string,
408 |     relativePath: string,
409 |     result: CodeAnalysisResult,
410 |   ): Promise<void> {
411 |     try {
412 |       // Parse with TypeScript-ESLint parser
413 |       const ast = parse(content, {
414 |         filePath: relativePath,
415 |         sourceType: "module",
416 |         ecmaVersion: 2022,
417 |         ecmaFeatures: {
418 |           jsx: true,
419 |         },
420 |         comment: true,
421 |         range: true,
422 |         loc: true,
423 |       });
424 | 
425 |       // Traverse AST to extract code elements
426 |       this.traverseAST(ast, result, relativePath);
427 | 
428 |       // Look for API endpoints in the content
429 |       this.findAPIEndpoints(content, result, relativePath);
430 |     } catch (error) {
431 |       throw new Error(
432 |         `TypeScript parsing failed for ${relativePath}: ${error}`,
433 |       );
434 |     }
435 |   }
436 | 
437 |   private mergeParseResults(
438 |     parseResult: LanguageParseResult,
439 |     result: CodeAnalysisResult,
440 |   ): void {
441 |     result.functions.push(...parseResult.functions);
442 |     result.classes.push(...parseResult.classes);
443 |     result.interfaces.push(...parseResult.interfaces);
444 |     result.types.push(...parseResult.types);
445 |     result.enums.push(...parseResult.enums);
446 |     result.exports.push(...parseResult.exports);
447 |     result.imports.push(...parseResult.imports);
448 |     result.apiEndpoints.push(...parseResult.apiEndpoints);
449 |     result.constants.push(...parseResult.constants);
450 |     result.variables.push(...parseResult.variables);
451 |   }
452 | 
453 |   private traverseAST(
454 |     node: any,
455 |     result: CodeAnalysisResult,
456 |     filePath: string,
457 |     isInExport = false,
458 |   ): void {
459 |     if (!node || typeof node !== "object") return;
460 | 
461 |     const line = node.loc?.start?.line || 0;
462 |     const column = node.loc?.start?.column || 0;
463 | 
464 |     switch (node.type) {
465 |       case "FunctionDeclaration":
466 |         if (node.id?.name) {
467 |           result.functions.push({
468 |             name: node.id.name,
469 |             type: "function",
470 |             filePath,
471 |             line,
472 |             column,
473 |             exported: isInExport || this.isExported(node),
474 |             isAsync: node.async,
475 |             hasJSDoc: this.hasJSDoc(node),
476 |             jsDocDescription: this.getJSDocDescription(node),
477 |             parameters: node.params?.map((p: any) => p.name || "param") || [],
478 |           });
479 |         }
480 |         break;
481 | 
482 |       case "ClassDeclaration":
483 |         if (node.id?.name) {
484 |           result.classes.push({
485 |             name: node.id.name,
486 |             type: "class",
487 |             filePath,
488 |             line,
489 |             column,
490 |             exported: isInExport || this.isExported(node),
491 |             hasJSDoc: this.hasJSDoc(node),
492 |             jsDocDescription: this.getJSDocDescription(node),
493 |             decorators:
494 |               node.decorators?.map(
495 |                 (d: any) => d.expression?.name || "decorator",
496 |               ) || [],
497 |           });
498 |         }
499 |         break;
500 | 
501 |       case "TSInterfaceDeclaration":
502 |         if (node.id?.name) {
503 |           result.interfaces.push({
504 |             name: node.id.name,
505 |             type: "interface",
506 |             filePath,
507 |             line,
508 |             column,
509 |             exported: isInExport || this.isExported(node),
510 |             hasJSDoc: this.hasJSDoc(node),
511 |             jsDocDescription: this.getJSDocDescription(node),
512 |           });
513 |         }
514 |         break;
515 | 
516 |       case "TSTypeAliasDeclaration":
517 |         if (node.id?.name) {
518 |           result.types.push({
519 |             name: node.id.name,
520 |             type: "type",
521 |             filePath,
522 |             line,
523 |             column,
524 |             exported: isInExport || this.isExported(node),
525 |             hasJSDoc: this.hasJSDoc(node),
526 |             jsDocDescription: this.getJSDocDescription(node),
527 |           });
528 |         }
529 |         break;
530 | 
531 |       case "TSEnumDeclaration":
532 |         if (node.id?.name) {
533 |           result.enums.push({
534 |             name: node.id.name,
535 |             type: "enum",
536 |             filePath,
537 |             line,
538 |             column,
539 |             exported: isInExport || this.isExported(node),
540 |             hasJSDoc: this.hasJSDoc(node),
541 |             jsDocDescription: this.getJSDocDescription(node),
542 |           });
543 |         }
544 |         break;
545 | 
546 |       case "ExportNamedDeclaration":
547 |       case "ExportDefaultDeclaration":
548 |         // Handle exports
549 |         if (node.declaration) {
550 |           this.traverseAST(node.declaration, result, filePath, true);
551 |         }
552 |         if (node.specifiers) {
553 |           node.specifiers.forEach((spec: any) => {
554 |             if (spec.exported?.name) {
555 |               result.exports.push({
556 |                 name: spec.exported.name,
557 |                 type: "export",
558 |                 filePath,
559 |                 line,
560 |                 column,
561 |                 exported: true,
562 |               });
563 |             }
564 |           });
565 |         }
566 |         break;
567 | 
568 |       case "ImportDeclaration":
569 |         if (node.source?.value) {
570 |           result.imports.push({
571 |             name: node.source.value,
572 |             type: "import",
573 |             filePath,
574 |             line,
575 |             column,
576 |             exported: false,
577 |           });
578 |         }
579 |         break;
580 |     }
581 | 
582 |     // Recursively traverse child nodes
583 |     for (const key in node) {
584 |       if (key === "parent" || key === "loc" || key === "range") continue;
585 |       const child = node[key];
586 |       if (Array.isArray(child)) {
587 |         child.forEach((c) => this.traverseAST(c, result, filePath, isInExport));
588 |       } else if (child && typeof child === "object") {
589 |         this.traverseAST(child, result, filePath, isInExport);
590 |       }
591 |     }
592 |   }
593 | 
594 |   private findAPIEndpoints(
595 |     content: string,
596 |     result: CodeAnalysisResult,
597 |     filePath: string,
598 |   ): void {
599 |     // Common patterns for API endpoints
600 |     const patterns = [
601 |       // Express/Fastify/Koa patterns
602 |       /\.(get|post|put|delete|patch|all)\s*\(\s*['"`]([^'"`]+)['"`]/gi,
603 |       // Router patterns
604 |       /router\.(get|post|put|delete|patch|all)\s*\(\s*['"`]([^'"`]+)['"`]/gi,
605 |       // App patterns
606 |       /app\.(get|post|put|delete|patch|all)\s*\(\s*['"`]([^'"`]+)['"`]/gi,
607 |     ];
608 | 
609 |     const lines = content.split("\n");
610 | 
611 |     patterns.forEach((pattern) => {
612 |       let match;
613 |       while ((match = pattern.exec(content)) !== null) {
614 |         const method = match[1].toUpperCase() as APIEndpoint["method"];
615 |         const path = match[2];
616 | 
617 |         // Find line number
618 |         let line = 1;
619 |         let pos = 0;
620 |         for (let i = 0; i < lines.length; i++) {
621 |           if (pos + lines[i].length >= match.index!) {
622 |             line = i + 1;
623 |             break;
624 |           }
625 |           pos += lines[i].length + 1; // +1 for newline
626 |         }
627 | 
628 |         result.apiEndpoints.push({
629 |           method,
630 |           path,
631 |           filePath,
632 |           line,
633 |           hasDocumentation: this.hasEndpointDocumentation(
634 |             content,
635 |             match.index!,
636 |           ),
637 |         });
638 |       }
639 |     });
640 |   }
641 | 
642 |   private isExported(node: any): boolean {
643 |     // Check if node is part of an export declaration
644 |     return (
645 |       node.parent?.type === "ExportNamedDeclaration" ||
646 |       node.parent?.type === "ExportDefaultDeclaration"
647 |     );
648 |   }
649 | 
650 |   private hasJSDoc(node: any): boolean {
651 |     return (
652 |       node.comments?.some(
653 |         (comment: any) =>
654 |           comment.type === "Block" && comment.value.startsWith("*"),
655 |       ) || false
656 |     );
657 |   }
658 | 
659 |   private getJSDocDescription(node: any): string | undefined {
660 |     const jsDocComment = node.comments?.find(
661 |       (comment: any) =>
662 |         comment.type === "Block" && comment.value.startsWith("*"),
663 |     );
664 | 
665 |     if (jsDocComment) {
666 |       // Extract first line of JSDoc as description
667 |       const lines = jsDocComment.value.split("\n");
668 |       for (const line of lines) {
669 |         const cleaned = line.replace(/^\s*\*\s?/, "").trim();
670 |         if (cleaned && !cleaned.startsWith("@")) {
671 |           return cleaned;
672 |         }
673 |       }
674 |     }
675 | 
676 |     return undefined;
677 |   }
678 | 
679 |   private hasEndpointDocumentation(
680 |     content: string,
681 |     matchIndex: number,
682 |   ): boolean {
683 |     // Look for JSDoc or comments before the endpoint
684 |     const beforeMatch = content.substring(0, matchIndex);
685 |     const lines = beforeMatch.split("\n");
686 | 
687 |     // Check last few lines for documentation
688 |     for (let i = Math.max(0, lines.length - 5); i < lines.length; i++) {
689 |       const line = lines[i].trim();
690 |       if (
691 |         line.startsWith("/**") ||
692 |         line.startsWith("/*") ||
693 |         line.startsWith("//")
694 |       ) {
695 |         return true;
696 |       }
697 |     }
698 | 
699 |     return false;
700 |   }
701 | }
702 | 
```

--------------------------------------------------------------------------------
/tests/edge-cases/error-handling.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | // Edge cases and error handling tests
  2 | import { promises as fs } from "fs";
  3 | import path from "path";
  4 | import os from "os";
  5 | import { analyzeRepository } from "../../src/tools/analyze-repository";
  6 | import { recommendSSG } from "../../src/tools/recommend-ssg";
  7 | import { generateConfig } from "../../src/tools/generate-config";
  8 | import { setupStructure } from "../../src/tools/setup-structure";
  9 | import { deployPages } from "../../src/tools/deploy-pages";
 10 | import { verifyDeployment } from "../../src/tools/verify-deployment";
 11 | 
 12 | describe("Edge Cases and Error Handling", () => {
 13 |   let tempDir: string;
 14 | 
 15 |   beforeAll(async () => {
 16 |     tempDir = path.join(os.tmpdir(), "documcp-edge-case-tests");
 17 |     await fs.mkdir(tempDir, { recursive: true });
 18 |   });
 19 | 
 20 |   afterAll(async () => {
 21 |     try {
 22 |       await fs.rm(tempDir, { recursive: true, force: true });
 23 |     } catch (error) {
 24 |       console.warn("Failed to cleanup edge case test directory:", error);
 25 |     }
 26 |   });
 27 | 
 28 |   describe("Input Validation Edge Cases", () => {
 29 |     it("should handle null and undefined inputs gracefully", async () => {
 30 |       // Test analyze_repository with invalid inputs
 31 |       const invalidInputs = [
 32 |         null,
 33 |         undefined,
 34 |         {},
 35 |         { path: null },
 36 |         { path: undefined },
 37 |         { path: "", depth: "invalid" },
 38 |       ];
 39 | 
 40 |       for (const input of invalidInputs) {
 41 |         try {
 42 |           const result = await analyzeRepository(input as any);
 43 |           expect((result as any).isError).toBe(true);
 44 |         } catch (error) {
 45 |           // Catching errors is also acceptable for invalid inputs
 46 |           expect(error).toBeDefined();
 47 |         }
 48 |       }
 49 |     });
 50 | 
 51 |     it("should validate SSG enum values strictly", async () => {
 52 |       const invalidSSGs = [
 53 |         "react",
 54 |         "vue",
 55 |         "angular",
 56 |         "gatsby",
 57 |         "",
 58 |         null,
 59 |         undefined,
 60 |       ];
 61 | 
 62 |       for (const invalidSSG of invalidSSGs) {
 63 |         try {
 64 |           const result = await generateConfig({
 65 |             ssg: invalidSSG as any,
 66 |             projectName: "Test",
 67 |             outputPath: tempDir,
 68 |           });
 69 |           expect((result as any).isError).toBe(true);
 70 |         } catch (error) {
 71 |           expect(error).toBeDefined();
 72 |         }
 73 |       }
 74 |     });
 75 | 
 76 |     it("should handle extremely long input strings", async () => {
 77 |       const longString = "a".repeat(10000);
 78 | 
 79 |       const result = await generateConfig({
 80 |         ssg: "docusaurus",
 81 |         projectName: longString,
 82 |         projectDescription: longString,
 83 |         outputPath: path.join(tempDir, "long-strings"),
 84 |       });
 85 | 
 86 |       // Should handle long strings without crashing
 87 |       expect(result.content).toBeDefined();
 88 |     });
 89 | 
 90 |     it("should handle special characters in project names", async () => {
 91 |       const specialNames = [
 92 |         "Project with spaces",
 93 |         "Project-with-hyphens",
 94 |         "Project_with_underscores",
 95 |         "Project.with.dots",
 96 |         "Project@with#special$chars",
 97 |         "Проект на русском",
 98 |         "项目中文名",
 99 |         "プロジェクト日本語",
100 |       ];
101 | 
102 |       for (const name of specialNames) {
103 |         const outputDir = path.join(
104 |           tempDir,
105 |           "special-chars",
106 |           Buffer.from(name).toString("hex"),
107 |         );
108 |         await fs.mkdir(outputDir, { recursive: true });
109 | 
110 |         const result = await generateConfig({
111 |           ssg: "docusaurus",
112 |           projectName: name,
113 |           outputPath: outputDir,
114 |         });
115 | 
116 |         expect(result.content).toBeDefined();
117 |         expect((result as any).isError).toBeFalsy();
118 |       }
119 |     });
120 |   });
121 | 
122 |   describe("File System Edge Cases", () => {
123 |     it("should handle permission-denied scenarios", async () => {
124 |       if (process.platform === "win32") {
125 |         // Skip on Windows as permission handling is different
126 |         return;
127 |       }
128 | 
129 |       // Create a directory with restricted permissions
130 |       const restrictedDir = path.join(tempDir, "no-permissions");
131 |       await fs.mkdir(restrictedDir, { recursive: true });
132 | 
133 |       try {
134 |         await fs.chmod(restrictedDir, 0o000);
135 | 
136 |         const result = await analyzeRepository({
137 |           path: restrictedDir,
138 |           depth: "standard",
139 |         });
140 | 
141 |         expect((result as any).isError).toBe(true);
142 |       } finally {
143 |         // Restore permissions for cleanup
144 |         await fs.chmod(restrictedDir, 0o755);
145 |       }
146 |     });
147 | 
148 |     it("should handle symlinks and circular references", async () => {
149 |       const symlinkTest = path.join(tempDir, "symlink-test");
150 |       await fs.mkdir(symlinkTest, { recursive: true });
151 | 
152 |       // Create a file
153 |       const originalFile = path.join(symlinkTest, "original.txt");
154 |       await fs.writeFile(originalFile, "original content");
155 | 
156 |       // Create symlink
157 |       const symlinkFile = path.join(symlinkTest, "link.txt");
158 |       try {
159 |         await fs.symlink(originalFile, symlinkFile);
160 | 
161 |         const result = await analyzeRepository({
162 |           path: symlinkTest,
163 |           depth: "standard",
164 |         });
165 | 
166 |         expect(result.content).toBeDefined();
167 |         expect((result as any).isError).toBeFalsy();
168 |       } catch (error) {
169 |         // Symlinks might not be supported on all systems
170 |         console.warn("Symlink test skipped:", error);
171 |       }
172 |     });
173 | 
174 |     it("should handle very deep directory structures", async () => {
175 |       const deepTest = path.join(tempDir, "deep-structure");
176 |       let currentPath = deepTest;
177 | 
178 |       // Create 20 levels deep structure
179 |       for (let i = 0; i < 20; i++) {
180 |         currentPath = path.join(currentPath, `level-${i}`);
181 |         await fs.mkdir(currentPath, { recursive: true });
182 |         await fs.writeFile(
183 |           path.join(currentPath, `file-${i}.txt`),
184 |           `Content ${i}`,
185 |         );
186 |       }
187 | 
188 |       const result = await analyzeRepository({
189 |         path: deepTest,
190 |         depth: "deep",
191 |       });
192 | 
193 |       expect(result.content).toBeDefined();
194 |       expect((result as any).isError).toBeFalsy();
195 |     });
196 | 
197 |     it("should handle files with unusual extensions", async () => {
198 |       const unusualFiles = path.join(tempDir, "unusual-files");
199 |       await fs.mkdir(unusualFiles, { recursive: true });
200 | 
201 |       const unusualExtensions = [
202 |         "file.xyz",
203 |         "file.123",
204 |         "file.",
205 |         ".hidden",
206 |         "no-extension",
207 |         "file..double.dot",
208 |         "file with spaces.txt",
209 |         "file-with-émojis-🚀.md",
210 |       ];
211 | 
212 |       for (const filename of unusualExtensions) {
213 |         await fs.writeFile(path.join(unusualFiles, filename), "test content");
214 |       }
215 | 
216 |       const result = await analyzeRepository({
217 |         path: unusualFiles,
218 |         depth: "standard",
219 |       });
220 | 
221 |       expect(result.content).toBeDefined();
222 |       expect((result as any).isError).toBeFalsy();
223 | 
224 |       // Should count all files (excluding hidden files that start with .)
225 |       const analysisData = JSON.parse(
226 |         result.content.find((c) => c.text.includes('"totalFiles"'))!.text,
227 |       );
228 |       // The analyze function filters out .hidden files, so we expect 7 files instead of 8
229 |       expect(analysisData.structure.totalFiles).toBe(7); // 8 files minus .hidden
230 |     });
231 | 
232 |     it("should handle binary files gracefully", async () => {
233 |       const binaryTest = path.join(tempDir, "binary-files");
234 |       await fs.mkdir(binaryTest, { recursive: true });
235 | 
236 |       // Create binary-like files
237 |       const binaryData = Buffer.from([0x00, 0x01, 0x02, 0xff, 0xfe, 0xfd]);
238 |       await fs.writeFile(path.join(binaryTest, "binary.bin"), binaryData);
239 |       await fs.writeFile(path.join(binaryTest, "image.png"), binaryData);
240 |       await fs.writeFile(path.join(binaryTest, "archive.zip"), binaryData);
241 | 
242 |       const result = await analyzeRepository({
243 |         path: binaryTest,
244 |         depth: "standard",
245 |       });
246 | 
247 |       expect(result.content).toBeDefined();
248 |       expect((result as any).isError).toBeFalsy();
249 |     });
250 |   });
251 | 
252 |   describe("Memory and Performance Edge Cases", () => {
253 |     it("should handle repositories with many small files", async () => {
254 |       const manyFilesTest = path.join(tempDir, "many-small-files");
255 |       await fs.mkdir(manyFilesTest, { recursive: true });
256 | 
257 |       // Create 500 small files
258 |       for (let i = 0; i < 500; i++) {
259 |         await fs.writeFile(
260 |           path.join(manyFilesTest, `small-${i}.txt`),
261 |           `content ${i}`,
262 |         );
263 |       }
264 | 
265 |       const startTime = Date.now();
266 |       const result = await analyzeRepository({
267 |         path: manyFilesTest,
268 |         depth: "standard",
269 |       });
270 |       const executionTime = Date.now() - startTime;
271 | 
272 |       expect(result.content).toBeDefined();
273 |       expect((result as any).isError).toBeFalsy();
274 |       expect(executionTime).toBeLessThan(10000); // Should complete within 10 seconds
275 |     });
276 | 
277 |     it("should handle repositories with very large files", async () => {
278 |       const largeFilesTest = path.join(tempDir, "large-files");
279 |       await fs.mkdir(largeFilesTest, { recursive: true });
280 | 
281 |       // Create large files (1MB each)
282 |       const largeContent = "x".repeat(1024 * 1024);
283 |       await fs.writeFile(path.join(largeFilesTest, "large1.txt"), largeContent);
284 |       await fs.writeFile(path.join(largeFilesTest, "large2.log"), largeContent);
285 | 
286 |       const result = await analyzeRepository({
287 |         path: largeFilesTest,
288 |         depth: "quick", // Use quick to avoid timeout
289 |       });
290 | 
291 |       expect(result.content).toBeDefined();
292 |       expect((result as any).isError).toBeFalsy();
293 |     });
294 | 
295 |     it("should handle concurrent tool executions", async () => {
296 |       const concurrentTest = path.join(tempDir, "concurrent-test");
297 |       await fs.mkdir(concurrentTest, { recursive: true });
298 |       await fs.writeFile(
299 |         path.join(concurrentTest, "test.js"),
300 |         'console.log("test");',
301 |       );
302 |       await fs.writeFile(path.join(concurrentTest, "README.md"), "# Test");
303 | 
304 |       // Run multiple analyses concurrently
305 |       const promises = Array.from({ length: 5 }, () =>
306 |         analyzeRepository({
307 |           path: concurrentTest,
308 |           depth: "quick",
309 |         }),
310 |       );
311 | 
312 |       const results = await Promise.all(promises);
313 | 
314 |       results.forEach((result) => {
315 |         expect(result.content).toBeDefined();
316 |         expect((result as any).isError).toBeFalsy();
317 |       });
318 |     });
319 |   });
320 | 
321 |   describe("Configuration Edge Cases", () => {
322 |     it("should handle output paths with special characters", async () => {
323 |       const specialPaths = [
324 |         path.join(tempDir, "path with spaces"),
325 |         path.join(tempDir, "path-with-hyphens"),
326 |         path.join(tempDir, "path_with_underscores"),
327 |         path.join(tempDir, "path.with.dots"),
328 |       ];
329 | 
330 |       for (const specialPath of specialPaths) {
331 |         const result = await generateConfig({
332 |           ssg: "docusaurus",
333 |           projectName: "Special Path Test",
334 |           outputPath: specialPath,
335 |         });
336 | 
337 |         expect(result.content).toBeDefined();
338 |         expect((result as any).isError).toBeFalsy();
339 | 
340 |         // Verify files were actually created
341 |         const files = await fs.readdir(specialPath);
342 |         expect(files.length).toBeGreaterThan(0);
343 |       }
344 |     });
345 | 
346 |     it("should handle nested output directory creation", async () => {
347 |       const nestedPath = path.join(
348 |         tempDir,
349 |         "deeply",
350 |         "nested",
351 |         "output",
352 |         "directory",
353 |       );
354 | 
355 |       const result = await generateConfig({
356 |         ssg: "mkdocs",
357 |         projectName: "Nested Test",
358 |         outputPath: nestedPath,
359 |       });
360 | 
361 |       expect(result.content).toBeDefined();
362 |       expect((result as any).isError).toBeFalsy();
363 | 
364 |       // Verify nested directories were created
365 |       expect(
366 |         await fs
367 |           .access(nestedPath)
368 |           .then(() => true)
369 |           .catch(() => false),
370 |       ).toBe(true);
371 |     });
372 | 
373 |     it("should handle existing files without overwriting destructively", async () => {
374 |       const existingFiles = path.join(tempDir, "existing-files");
375 |       await fs.mkdir(existingFiles, { recursive: true });
376 | 
377 |       // Create existing file
378 |       const existingContent =
379 |         "This is existing content that should not be lost";
380 |       await fs.writeFile(
381 |         path.join(existingFiles, "important.txt"),
382 |         existingContent,
383 |       );
384 | 
385 |       const result = await generateConfig({
386 |         ssg: "docusaurus",
387 |         projectName: "Existing Files Test",
388 |         outputPath: existingFiles,
389 |       });
390 | 
391 |       expect(result.content).toBeDefined();
392 |       expect((result as any).isError).toBeFalsy();
393 | 
394 |       // Verify our important file still exists
395 |       expect(
396 |         await fs
397 |           .access(path.join(existingFiles, "important.txt"))
398 |           .then(() => true)
399 |           .catch(() => false),
400 |       ).toBe(true);
401 |     });
402 |   });
403 | 
404 |   describe("Deployment Edge Cases", () => {
405 |     it("should handle repositories with existing workflows", async () => {
406 |       const existingWorkflow = path.join(tempDir, "existing-workflow");
407 |       await fs.mkdir(path.join(existingWorkflow, ".github", "workflows"), {
408 |         recursive: true,
409 |       });
410 | 
411 |       // Create existing workflow
412 |       await fs.writeFile(
413 |         path.join(existingWorkflow, ".github", "workflows", "existing.yml"),
414 |         "name: Existing Workflow\non: push",
415 |       );
416 | 
417 |       const result = await deployPages({
418 |         repository: existingWorkflow,
419 |         ssg: "docusaurus",
420 |       });
421 | 
422 |       expect(result.content).toBeDefined();
423 |       expect((result as any).isError).toBeFalsy();
424 | 
425 |       // Both workflows should exist
426 |       const workflows = await fs.readdir(
427 |         path.join(existingWorkflow, ".github", "workflows"),
428 |       );
429 |       expect(workflows).toContain("existing.yml");
430 |       expect(workflows).toContain("deploy-docs.yml");
431 |     });
432 | 
433 |     it("should handle custom domain validation", async () => {
434 |       const customDomains = [
435 |         "docs.example.com",
436 |         "my-docs.github.io",
437 |         "documentation.mycompany.org",
438 |         "subdomain.example.co.uk",
439 |       ];
440 | 
441 |       for (const domain of customDomains) {
442 |         const domainTest = path.join(
443 |           tempDir,
444 |           "domain-test",
445 |           domain.replace(/[^a-z0-9]/gi, "-"),
446 |         );
447 | 
448 |         const result = await deployPages({
449 |           repository: domainTest,
450 |           ssg: "jekyll",
451 |           customDomain: domain,
452 |         });
453 | 
454 |         expect(result.content).toBeDefined();
455 |         expect((result as any).isError).toBeFalsy();
456 | 
457 |         // Verify CNAME file
458 |         const cnameContent = await fs.readFile(
459 |           path.join(domainTest, "CNAME"),
460 |           "utf-8",
461 |         );
462 |         expect(cnameContent.trim()).toBe(domain);
463 |       }
464 |     });
465 | 
466 |     it("should handle repository URL variations", async () => {
467 |       const urlVariations = [
468 |         "https://github.com/user/repo",
469 |         "https://github.com/user/repo.git",
470 |         "git@github.com:user/repo.git",
471 |         "/absolute/local/path",
472 |         "./relative/path",
473 |         ".",
474 |       ];
475 | 
476 |       for (const repo of urlVariations) {
477 |         const result = await verifyDeployment({
478 |           repository: repo,
479 |         });
480 | 
481 |         expect(result.content).toBeDefined();
482 |         expect((result as any).isError).toBeFalsy();
483 |       }
484 |     });
485 |   });
486 | 
487 |   describe("Unicode and Internationalization", () => {
488 |     it("should handle Unicode file names and content", async () => {
489 |       const unicodeTest = path.join(tempDir, "unicode-test");
490 |       await fs.mkdir(unicodeTest, { recursive: true });
491 | 
492 |       const unicodeFiles = [
493 |         { name: "中文文件.md", content: "# 中文标题\n这是中文内容。" },
494 |         { name: "русский.txt", content: "Привет мир!" },
495 |         {
496 |           name: "日本語.js",
497 |           content: '// 日本語のコメント\nconsole.log("こんにちは");',
498 |         },
499 |         {
500 |           name: "émojis-🚀-test.py",
501 |           content: '# -*- coding: utf-8 -*-\nprint("🚀 Unicode test")',
502 |         },
503 |       ];
504 | 
505 |       for (const file of unicodeFiles) {
506 |         await fs.writeFile(
507 |           path.join(unicodeTest, file.name),
508 |           file.content,
509 |           "utf8",
510 |         );
511 |       }
512 | 
513 |       const result = await analyzeRepository({
514 |         path: unicodeTest,
515 |         depth: "standard",
516 |       });
517 | 
518 |       expect(result.content).toBeDefined();
519 |       expect((result as any).isError).toBeFalsy();
520 | 
521 |       const analysisData = JSON.parse(
522 |         result.content.find((c) => c.text.includes('"totalFiles"'))!.text,
523 |       );
524 |       expect(analysisData.structure.totalFiles).toBe(unicodeFiles.length); // No README created in this test
525 |     });
526 | 
527 |     it("should handle different line ending styles", async () => {
528 |       const lineEndingTest = path.join(tempDir, "line-ending-test");
529 |       await fs.mkdir(lineEndingTest, { recursive: true });
530 | 
531 |       // Create files with different line endings
532 |       await fs.writeFile(
533 |         path.join(lineEndingTest, "unix.txt"),
534 |         "line1\nline2\nline3\n",
535 |       );
536 |       await fs.writeFile(
537 |         path.join(lineEndingTest, "windows.txt"),
538 |         "line1\r\nline2\r\nline3\r\n",
539 |       );
540 |       await fs.writeFile(
541 |         path.join(lineEndingTest, "mac.txt"),
542 |         "line1\rline2\rline3\r",
543 |       );
544 |       await fs.writeFile(
545 |         path.join(lineEndingTest, "mixed.txt"),
546 |         "line1\nline2\r\nline3\rline4\n",
547 |       );
548 | 
549 |       const result = await analyzeRepository({
550 |         path: lineEndingTest,
551 |         depth: "standard",
552 |       });
553 | 
554 |       expect(result.content).toBeDefined();
555 |       expect((result as any).isError).toBeFalsy();
556 |     });
557 |   });
558 | 
559 |   describe("Recovery and Resilience", () => {
560 |     it("should recover from partial failures gracefully", async () => {
561 |       const partialFailure = path.join(tempDir, "partial-failure");
562 |       await fs.mkdir(partialFailure, { recursive: true });
563 | 
564 |       // Create some valid files
565 |       await fs.writeFile(
566 |         path.join(partialFailure, "valid.js"),
567 |         'console.log("valid");',
568 |       );
569 |       await fs.writeFile(
570 |         path.join(partialFailure, "package.json"),
571 |         '{"name": "test"}',
572 |       );
573 | 
574 |       // Create some problematic scenarios
575 |       await fs.mkdir(path.join(partialFailure, "empty-dir"));
576 | 
577 |       const result = await analyzeRepository({
578 |         path: partialFailure,
579 |         depth: "standard",
580 |       });
581 | 
582 |       expect(result.content).toBeDefined();
583 |       expect((result as any).isError).toBeFalsy();
584 | 
585 |       // Should still provide useful analysis despite issues
586 |       const analysisData = JSON.parse(
587 |         result.content.find((c) => c.text.includes('"ecosystem"'))!.text,
588 |       );
589 |       expect(analysisData.dependencies.ecosystem).toBe("javascript");
590 |     });
591 | 
592 |     it("should provide meaningful error messages", async () => {
593 |       const result = await analyzeRepository({
594 |         path: "/absolutely/does/not/exist/anywhere",
595 |         depth: "standard",
596 |       });
597 | 
598 |       expect((result as any).isError).toBe(true);
599 |       const errorText = result.content.map((c) => c.text).join(" ");
600 | 
601 |       // Error message should be helpful
602 |       expect(errorText.toLowerCase()).toContain("error");
603 |       expect(errorText.toLowerCase()).toMatch(
604 |         /resolution|solution|fix|check|ensure/,
605 |       );
606 |     });
607 | 
608 |     it("should handle timeout scenarios gracefully", async () => {
609 |       // This test verifies that long-running operations don't hang indefinitely
610 |       const longOperation = analyzeRepository({
611 |         path: tempDir, // Large temp directory
612 |         depth: "deep",
613 |       });
614 | 
615 |       // Set a reasonable timeout with proper cleanup
616 |       let timeoutId: NodeJS.Timeout | undefined;
617 |       const timeoutPromise = new Promise((_, reject) => {
618 |         timeoutId = setTimeout(
619 |           () => reject(new Error("Operation timed out")),
620 |           30000,
621 |         ); // 30 seconds
622 |       });
623 | 
624 |       try {
625 |         await Promise.race([longOperation, timeoutPromise]);
626 |       } catch (error) {
627 |         if ((error as Error).message === "Operation timed out") {
628 |           console.warn(
629 |             "Long operation test timed out - this is expected behavior",
630 |           );
631 |         } else {
632 |           throw error;
633 |         }
634 |       } finally {
635 |         // Clean up the timeout to prevent Jest hanging
636 |         if (timeoutId) {
637 |           clearTimeout(timeoutId);
638 |         }
639 |       }
640 |     });
641 |   });
642 | });
643 | 
```

--------------------------------------------------------------------------------
/src/utils/content-extractor.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { promises as fs } from "fs";
  2 | import path from "path";
  3 | 
  4 | export interface ExtractedContent {
  5 |   readme?: {
  6 |     content: string;
  7 |     sections: Array<{
  8 |       title: string;
  9 |       content: string;
 10 |       level: number;
 11 |     }>;
 12 |   };
 13 |   existingDocs: Array<{
 14 |     path: string;
 15 |     title: string;
 16 |     content: string;
 17 |     category?: "tutorial" | "how-to" | "reference" | "explanation";
 18 |   }>;
 19 |   adrs: Array<{
 20 |     number: string;
 21 |     title: string;
 22 |     status: string;
 23 |     content: string;
 24 |     decision: string;
 25 |     consequences: string;
 26 |   }>;
 27 |   codeExamples: Array<{
 28 |     file: string;
 29 |     description: string;
 30 |     code: string;
 31 |     language: string;
 32 |   }>;
 33 |   apiDocs: Array<{
 34 |     endpoint?: string;
 35 |     function?: string;
 36 |     description: string;
 37 |     parameters: Array<{ name: string; type: string; description: string }>;
 38 |     returns?: string;
 39 |   }>;
 40 | }
 41 | 
 42 | /**
 43 |  * Extracts comprehensive content from a repository for documentation generation.
 44 |  *
 45 |  * Performs multi-layered content extraction including README analysis, existing
 46 |  * documentation discovery, ADR (Architectural Decision Record) parsing, code
 47 |  * examples identification, and API documentation extraction. This function is
 48 |  * the foundation for intelligent content population and documentation generation.
 49 |  *
 50 |  * @param repoPath - The file system path to the repository to extract content from
 51 |  *
 52 |  * @returns Promise resolving to comprehensive extracted content:
 53 |  * - readme: README content with structured sections
 54 |  * - existingDocs: existing documentation files found
 55 |  * - adrs: Architectural Decision Records (number, status, decision, consequences)
 56 |  * - codeExamples: code examples with descriptions and language
 57 |  * - apiDocs: API documentation extracted from JSDoc comments
 58 |  *
 59 |  * @throws {Error} When repository path is inaccessible
 60 |  * @throws {Error} When content extraction fails
 61 |  *
 62 |  * @example
 63 |  * ```typescript
 64 |  * const content = await extractRepositoryContent("/path/to/repository");
 65 |  * console.log(`Found ${content.existingDocs.length} existing docs`);
 66 |  * console.log(`Extracted ${content.codeExamples.length} code examples`);
 67 |  * ```
 68 |  *
 69 |  * @since 1.0.0
 70 |  */
 71 | export async function extractRepositoryContent(
 72 |   repoPath: string,
 73 | ): Promise<ExtractedContent> {
 74 |   const content: ExtractedContent = {
 75 |     existingDocs: [],
 76 |     adrs: [],
 77 |     codeExamples: [],
 78 |     apiDocs: [],
 79 |   };
 80 | 
 81 |   // Extract README content
 82 |   content.readme = await extractReadme(repoPath);
 83 | 
 84 |   // Extract existing documentation
 85 |   content.existingDocs = await extractExistingDocs(repoPath);
 86 | 
 87 |   // Extract ADRs
 88 |   content.adrs = await extractADRs(repoPath);
 89 | 
 90 |   // Extract code examples from main source files
 91 |   content.codeExamples = await extractCodeExamples(repoPath);
 92 | 
 93 |   // Extract API documentation from code comments
 94 |   content.apiDocs = await extractAPIDocs(repoPath);
 95 | 
 96 |   return content;
 97 | }
 98 | 
 99 | async function extractReadme(
100 |   repoPath: string,
101 | ): Promise<ExtractedContent["readme"] | undefined> {
102 |   const readmeFiles = ["README.md", "readme.md", "Readme.md"];
103 | 
104 |   for (const filename of readmeFiles) {
105 |     try {
106 |       const readmePath = path.join(repoPath, filename);
107 |       const content = await fs.readFile(readmePath, "utf-8");
108 | 
109 |       const sections = parseMarkdownSections(content);
110 | 
111 |       return { content, sections };
112 |     } catch {
113 |       // Continue to next potential README file
114 |     }
115 |   }
116 | 
117 |   return undefined;
118 | }
119 | 
120 | async function extractExistingDocs(
121 |   repoPath: string,
122 | ): Promise<ExtractedContent["existingDocs"]> {
123 |   const docs: ExtractedContent["existingDocs"] = [];
124 |   const docDirs = ["docs", "documentation", "doc"];
125 | 
126 |   for (const dir of docDirs) {
127 |     try {
128 |       const docsPath = path.join(repoPath, dir);
129 |       await extractDocsFromDir(docsPath, docs, "");
130 |     } catch {
131 |       // Directory doesn't exist, continue
132 |     }
133 |   }
134 | 
135 |   return docs;
136 | }
137 | 
138 | async function extractDocsFromDir(
139 |   dirPath: string,
140 |   docs: ExtractedContent["existingDocs"],
141 |   relativePath: string,
142 | ): Promise<void> {
143 |   const entries = await fs.readdir(dirPath, { withFileTypes: true });
144 | 
145 |   for (const entry of entries) {
146 |     const fullPath = path.join(dirPath, entry.name);
147 |     const relPath = path.join(relativePath, entry.name);
148 | 
149 |     if (entry.isDirectory() && !entry.name.startsWith(".")) {
150 |       await extractDocsFromDir(fullPath, docs, relPath);
151 |     } else if (
152 |       entry.isFile() &&
153 |       (entry.name.endsWith(".md") || entry.name.endsWith(".mdx"))
154 |     ) {
155 |       try {
156 |         const content = await fs.readFile(fullPath, "utf-8");
157 |         const title = extractTitle(content, entry.name);
158 |         const category = categorizeDocument(content, relPath);
159 | 
160 |         docs.push({
161 |           path: relPath,
162 |           title,
163 |           content,
164 |           category,
165 |         });
166 |       } catch {
167 |         // Skip files that can't be read
168 |       }
169 |     }
170 |   }
171 | }
172 | 
173 | async function extractADRs(
174 |   repoPath: string,
175 | ): Promise<ExtractedContent["adrs"]> {
176 |   const adrs: ExtractedContent["adrs"] = [];
177 |   const adrPaths = [
178 |     "docs/adrs",
179 |     "docs/adr",
180 |     "docs/decisions",
181 |     "docs/architecture/decisions",
182 |     "adr",
183 |   ];
184 | 
185 |   for (const adrDir of adrPaths) {
186 |     try {
187 |       const dirPath = path.join(repoPath, adrDir);
188 |       const files = await fs.readdir(dirPath);
189 | 
190 |       for (const file of files) {
191 |         if (file.endsWith(".md") && /\d{3,4}/.test(file)) {
192 |           const content = await fs.readFile(path.join(dirPath, file), "utf-8");
193 |           const adr = parseADR(content, file);
194 |           if (adr) {
195 |             adrs.push(adr);
196 |           }
197 |         }
198 |       }
199 | 
200 |       // If we found ADRs, don't check other directories
201 |       if (adrs.length > 0) break;
202 |     } catch {
203 |       // Directory doesn't exist, continue
204 |     }
205 |   }
206 | 
207 |   return adrs;
208 | }
209 | 
210 | async function extractCodeExamples(
211 |   repoPath: string,
212 | ): Promise<ExtractedContent["codeExamples"]> {
213 |   const examples: ExtractedContent["codeExamples"] = [];
214 | 
215 |   // Look for example directories
216 |   const exampleDirs = ["examples", "samples", "demo"];
217 | 
218 |   for (const dir of exampleDirs) {
219 |     try {
220 |       const examplesPath = path.join(repoPath, dir);
221 |       await extractExamplesFromDir(examplesPath, examples);
222 |     } catch {
223 |       // Directory doesn't exist
224 |     }
225 |   }
226 | 
227 |   // Also extract from main source files if they contain example comments
228 |   try {
229 |     const srcPath = path.join(repoPath, "src");
230 |     await extractInlineExamples(srcPath, examples);
231 |   } catch {
232 |     // No src directory
233 |   }
234 | 
235 |   return examples;
236 | }
237 | 
238 | async function extractExamplesFromDir(
239 |   dirPath: string,
240 |   examples: ExtractedContent["codeExamples"],
241 | ): Promise<void> {
242 |   const entries = await fs.readdir(dirPath, { withFileTypes: true });
243 | 
244 |   for (const entry of entries) {
245 |     const fullPath = path.join(dirPath, entry.name);
246 | 
247 |     if (entry.isFile()) {
248 |       const ext = path.extname(entry.name);
249 |       const language = getLanguageFromExtension(ext);
250 | 
251 |       if (language) {
252 |         try {
253 |           const code = await fs.readFile(fullPath, "utf-8");
254 |           const description = extractExampleDescription(code);
255 | 
256 |           examples.push({
257 |             file: entry.name,
258 |             description: description || `Example: ${entry.name}`,
259 |             code: code.slice(0, 500), // First 500 chars
260 |             language,
261 |           });
262 |         } catch {
263 |           // Skip unreadable files
264 |         }
265 |       }
266 |     }
267 |   }
268 | }
269 | 
270 | async function extractInlineExamples(
271 |   srcPath: string,
272 |   examples: ExtractedContent["codeExamples"],
273 | ): Promise<void> {
274 |   // Extract examples from comments marked with @example
275 |   const files = await walkSourceFiles(srcPath);
276 | 
277 |   for (const file of files) {
278 |     try {
279 |       const content = await fs.readFile(file, "utf-8");
280 |       const exampleBlocks = content.match(/@example[\s\S]*?(?=@\w|$)/g);
281 | 
282 |       if (exampleBlocks) {
283 |         for (const block of exampleBlocks) {
284 |           const code = block.replace(/@example\s*/, "").trim();
285 |           const language = getLanguageFromExtension(path.extname(file));
286 | 
287 |           if (code && language) {
288 |             examples.push({
289 |               file: path.basename(file),
290 |               description: `Example from ${path.basename(file)}`,
291 |               code: code.slice(0, 500),
292 |               language,
293 |             });
294 |           }
295 |         }
296 |       }
297 |     } catch {
298 |       // Skip unreadable files
299 |     }
300 |   }
301 | }
302 | 
303 | async function extractAPIDocs(
304 |   repoPath: string,
305 | ): Promise<ExtractedContent["apiDocs"]> {
306 |   const apiDocs: ExtractedContent["apiDocs"] = [];
307 | 
308 |   // Look for API documentation in various formats
309 |   const apiFiles = [
310 |     "api.md",
311 |     "API.md",
312 |     "docs/api.md",
313 |     "docs/API.md",
314 |     "openapi.json",
315 |     "openapi.yaml",
316 |     "swagger.json",
317 |     "swagger.yaml",
318 |   ];
319 | 
320 |   for (const file of apiFiles) {
321 |     try {
322 |       const filePath = path.join(repoPath, file);
323 |       const content = await fs.readFile(filePath, "utf-8");
324 | 
325 |       if (file.endsWith(".md")) {
326 |         // Parse markdown API documentation
327 |         const apis = parseMarkdownAPI(content);
328 |         apiDocs.push(...apis);
329 |       } else if (file.includes("openapi") || file.includes("swagger")) {
330 |         // Parse OpenAPI/Swagger spec
331 |         const apis = parseOpenAPISpec(content);
332 |         apiDocs.push(...apis);
333 |       }
334 |     } catch {
335 |       // File doesn't exist
336 |     }
337 |   }
338 | 
339 |   // Also extract from source code comments
340 |   try {
341 |     const srcPath = path.join(repoPath, "src");
342 |     const jsDocAPIs = await extractJSDocAPIs(srcPath);
343 |     apiDocs.push(...jsDocAPIs);
344 |   } catch {
345 |     // No src directory
346 |   }
347 | 
348 |   return apiDocs;
349 | }
350 | 
351 | // Helper functions
352 | 
353 | function parseMarkdownSections(
354 |   content: string,
355 | ): Array<{ title: string; content: string; level: number }> {
356 |   const sections: Array<{ title: string; content: string; level: number }> = [];
357 |   const lines = content.split("\n");
358 |   let currentSection: { title: string; content: string; level: number } | null =
359 |     null;
360 | 
361 |   for (const line of lines) {
362 |     const headerMatch = line.match(/^(#{1,6})\s+(.+)$/);
363 | 
364 |     if (headerMatch) {
365 |       if (currentSection) {
366 |         sections.push(currentSection);
367 |       }
368 |       currentSection = {
369 |         title: headerMatch[2],
370 |         content: "",
371 |         level: headerMatch[1].length,
372 |       };
373 |     } else if (currentSection) {
374 |       currentSection.content += line + "\n";
375 |     }
376 |   }
377 | 
378 |   if (currentSection) {
379 |     sections.push(currentSection);
380 |   }
381 | 
382 |   return sections;
383 | }
384 | 
385 | function extractTitle(content: string, filename: string): string {
386 |   const lines = content.split("\n");
387 | 
388 |   for (const line of lines) {
389 |     if (line.startsWith("# ")) {
390 |       return line.replace("# ", "").trim();
391 |     }
392 |   }
393 | 
394 |   return filename.replace(/\.(md|mdx)$/, "").replace(/[-_]/g, " ");
395 | }
396 | 
397 | function categorizeDocument(
398 |   content: string,
399 |   filePath: string,
400 | ): ExtractedContent["existingDocs"][0]["category"] {
401 |   const lowerContent = content.toLowerCase();
402 |   const lowerPath = filePath.toLowerCase();
403 | 
404 |   if (
405 |     lowerPath.includes("tutorial") ||
406 |     lowerPath.includes("getting-started") ||
407 |     lowerContent.includes("## getting started") ||
408 |     lowerContent.includes("# tutorial")
409 |   ) {
410 |     return "tutorial";
411 |   }
412 | 
413 |   if (
414 |     lowerPath.includes("how-to") ||
415 |     lowerPath.includes("guide") ||
416 |     lowerContent.includes("## how to") ||
417 |     lowerContent.includes("# guide")
418 |   ) {
419 |     return "how-to";
420 |   }
421 | 
422 |   if (
423 |     lowerPath.includes("api") ||
424 |     lowerPath.includes("reference") ||
425 |     lowerContent.includes("## api") ||
426 |     lowerContent.includes("# reference")
427 |   ) {
428 |     return "reference";
429 |   }
430 | 
431 |   if (
432 |     lowerPath.includes("concept") ||
433 |     lowerPath.includes("explanation") ||
434 |     lowerPath.includes("architecture") ||
435 |     lowerPath.includes("adr")
436 |   ) {
437 |     return "explanation";
438 |   }
439 | 
440 |   // Default categorization based on content patterns
441 |   if (lowerContent.includes("step 1") || lowerContent.includes("first,")) {
442 |     return "tutorial";
443 |   }
444 | 
445 |   if (lowerContent.includes("to do this") || lowerContent.includes("you can")) {
446 |     return "how-to";
447 |   }
448 | 
449 |   return "reference";
450 | }
451 | 
452 | function parseADR(
453 |   content: string,
454 |   filename: string,
455 | ): ExtractedContent["adrs"][0] | null {
456 |   const lines = content.split("\n");
457 |   const adr: Partial<ExtractedContent["adrs"][0]> = {
458 |     content,
459 |   };
460 | 
461 |   // Extract ADR number from filename
462 |   const numberMatch = filename.match(/(\d{3,4})/);
463 |   if (numberMatch) {
464 |     adr.number = numberMatch[1];
465 |   }
466 | 
467 |   // Extract title
468 |   for (const line of lines) {
469 |     if (line.startsWith("# ")) {
470 |       adr.title = line
471 |         .replace("# ", "")
472 |         .replace(/^\d+\.?\s*/, "")
473 |         .trim();
474 |       break;
475 |     }
476 |   }
477 | 
478 |   // Extract status
479 |   const statusMatch = content.match(/## Status\s*\n+([^\n]+)/i);
480 |   if (statusMatch) {
481 |     adr.status = statusMatch[1].trim();
482 |   }
483 | 
484 |   // Extract decision
485 |   const decisionMatch = content.match(/## Decision\s*\n+([\s\S]*?)(?=##|$)/i);
486 |   if (decisionMatch) {
487 |     adr.decision = decisionMatch[1].trim();
488 |   }
489 | 
490 |   // Extract consequences
491 |   const consequencesMatch = content.match(
492 |     /## Consequences\s*\n+([\s\S]*?)(?=##|$)/i,
493 |   );
494 |   if (consequencesMatch) {
495 |     adr.consequences = consequencesMatch[1].trim();
496 |   }
497 | 
498 |   if (adr.number && adr.title) {
499 |     return adr as ExtractedContent["adrs"][0];
500 |   }
501 | 
502 |   return null;
503 | }
504 | 
505 | function getLanguageFromExtension(ext: string): string | null {
506 |   const languageMap: Record<string, string> = {
507 |     ".js": "javascript",
508 |     ".jsx": "javascript",
509 |     ".ts": "typescript",
510 |     ".tsx": "typescript",
511 |     ".py": "python",
512 |     ".rb": "ruby",
513 |     ".go": "go",
514 |     ".java": "java",
515 |     ".cpp": "cpp",
516 |     ".c": "c",
517 |     ".rs": "rust",
518 |     ".php": "php",
519 |     ".swift": "swift",
520 |     ".kt": "kotlin",
521 |     ".scala": "scala",
522 |     ".sh": "bash",
523 |     ".yaml": "yaml",
524 |     ".yml": "yaml",
525 |     ".json": "json",
526 |   };
527 | 
528 |   return languageMap[ext] || null;
529 | }
530 | 
531 | function extractExampleDescription(code: string): string | null {
532 |   const lines = code.split("\n").slice(0, 10);
533 | 
534 |   for (const line of lines) {
535 |     if (
536 |       line.includes("Example:") ||
537 |       line.includes("Demo:") ||
538 |       line.includes("Sample:")
539 |     ) {
540 |       return line.replace(/[/*#-]/, "").trim();
541 |     }
542 |   }
543 | 
544 |   return null;
545 | }
546 | 
547 | async function walkSourceFiles(
548 |   dir: string,
549 |   files: string[] = [],
550 | ): Promise<string[]> {
551 |   try {
552 |     const entries = await fs.readdir(dir, { withFileTypes: true });
553 | 
554 |     for (const entry of entries) {
555 |       const fullPath = path.join(dir, entry.name);
556 | 
557 |       if (
558 |         entry.isDirectory() &&
559 |         !entry.name.startsWith(".") &&
560 |         entry.name !== "node_modules"
561 |       ) {
562 |         await walkSourceFiles(fullPath, files);
563 |       } else if (entry.isFile()) {
564 |         const ext = path.extname(entry.name);
565 |         if (
566 |           [".js", ".ts", ".jsx", ".tsx", ".py", ".rb", ".go", ".java"].includes(
567 |             ext,
568 |           )
569 |         ) {
570 |           files.push(fullPath);
571 |         }
572 |       }
573 |     }
574 |   } catch {
575 |     // Directory doesn't exist or can't be read
576 |   }
577 | 
578 |   return files;
579 | }
580 | 
581 | function parseMarkdownAPI(content: string): ExtractedContent["apiDocs"] {
582 |   const apis: ExtractedContent["apiDocs"] = [];
583 |   const sections = content.split(/^##\s+/m);
584 | 
585 |   for (const section of sections) {
586 |     if (
587 |       section.includes("API") ||
588 |       section.includes("endpoint") ||
589 |       section.includes("function")
590 |     ) {
591 |       const lines = section.split("\n");
592 |       const api: Partial<ExtractedContent["apiDocs"][0]> = {
593 |         parameters: [],
594 |       };
595 | 
596 |       // Extract function/endpoint name
597 |       const titleMatch = lines[0].match(/`([^`]+)`/);
598 |       if (titleMatch) {
599 |         if (titleMatch[1].includes("/")) {
600 |           api.endpoint = titleMatch[1];
601 |         } else {
602 |           api.function = titleMatch[1];
603 |         }
604 |       }
605 | 
606 |       // Extract description
607 |       for (let i = 1; i < lines.length; i++) {
608 |         if (lines[i] && !lines[i].startsWith("###")) {
609 |           api.description = lines[i].trim();
610 |           break;
611 |         }
612 |       }
613 | 
614 |       // Extract parameters
615 |       const paramsIndex = lines.findIndex(
616 |         (l) => l.includes("Parameters") || l.includes("Arguments"),
617 |       );
618 |       if (paramsIndex !== -1) {
619 |         for (let i = paramsIndex + 1; i < lines.length; i++) {
620 |           const paramMatch = lines[i].match(
621 |             /[-*]\s*`([^`]+)`\s*\(([^)]+)\)\s*[-:]?\s*(.+)/,
622 |           );
623 |           if (paramMatch) {
624 |             api.parameters?.push({
625 |               name: paramMatch[1],
626 |               type: paramMatch[2],
627 |               description: paramMatch[3],
628 |             });
629 |           }
630 |         }
631 |       }
632 | 
633 |       // Extract returns
634 |       const returnsIndex = lines.findIndex(
635 |         (l) => l.includes("Returns") || l.includes("Response"),
636 |       );
637 |       if (returnsIndex !== -1 && returnsIndex + 1 < lines.length) {
638 |         api.returns = lines[returnsIndex + 1].trim();
639 |       }
640 | 
641 |       if ((api.endpoint || api.function) && api.description) {
642 |         apis.push(api as ExtractedContent["apiDocs"][0]);
643 |       }
644 |     }
645 |   }
646 | 
647 |   return apis;
648 | }
649 | 
650 | function parseOpenAPISpec(content: string): ExtractedContent["apiDocs"] {
651 |   const apis: ExtractedContent["apiDocs"] = [];
652 | 
653 |   try {
654 |     const spec = JSON.parse(content);
655 | 
656 |     if (spec.paths) {
657 |       for (const [path, methods] of Object.entries(spec.paths)) {
658 |         for (const [method, operation] of Object.entries(methods as any)) {
659 |           if (typeof operation === "object" && operation) {
660 |             const api: ExtractedContent["apiDocs"][0] = {
661 |               endpoint: `${method.toUpperCase()} ${path}`,
662 |               description:
663 |                 (operation as any).summary ||
664 |                 (operation as any).description ||
665 |                 "",
666 |               parameters: [],
667 |             };
668 | 
669 |             if ((operation as any).parameters) {
670 |               for (const param of (operation as any).parameters) {
671 |                 api.parameters.push({
672 |                   name: param.name,
673 |                   type: param.type || param.schema?.type || "any",
674 |                   description: param.description || "",
675 |                 });
676 |               }
677 |             }
678 | 
679 |             if ((operation as any).responses?.["200"]) {
680 |               api.returns = (operation as any).responses["200"].description;
681 |             }
682 | 
683 |             apis.push(api);
684 |           }
685 |         }
686 |       }
687 |     }
688 |   } catch {
689 |     // Not valid JSON or doesn't follow expected structure
690 |   }
691 | 
692 |   return apis;
693 | }
694 | 
695 | async function extractJSDocAPIs(
696 |   srcPath: string,
697 | ): Promise<ExtractedContent["apiDocs"]> {
698 |   const apis: ExtractedContent["apiDocs"] = [];
699 |   const files = await walkSourceFiles(srcPath);
700 | 
701 |   for (const file of files.slice(0, 20)) {
702 |     // Limit to first 20 files for performance
703 |     try {
704 |       const content = await fs.readFile(file, "utf-8");
705 |       const jsdocBlocks = content.match(/\/\*\*[\s\S]*?\*\//g);
706 | 
707 |       if (jsdocBlocks) {
708 |         for (const block of jsdocBlocks) {
709 |           const api = parseJSDocBlock(block);
710 |           if (api) {
711 |             apis.push(api);
712 |           }
713 |         }
714 |       }
715 |     } catch {
716 |       // Skip unreadable files
717 |     }
718 |   }
719 | 
720 |   return apis;
721 | }
722 | 
723 | function parseJSDocBlock(block: string): ExtractedContent["apiDocs"][0] | null {
724 |   const api: Partial<ExtractedContent["apiDocs"][0]> = {
725 |     parameters: [],
726 |   };
727 | 
728 |   // Extract description (first line after /^**)
729 |   const descMatch = block.match(/\/\*\*\s*\n\s*\*\s*([^@\n]+)/);
730 |   if (descMatch) {
731 |     api.description = descMatch[1].trim();
732 |   }
733 | 
734 |   // Extract function name from the code following the JSDoc
735 |   const functionMatch = block.match(
736 |     /function\s+(\w+)|const\s+(\w+)\s*=|(\w+)\s*\(/,
737 |   );
738 |   if (functionMatch) {
739 |     api.function = functionMatch[1] || functionMatch[2] || functionMatch[3];
740 |   }
741 | 
742 |   // Extract parameters
743 |   const paramMatches = block.matchAll(
744 |     /@param\s*{([^}]+)}\s*(\w+)\s*-?\s*(.+)/g,
745 |   );
746 |   for (const match of paramMatches) {
747 |     api.parameters?.push({
748 |       name: match[2],
749 |       type: match[1],
750 |       description: match[3].trim(),
751 |     });
752 |   }
753 | 
754 |   // Extract returns
755 |   const returnsMatch = block.match(/@returns?\s*{([^}]+)}\s*(.+)/);
756 |   if (returnsMatch) {
757 |     api.returns = `${returnsMatch[1]}: ${returnsMatch[2]}`;
758 |   }
759 | 
760 |   if (api.function && api.description) {
761 |     return api as ExtractedContent["apiDocs"][0];
762 |   }
763 | 
764 |   return null;
765 | }
766 | 
```
Page 13/29FirstPrevNextLast