#
tokens: 45724/50000 6/274 files (page 16/29)
lines: on (toggle) GitHub
raw markdown copy reset
This is page 16 of 29. Use http://codebase.md/tosin2013/documcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│   ├── agents
│   │   ├── documcp-ast.md
│   │   ├── documcp-deploy.md
│   │   ├── documcp-memory.md
│   │   ├── documcp-test.md
│   │   └── documcp-tool.md
│   ├── copilot-instructions.md
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── automated-changelog.md
│   │   ├── bug_report.md
│   │   ├── bug_report.yml
│   │   ├── documentation_issue.md
│   │   ├── feature_request.md
│   │   ├── feature_request.yml
│   │   ├── npm-publishing-fix.md
│   │   └── release_improvements.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── release-drafter.yml
│   └── workflows
│       ├── auto-merge.yml
│       ├── ci.yml
│       ├── codeql.yml
│       ├── dependency-review.yml
│       ├── deploy-docs.yml
│       ├── README.md
│       ├── release-drafter.yml
│       └── release.yml
├── .gitignore
├── .husky
│   ├── commit-msg
│   └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│   ├── .docusaurus
│   │   ├── docusaurus-plugin-content-docs
│   │   │   └── default
│   │   │       └── __mdx-loader-dependency.json
│   │   └── docusaurus-plugin-content-pages
│   │       └── default
│   │           └── __plugin.json
│   ├── adrs
│   │   ├── 001-mcp-server-architecture.md
│   │   ├── 002-repository-analysis-engine.md
│   │   ├── 003-static-site-generator-recommendation-engine.md
│   │   ├── 004-diataxis-framework-integration.md
│   │   ├── 005-github-pages-deployment-automation.md
│   │   ├── 006-mcp-tools-api-design.md
│   │   ├── 007-mcp-prompts-and-resources-integration.md
│   │   ├── 008-intelligent-content-population-engine.md
│   │   ├── 009-content-accuracy-validation-framework.md
│   │   ├── 010-mcp-resource-pattern-redesign.md
│   │   └── README.md
│   ├── api
│   │   ├── .nojekyll
│   │   ├── assets
│   │   │   ├── hierarchy.js
│   │   │   ├── highlight.css
│   │   │   ├── icons.js
│   │   │   ├── icons.svg
│   │   │   ├── main.js
│   │   │   ├── navigation.js
│   │   │   ├── search.js
│   │   │   └── style.css
│   │   ├── hierarchy.html
│   │   ├── index.html
│   │   ├── modules.html
│   │   └── variables
│   │       └── TOOLS.html
│   ├── assets
│   │   └── logo.svg
│   ├── development
│   │   └── MCP_INSPECTOR_TESTING.md
│   ├── docusaurus.config.js
│   ├── explanation
│   │   ├── architecture.md
│   │   └── index.md
│   ├── guides
│   │   ├── link-validation.md
│   │   ├── playwright-integration.md
│   │   └── playwright-testing-workflow.md
│   ├── how-to
│   │   ├── analytics-setup.md
│   │   ├── custom-domains.md
│   │   ├── documentation-freshness-tracking.md
│   │   ├── github-pages-deployment.md
│   │   ├── index.md
│   │   ├── local-testing.md
│   │   ├── performance-optimization.md
│   │   ├── prompting-guide.md
│   │   ├── repository-analysis.md
│   │   ├── seo-optimization.md
│   │   ├── site-monitoring.md
│   │   ├── troubleshooting.md
│   │   └── usage-examples.md
│   ├── index.md
│   ├── knowledge-graph.md
│   ├── package-lock.json
│   ├── package.json
│   ├── phase-2-intelligence.md
│   ├── reference
│   │   ├── api-overview.md
│   │   ├── cli.md
│   │   ├── configuration.md
│   │   ├── deploy-pages.md
│   │   ├── index.md
│   │   ├── mcp-tools.md
│   │   └── prompt-templates.md
│   ├── research
│   │   ├── cross-domain-integration
│   │   │   └── README.md
│   │   ├── domain-1-mcp-architecture
│   │   │   ├── index.md
│   │   │   └── mcp-performance-research.md
│   │   ├── domain-2-repository-analysis
│   │   │   └── README.md
│   │   ├── domain-3-ssg-recommendation
│   │   │   ├── index.md
│   │   │   └── ssg-performance-analysis.md
│   │   ├── domain-4-diataxis-integration
│   │   │   └── README.md
│   │   ├── domain-5-github-deployment
│   │   │   ├── github-pages-security-analysis.md
│   │   │   └── index.md
│   │   ├── domain-6-api-design
│   │   │   └── README.md
│   │   ├── README.md
│   │   ├── research-integration-summary-2025-01-14.md
│   │   ├── research-progress-template.md
│   │   └── research-questions-2025-01-14.md
│   ├── robots.txt
│   ├── sidebars.js
│   ├── sitemap.xml
│   ├── src
│   │   └── css
│   │       └── custom.css
│   └── tutorials
│       ├── development-setup.md
│       ├── environment-setup.md
│       ├── first-deployment.md
│       ├── getting-started.md
│       ├── index.md
│       ├── memory-workflows.md
│       └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│   └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│   ├── benchmarks
│   │   └── performance.ts
│   ├── index.ts
│   ├── memory
│   │   ├── contextual-retrieval.ts
│   │   ├── deployment-analytics.ts
│   │   ├── enhanced-manager.ts
│   │   ├── export-import.ts
│   │   ├── freshness-kg-integration.ts
│   │   ├── index.ts
│   │   ├── integration.ts
│   │   ├── kg-code-integration.ts
│   │   ├── kg-health.ts
│   │   ├── kg-integration.ts
│   │   ├── kg-link-validator.ts
│   │   ├── kg-storage.ts
│   │   ├── knowledge-graph.ts
│   │   ├── learning.ts
│   │   ├── manager.ts
│   │   ├── multi-agent-sharing.ts
│   │   ├── pruning.ts
│   │   ├── schemas.ts
│   │   ├── storage.ts
│   │   ├── temporal-analysis.ts
│   │   ├── user-preferences.ts
│   │   └── visualization.ts
│   ├── prompts
│   │   └── technical-writer-prompts.ts
│   ├── scripts
│   │   └── benchmark.ts
│   ├── templates
│   │   └── playwright
│   │       ├── accessibility.spec.template.ts
│   │       ├── Dockerfile.template
│   │       ├── docs-e2e.workflow.template.yml
│   │       ├── link-validation.spec.template.ts
│   │       └── playwright.config.template.ts
│   ├── tools
│   │   ├── analyze-deployments.ts
│   │   ├── analyze-readme.ts
│   │   ├── analyze-repository.ts
│   │   ├── check-documentation-links.ts
│   │   ├── deploy-pages.ts
│   │   ├── detect-gaps.ts
│   │   ├── evaluate-readme-health.ts
│   │   ├── generate-config.ts
│   │   ├── generate-contextual-content.ts
│   │   ├── generate-llm-context.ts
│   │   ├── generate-readme-template.ts
│   │   ├── generate-technical-writer-prompts.ts
│   │   ├── kg-health-check.ts
│   │   ├── manage-preferences.ts
│   │   ├── manage-sitemap.ts
│   │   ├── optimize-readme.ts
│   │   ├── populate-content.ts
│   │   ├── readme-best-practices.ts
│   │   ├── recommend-ssg.ts
│   │   ├── setup-playwright-tests.ts
│   │   ├── setup-structure.ts
│   │   ├── sync-code-to-docs.ts
│   │   ├── test-local-deployment.ts
│   │   ├── track-documentation-freshness.ts
│   │   ├── update-existing-documentation.ts
│   │   ├── validate-content.ts
│   │   ├── validate-documentation-freshness.ts
│   │   ├── validate-readme-checklist.ts
│   │   └── verify-deployment.ts
│   ├── types
│   │   └── api.ts
│   ├── utils
│   │   ├── ast-analyzer.ts
│   │   ├── code-scanner.ts
│   │   ├── content-extractor.ts
│   │   ├── drift-detector.ts
│   │   ├── freshness-tracker.ts
│   │   ├── language-parsers-simple.ts
│   │   ├── permission-checker.ts
│   │   └── sitemap-generator.ts
│   └── workflows
│       └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│   ├── api
│   │   └── mcp-responses.test.ts
│   ├── benchmarks
│   │   └── performance.test.ts
│   ├── edge-cases
│   │   └── error-handling.test.ts
│   ├── functional
│   │   └── tools.test.ts
│   ├── integration
│   │   ├── kg-documentation-workflow.test.ts
│   │   ├── knowledge-graph-workflow.test.ts
│   │   ├── mcp-readme-tools.test.ts
│   │   ├── memory-mcp-tools.test.ts
│   │   ├── readme-technical-writer.test.ts
│   │   └── workflow.test.ts
│   ├── memory
│   │   ├── contextual-retrieval.test.ts
│   │   ├── enhanced-manager.test.ts
│   │   ├── export-import.test.ts
│   │   ├── freshness-kg-integration.test.ts
│   │   ├── kg-code-integration.test.ts
│   │   ├── kg-health.test.ts
│   │   ├── kg-link-validator.test.ts
│   │   ├── kg-storage-validation.test.ts
│   │   ├── kg-storage.test.ts
│   │   ├── knowledge-graph-enhanced.test.ts
│   │   ├── knowledge-graph.test.ts
│   │   ├── learning.test.ts
│   │   ├── manager-advanced.test.ts
│   │   ├── manager.test.ts
│   │   ├── mcp-resource-integration.test.ts
│   │   ├── mcp-tool-persistence.test.ts
│   │   ├── schemas.test.ts
│   │   ├── storage.test.ts
│   │   ├── temporal-analysis.test.ts
│   │   └── user-preferences.test.ts
│   ├── performance
│   │   ├── memory-load-testing.test.ts
│   │   └── memory-stress-testing.test.ts
│   ├── prompts
│   │   ├── guided-workflow-prompts.test.ts
│   │   └── technical-writer-prompts.test.ts
│   ├── server.test.ts
│   ├── setup.ts
│   ├── tools
│   │   ├── all-tools.test.ts
│   │   ├── analyze-coverage.test.ts
│   │   ├── analyze-deployments.test.ts
│   │   ├── analyze-readme.test.ts
│   │   ├── analyze-repository.test.ts
│   │   ├── check-documentation-links.test.ts
│   │   ├── deploy-pages-kg-retrieval.test.ts
│   │   ├── deploy-pages-tracking.test.ts
│   │   ├── deploy-pages.test.ts
│   │   ├── detect-gaps.test.ts
│   │   ├── evaluate-readme-health.test.ts
│   │   ├── generate-contextual-content.test.ts
│   │   ├── generate-llm-context.test.ts
│   │   ├── generate-readme-template.test.ts
│   │   ├── generate-technical-writer-prompts.test.ts
│   │   ├── kg-health-check.test.ts
│   │   ├── manage-sitemap.test.ts
│   │   ├── optimize-readme.test.ts
│   │   ├── readme-best-practices.test.ts
│   │   ├── recommend-ssg-historical.test.ts
│   │   ├── recommend-ssg-preferences.test.ts
│   │   ├── recommend-ssg.test.ts
│   │   ├── simple-coverage.test.ts
│   │   ├── sync-code-to-docs.test.ts
│   │   ├── test-local-deployment.test.ts
│   │   ├── tool-error-handling.test.ts
│   │   ├── track-documentation-freshness.test.ts
│   │   ├── validate-content.test.ts
│   │   ├── validate-documentation-freshness.test.ts
│   │   └── validate-readme-checklist.test.ts
│   ├── types
│   │   └── type-safety.test.ts
│   └── utils
│       ├── ast-analyzer.test.ts
│       ├── content-extractor.test.ts
│       ├── drift-detector.test.ts
│       ├── freshness-tracker.test.ts
│       └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```

# Files

--------------------------------------------------------------------------------
/tests/integration/workflow.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | // Integration tests for complete documentation workflows
  2 | import { promises as fs } from "fs";
  3 | import path from "path";
  4 | import os from "os";
  5 | import { analyzeRepository } from "../../src/tools/analyze-repository";
  6 | import { recommendSSG } from "../../src/tools/recommend-ssg";
  7 | import { generateConfig } from "../../src/tools/generate-config";
  8 | import { setupStructure } from "../../src/tools/setup-structure";
  9 | import { deployPages } from "../../src/tools/deploy-pages";
 10 | import { verifyDeployment } from "../../src/tools/verify-deployment";
 11 | 
 12 | describe("Integration Testing - Complete Workflows", () => {
 13 |   let tempDir: string;
 14 |   let testProject: string;
 15 | 
 16 |   beforeAll(async () => {
 17 |     tempDir = path.join(os.tmpdir(), "documcp-integration-tests");
 18 |     await fs.mkdir(tempDir, { recursive: true });
 19 | 
 20 |     testProject = await createRealisticProject();
 21 |   });
 22 | 
 23 |   afterAll(async () => {
 24 |     try {
 25 |       await fs.rm(tempDir, { recursive: true, force: true });
 26 |     } catch (error) {
 27 |       console.warn("Failed to cleanup integration test directory:", error);
 28 |     }
 29 |   });
 30 | 
 31 |   describe("End-to-End Documentation Workflow", () => {
 32 |     it("should complete full documentation setup workflow", async () => {
 33 |       const workflowDir = path.join(tempDir, "e2e-workflow");
 34 |       await fs.mkdir(workflowDir, { recursive: true });
 35 | 
 36 |       // Step 1: Analyze Repository
 37 |       console.log("Step 1: Analyzing repository...");
 38 |       const analysisResult = await analyzeRepository({
 39 |         path: testProject,
 40 |         depth: "standard",
 41 |       });
 42 | 
 43 |       expect(analysisResult.content).toBeDefined();
 44 |       expect((analysisResult as any).isError).toBeFalsy();
 45 | 
 46 |       // Extract analysis ID for next step
 47 |       const analysisText = analysisResult.content.find((c) =>
 48 |         c.text.includes('"id"'),
 49 |       );
 50 |       const analysis = JSON.parse(analysisText!.text);
 51 |       const analysisId = analysis.id;
 52 | 
 53 |       expect(analysisId).toBeDefined();
 54 |       expect(analysis.dependencies.ecosystem).toBe("javascript");
 55 | 
 56 |       // Step 2: Get SSG Recommendation
 57 |       console.log("Step 2: Getting SSG recommendation...");
 58 |       const recommendationResult = await recommendSSG({
 59 |         analysisId: analysisId,
 60 |         preferences: {
 61 |           priority: "features",
 62 |           ecosystem: "javascript",
 63 |         },
 64 |       });
 65 | 
 66 |       expect(recommendationResult.content).toBeDefined();
 67 |       const recommendationText = recommendationResult.content.find((c) =>
 68 |         c.text.includes('"recommended"'),
 69 |       );
 70 |       const recommendation = JSON.parse(recommendationText!.text);
 71 | 
 72 |       expect(recommendation.recommended).toBeDefined();
 73 |       expect(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"]).toContain(
 74 |         recommendation.recommended,
 75 |       );
 76 | 
 77 |       // Step 3: Generate Configuration
 78 |       console.log("Step 3: Generating configuration...");
 79 |       const configResult = await generateConfig({
 80 |         ssg: recommendation.recommended,
 81 |         projectName: "Integration Test Project",
 82 |         projectDescription: "End-to-end integration test",
 83 |         outputPath: workflowDir,
 84 |       });
 85 | 
 86 |       expect(configResult.content).toBeDefined();
 87 |       expect((configResult as any).isError).toBeFalsy();
 88 | 
 89 |       // Verify config files were created
 90 |       const files = await fs.readdir(workflowDir);
 91 |       expect(files.length).toBeGreaterThan(0);
 92 | 
 93 |       // Step 4: Setup Documentation Structure
 94 |       console.log("Step 4: Setting up documentation structure...");
 95 |       const docsDir = path.join(workflowDir, "docs");
 96 |       const structureResult = await setupStructure({
 97 |         path: docsDir,
 98 |         ssg: recommendation.recommended,
 99 |         includeExamples: true,
100 |       });
101 | 
102 |       expect(structureResult.content).toBeDefined();
103 |       expect((structureResult as any).isError).toBeFalsy();
104 | 
105 |       // Verify Diataxis structure was created
106 |       const diataxisCategories = [
107 |         "tutorials",
108 |         "how-to",
109 |         "reference",
110 |         "explanation",
111 |       ];
112 |       for (const category of diataxisCategories) {
113 |         const categoryPath = path.join(docsDir, category);
114 |         expect(
115 |           await fs
116 |             .access(categoryPath)
117 |             .then(() => true)
118 |             .catch(() => false),
119 |         ).toBe(true);
120 |       }
121 | 
122 |       // Step 5: Setup Deployment
123 |       console.log("Step 5: Setting up deployment...");
124 |       const deploymentResult = await deployPages({
125 |         repository: workflowDir,
126 |         ssg: recommendation.recommended,
127 |         branch: "gh-pages",
128 |         customDomain: "docs.example.com",
129 |       });
130 | 
131 |       expect(deploymentResult.content).toBeDefined();
132 |       expect((deploymentResult as any).isError).toBeFalsy();
133 | 
134 |       // Verify workflow and CNAME were created
135 |       const workflowPath = path.join(
136 |         workflowDir,
137 |         ".github",
138 |         "workflows",
139 |         "deploy-docs.yml",
140 |       );
141 |       const cnamePath = path.join(workflowDir, "CNAME");
142 | 
143 |       expect(
144 |         await fs
145 |           .access(workflowPath)
146 |           .then(() => true)
147 |           .catch(() => false),
148 |       ).toBe(true);
149 |       expect(
150 |         await fs
151 |           .access(cnamePath)
152 |           .then(() => true)
153 |           .catch(() => false),
154 |       ).toBe(true);
155 | 
156 |       // Step 6: Verify Deployment Setup
157 |       console.log("Step 6: Verifying deployment setup...");
158 |       const verificationResult = await verifyDeployment({
159 |         repository: workflowDir,
160 |         url: "https://docs.example.com",
161 |       });
162 | 
163 |       expect(verificationResult.content).toBeDefined();
164 | 
165 |       // Parse the JSON response to check actual verification data
166 |       const verificationData = JSON.parse(verificationResult.content[0].text);
167 |       const passCount = verificationData.summary.passed;
168 |       const failCount = verificationData.summary.failed;
169 | 
170 |       console.log("Pass count:", passCount, "Fail count:", failCount);
171 | 
172 |       // Should have at least some passing checks
173 |       expect(passCount).toBeGreaterThan(0);
174 |       expect(passCount).toBeGreaterThanOrEqual(failCount);
175 | 
176 |       console.log("✅ End-to-end workflow completed successfully!");
177 |     }, 30000); // 30 second timeout for full workflow
178 |   });
179 | 
180 |   describe("Workflow Variations", () => {
181 |     it("should handle Python project workflow", async () => {
182 |       const pythonProject = await createPythonProject();
183 | 
184 |       // Analyze Python project
185 |       const analysis = await analyzeRepository({
186 |         path: pythonProject,
187 |         depth: "standard",
188 |       });
189 |       const analysisData = JSON.parse(
190 |         analysis.content.find((c) => c.text.includes('"ecosystem"'))!.text,
191 |       );
192 | 
193 |       expect(analysisData.dependencies.ecosystem).toBe("python");
194 | 
195 |       // Get recommendation (likely MkDocs for Python)
196 |       const recommendation = await recommendSSG({
197 |         analysisId: analysisData.id,
198 |       });
199 |       // const recData = JSON.parse(recommendation.content.find(c => c.text.includes('"recommended"'))!.text);
200 | 
201 |       // Generate MkDocs config
202 |       const configDir = path.join(tempDir, "python-workflow");
203 |       await fs.mkdir(configDir, { recursive: true });
204 | 
205 |       const config = await generateConfig({
206 |         ssg: "mkdocs",
207 |         projectName: "Python Test Project",
208 |         outputPath: configDir,
209 |       });
210 | 
211 |       // Verify MkDocs-specific files
212 |       expect(
213 |         await fs
214 |           .access(path.join(configDir, "mkdocs.yml"))
215 |           .then(() => true)
216 |           .catch(() => false),
217 |       ).toBe(true);
218 |       expect(
219 |         await fs
220 |           .access(path.join(configDir, "requirements.txt"))
221 |           .then(() => true)
222 |           .catch(() => false),
223 |       ).toBe(true);
224 |     });
225 | 
226 |     it("should handle different SSG preferences", async () => {
227 |       const analysisId = "test-preferences-123";
228 | 
229 |       // Test simplicity preference
230 |       const simplicityRec = await recommendSSG({
231 |         analysisId,
232 |         preferences: { priority: "simplicity" },
233 |       });
234 | 
235 |       // Test performance preference
236 |       const performanceRec = await recommendSSG({
237 |         analysisId,
238 |         preferences: { priority: "performance" },
239 |       });
240 | 
241 |       // Test features preference
242 |       const featuresRec = await recommendSSG({
243 |         analysisId,
244 |         preferences: { priority: "features" },
245 |       });
246 | 
247 |       // All should provide valid recommendations
248 |       [simplicityRec, performanceRec, featuresRec].forEach((result) => {
249 |         expect(result.content).toBeDefined();
250 |         const rec = JSON.parse(
251 |           result.content.find((c) => c.text.includes('"recommended"'))!.text,
252 |         );
253 |         expect([
254 |           "jekyll",
255 |           "hugo",
256 |           "docusaurus",
257 |           "mkdocs",
258 |           "eleventy",
259 |         ]).toContain(rec.recommended);
260 |       });
261 |     });
262 | 
263 |     it("should handle deployment workflow variations", async () => {
264 |       const deploymentDir = path.join(tempDir, "deployment-variations");
265 |       await fs.mkdir(deploymentDir, { recursive: true });
266 | 
267 |       // Test different SSGs
268 |       const ssgs = [
269 |         "docusaurus",
270 |         "mkdocs",
271 |         "hugo",
272 |         "jekyll",
273 |         "eleventy",
274 |       ] as const;
275 | 
276 |       for (const ssg of ssgs) {
277 |         const ssgDir = path.join(deploymentDir, ssg);
278 |         await fs.mkdir(ssgDir, { recursive: true });
279 | 
280 |         const result = await deployPages({
281 |           repository: ssgDir,
282 |           ssg: ssg,
283 |           branch: "main",
284 |         });
285 | 
286 |         expect(result.content).toBeDefined();
287 | 
288 |         const workflowPath = path.join(
289 |           ssgDir,
290 |           ".github",
291 |           "workflows",
292 |           "deploy-docs.yml",
293 |         );
294 |         expect(
295 |           await fs
296 |             .access(workflowPath)
297 |             .then(() => true)
298 |             .catch(() => false),
299 |         ).toBe(true);
300 | 
301 |         const workflowContent = await fs.readFile(workflowPath, "utf-8");
302 |         // Handle different SSG name formats
303 |         const expectedName =
304 |           ssg === "mkdocs"
305 |             ? "Deploy MkDocs"
306 |             : `Deploy ${ssg.charAt(0).toUpperCase() + ssg.slice(1)}`;
307 |         expect(workflowContent).toContain(expectedName);
308 | 
309 |         // Verify SSG-specific workflow content
310 |         switch (ssg) {
311 |           case "docusaurus":
312 |             expect(workflowContent).toContain("npm run build");
313 |             expect(workflowContent).toContain("id-token: write"); // OIDC compliance
314 |             break;
315 |           case "mkdocs":
316 |             expect(workflowContent).toContain("mkdocs gh-deploy");
317 |             expect(workflowContent).toContain("python");
318 |             break;
319 |           case "hugo":
320 |             expect(workflowContent).toContain("peaceiris/actions-hugo");
321 |             expect(workflowContent).toContain("hugo --minify");
322 |             break;
323 |           case "jekyll":
324 |             expect(workflowContent).toContain("bundle exec jekyll build");
325 |             expect(workflowContent).toContain("ruby");
326 |             break;
327 |           case "eleventy":
328 |             expect(workflowContent).toContain("npm run build");
329 |             break;
330 |         }
331 |       }
332 |     });
333 |   });
334 | 
335 |   describe("Error Handling and Recovery", () => {
336 |     it("should handle missing repository gracefully", async () => {
337 |       const result = await analyzeRepository({
338 |         path: "/non/existent/path",
339 |         depth: "standard",
340 |       });
341 | 
342 |       expect((result as any).isError).toBe(true);
343 |       expect(result.content[0].text).toContain("Error:");
344 |     });
345 | 
346 |     it("should handle invalid configuration gracefully", async () => {
347 |       const invalidDir = "/invalid/write/path/that/should/fail";
348 | 
349 |       const result = await generateConfig({
350 |         ssg: "docusaurus",
351 |         projectName: "Test",
352 |         outputPath: invalidDir,
353 |       });
354 | 
355 |       expect((result as any).isError).toBe(true);
356 |       expect(result.content[0].text).toContain("Error:");
357 |     });
358 | 
359 |     it("should handle structure setup in non-existent directory", async () => {
360 |       // This should actually work because setupStructure creates directories
361 |       const result = await setupStructure({
362 |         path: path.join(tempDir, "new-structure-dir"),
363 |         ssg: "docusaurus",
364 |         includeExamples: false,
365 |       });
366 | 
367 |       expect((result as any).isError).toBeFalsy();
368 |       expect(result.content).toBeDefined();
369 |     });
370 | 
371 |     it("should provide helpful error messages and resolutions", async () => {
372 |       const errorResult = await analyzeRepository({
373 |         path: "/definitely/does/not/exist",
374 |         depth: "standard",
375 |       });
376 | 
377 |       expect((errorResult as any).isError).toBe(true);
378 |       const errorText = errorResult.content.map((c) => c.text).join(" ");
379 |       // Check for resolution in JSON format (lowercase) or formatted text (capitalized)
380 |       expect(errorText.toLowerCase()).toContain("resolution");
381 |       expect(errorText.toLowerCase()).toContain("ensure");
382 |     });
383 |   });
384 | 
385 |   describe("Performance and Resource Management", () => {
386 |     it("should handle large repository analysis within performance bounds", async () => {
387 |       const largeRepo = await createLargeRepository();
388 | 
389 |       const startTime = Date.now();
390 |       const result = await analyzeRepository({
391 |         path: largeRepo,
392 |         depth: "standard",
393 |       });
394 |       const executionTime = Date.now() - startTime;
395 | 
396 |       // Should complete within reasonable time (large repo target is 60s)
397 |       expect(executionTime).toBeLessThan(60000);
398 |       expect(result.content).toBeDefined();
399 | 
400 |       const analysisData = JSON.parse(
401 |         result.content.find((c) => c.text.includes('"totalFiles"'))!.text,
402 |       );
403 |       expect(analysisData.structure.totalFiles).toBeGreaterThan(1000);
404 |     }, 65000); // 65s timeout for large repo test
405 | 
406 |     it("should clean up resources properly", async () => {
407 |       const tempWorkflowDir = path.join(tempDir, "resource-cleanup");
408 | 
409 |       // Run multiple operations
410 |       await generateConfig({
411 |         ssg: "docusaurus",
412 |         projectName: "Cleanup Test",
413 |         outputPath: tempWorkflowDir,
414 |       });
415 | 
416 |       await setupStructure({
417 |         path: path.join(tempWorkflowDir, "docs"),
418 |         ssg: "docusaurus",
419 |         includeExamples: true,
420 |       });
421 | 
422 |       // Verify files were created
423 |       const files = await fs.readdir(tempWorkflowDir);
424 |       expect(files.length).toBeGreaterThan(0);
425 | 
426 |       // Cleanup should work
427 |       await fs.rm(tempWorkflowDir, { recursive: true, force: true });
428 |       expect(
429 |         await fs
430 |           .access(tempWorkflowDir)
431 |           .then(() => false)
432 |           .catch(() => true),
433 |       ).toBe(true);
434 |     });
435 |   });
436 | 
437 |   // Helper functions
438 |   async function createRealisticProject(): Promise<string> {
439 |     const projectPath = path.join(tempDir, "realistic-project");
440 |     await fs.mkdir(projectPath, { recursive: true });
441 | 
442 |     // package.json with realistic dependencies
443 |     const packageJson = {
444 |       name: "realistic-test-project",
445 |       version: "2.1.0",
446 |       description: "A realistic Node.js project for testing DocuMCP",
447 |       main: "src/index.js",
448 |       scripts: {
449 |         start: "node src/index.js",
450 |         dev: "nodemon src/index.js",
451 |         test: "jest",
452 |         build: "webpack --mode production",
453 |         lint: "eslint src/",
454 |         docs: "jsdoc src/ -d docs/",
455 |       },
456 |       dependencies: {
457 |         express: "^4.18.2",
458 |         lodash: "^4.17.21",
459 |         axios: "^1.4.0",
460 |         moment: "^2.29.4",
461 |         "body-parser": "^1.20.2",
462 |       },
463 |       devDependencies: {
464 |         jest: "^29.5.0",
465 |         nodemon: "^2.0.22",
466 |         eslint: "^8.42.0",
467 |         webpack: "^5.86.0",
468 |         jsdoc: "^4.0.2",
469 |       },
470 |       keywords: ["node", "express", "api", "web"],
471 |       author: "Test Author",
472 |       license: "MIT",
473 |     };
474 | 
475 |     await fs.writeFile(
476 |       path.join(projectPath, "package.json"),
477 |       JSON.stringify(packageJson, null, 2),
478 |     );
479 | 
480 |     // Source directory structure
481 |     await fs.mkdir(path.join(projectPath, "src"), { recursive: true });
482 |     await fs.mkdir(path.join(projectPath, "src", "controllers"), {
483 |       recursive: true,
484 |     });
485 |     await fs.mkdir(path.join(projectPath, "src", "models"), {
486 |       recursive: true,
487 |     });
488 |     await fs.mkdir(path.join(projectPath, "src", "routes"), {
489 |       recursive: true,
490 |     });
491 |     await fs.mkdir(path.join(projectPath, "src", "utils"), { recursive: true });
492 | 
493 |     // Main application files
494 |     await fs.writeFile(
495 |       path.join(projectPath, "src", "index.js"),
496 |       `const express = require('express');
497 | const bodyParser = require('body-parser');
498 | const routes = require('./routes');
499 | 
500 | const app = express();
501 | app.use(bodyParser.json());
502 | app.use('/api', routes);
503 | 
504 | const PORT = process.env.PORT || 3000;
505 | app.listen(PORT, () => {
506 |   console.log(\`Server running on port \${PORT}\`);
507 | });`,
508 |     );
509 | 
510 |     await fs.writeFile(
511 |       path.join(projectPath, "src", "routes", "index.js"),
512 |       `const express = require('express');
513 | const router = express.Router();
514 | 
515 | router.get('/health', (req, res) => {
516 |   res.json({ status: 'OK', timestamp: new Date().toISOString() });
517 | });
518 | 
519 | module.exports = router;`,
520 |     );
521 | 
522 |     await fs.writeFile(
523 |       path.join(projectPath, "src", "controllers", "userController.js"),
524 |       `const { getUserById, createUser } = require('../models/user');
525 | 
526 | async function getUser(req, res) {
527 |   const user = await getUserById(req.params.id);
528 |   res.json(user);
529 | }
530 | 
531 | module.exports = { getUser };`,
532 |     );
533 | 
534 |     await fs.writeFile(
535 |       path.join(projectPath, "src", "models", "user.js"),
536 |       `const users = [];
537 | 
538 | function getUserById(id) {
539 |   return users.find(user => user.id === id);
540 | }
541 | 
542 | function createUser(userData) {
543 |   const user = { id: Date.now(), ...userData };
544 |   users.push(user);
545 |   return user;
546 | }
547 | 
548 | module.exports = { getUserById, createUser };`,
549 |     );
550 | 
551 |     await fs.writeFile(
552 |       path.join(projectPath, "src", "utils", "helpers.js"),
553 |       `const _ = require('lodash');
554 | const moment = require('moment');
555 | 
556 | function formatDate(date) {
557 |   return moment(date).format('YYYY-MM-DD HH:mm:ss');
558 | }
559 | 
560 | function validateEmail(email) {
561 |   return /^[^\\s@]+@[^\\s@]+\\.[^\\s@]+$/.test(email);
562 | }
563 | 
564 | module.exports = { formatDate, validateEmail };`,
565 |     );
566 | 
567 |     // Test directory
568 |     await fs.mkdir(path.join(projectPath, "tests"), { recursive: true });
569 |     await fs.writeFile(
570 |       path.join(projectPath, "tests", "app.test.js"),
571 |       `const { formatDate, validateEmail } = require('../src/utils/helpers');
572 | 
573 | describe('Helper Functions', () => {
574 |   test('formatDate should format date correctly', () => {
575 |     const date = new Date('2023-01-01');
576 |     expect(formatDate(date)).toMatch(/\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}/);
577 |   });
578 | 
579 |   test('validateEmail should validate email correctly', () => {
580 |     expect(validateEmail('test@example.com')).toBe(true);
581 |     expect(validateEmail('invalid-email')).toBe(false);
582 |   });
583 | });`,
584 |     );
585 | 
586 |     // Configuration files
587 |     await fs.writeFile(
588 |       path.join(projectPath, ".eslintrc.js"),
589 |       `module.exports = {
590 |   env: { node: true, es2021: true },
591 |   extends: ['eslint:recommended'],
592 |   parserOptions: { ecmaVersion: 12, sourceType: 'module' },
593 |   rules: { 'no-unused-vars': 'warn' }
594 | };`,
595 |     );
596 | 
597 |     await fs.writeFile(
598 |       path.join(projectPath, "jest.config.js"),
599 |       `module.exports = {
600 |   testEnvironment: 'node',
601 |   collectCoverageFrom: ['src/**/*.js'],
602 |   testMatch: ['**/tests/**/*.test.js']
603 | };`,
604 |     );
605 | 
606 |     // Documentation
607 |     await fs.writeFile(
608 |       path.join(projectPath, "README.md"),
609 |       `# Realistic Test Project
610 | 
611 | A comprehensive Node.js application for testing DocuMCP functionality.
612 | 
613 | ## Features
614 | 
615 | - Express.js web server
616 | - RESTful API endpoints
617 | - User management system
618 | - Comprehensive test suite
619 | - ESLint code quality
620 | - JSDoc documentation
621 | 
622 | ## Getting Started
623 | 
624 | 1. Install dependencies: \`npm install\`
625 | 2. Start development server: \`npm run dev\`
626 | 3. Run tests: \`npm test\`
627 | 
628 | ## API Endpoints
629 | 
630 | - \`GET /api/health\` - Health check endpoint
631 | - \`GET /api/users/:id\` - Get user by ID
632 | 
633 | ## Contributing
634 | 
635 | Please read CONTRIBUTING.md for contribution guidelines.`,
636 |     );
637 | 
638 |     await fs.writeFile(
639 |       path.join(projectPath, "CONTRIBUTING.md"),
640 |       `# Contributing to Realistic Test Project
641 | 
642 | ## Development Setup
643 | 
644 | 1. Fork the repository
645 | 2. Clone your fork
646 | 3. Install dependencies
647 | 4. Create a feature branch
648 | 5. Make changes and test
649 | 6. Submit a pull request
650 | 
651 | ## Code Style
652 | 
653 | - Follow ESLint configuration
654 | - Write tests for new features
655 | - Update documentation as needed`,
656 |     );
657 | 
658 |     await fs.writeFile(
659 |       path.join(projectPath, "LICENSE"),
660 |       "MIT License\n\nCopyright (c) 2023 Test Author",
661 |     );
662 | 
663 |     // CI/CD workflow
664 |     await fs.mkdir(path.join(projectPath, ".github", "workflows"), {
665 |       recursive: true,
666 |     });
667 |     await fs.writeFile(
668 |       path.join(projectPath, ".github", "workflows", "ci.yml"),
669 |       `name: CI
670 | 
671 | on: [push, pull_request]
672 | 
673 | jobs:
674 |   test:
675 |     runs-on: ubuntu-latest
676 |     steps:
677 |       - uses: actions/checkout@v3
678 |       - uses: actions/setup-node@v3
679 |         with:
680 |           node-version: '20'
681 |       - run: npm ci
682 |       - run: npm run lint
683 |       - run: npm test
684 |       - run: npm run build`,
685 |     );
686 | 
687 |     return projectPath;
688 |   }
689 | 
690 |   async function createPythonProject(): Promise<string> {
691 |     const projectPath = path.join(tempDir, "python-project");
692 |     await fs.mkdir(projectPath, { recursive: true });
693 | 
694 |     // Python project structure
695 |     await fs.writeFile(
696 |       path.join(projectPath, "requirements.txt"),
697 |       `flask==2.3.2
698 | requests==2.31.0
699 | pytest==7.4.0
700 | black==23.3.0
701 | flake8==6.0.0`,
702 |     );
703 | 
704 |     await fs.mkdir(path.join(projectPath, "src"), { recursive: true });
705 |     await fs.writeFile(
706 |       path.join(projectPath, "src", "app.py"),
707 |       `from flask import Flask, jsonify
708 | import requests
709 | 
710 | app = Flask(__name__)
711 | 
712 | @app.route('/health')
713 | def health():
714 |     return jsonify({'status': 'OK'})
715 | 
716 | if __name__ == '__main__':
717 |     app.run(debug=True)`,
718 |     );
719 | 
720 |     await fs.mkdir(path.join(projectPath, "tests"), { recursive: true });
721 |     await fs.writeFile(
722 |       path.join(projectPath, "tests", "test_app.py"),
723 |       `import pytest
724 | from src.app import app
725 | 
726 | def test_health():
727 |     client = app.test_client()
728 |     response = client.get('/health')
729 |     assert response.status_code == 200`,
730 |     );
731 | 
732 |     await fs.writeFile(
733 |       path.join(projectPath, "README.md"),
734 |       "# Python Test Project\n\nA Flask application for testing Python project analysis.",
735 |     );
736 | 
737 |     return projectPath;
738 |   }
739 | 
740 |   async function createLargeRepository(): Promise<string> {
741 |     const repoPath = path.join(tempDir, "large-repository");
742 |     await fs.mkdir(repoPath, { recursive: true });
743 | 
744 |     // Create a repository with 1200+ files to trigger large repo categorization
745 |     await fs.writeFile(
746 |       path.join(repoPath, "package.json"),
747 |       '{"name": "large-repo"}',
748 |     );
749 | 
750 |     for (let i = 0; i < 30; i++) {
751 |       const dirPath = path.join(repoPath, `module-${i}`);
752 |       await fs.mkdir(dirPath, { recursive: true });
753 | 
754 |       for (let j = 0; j < 40; j++) {
755 |         const fileName = `component-${j}.js`;
756 |         const content = `// Component ${i}-${j}
757 | export default function Component${i}${j}() {
758 |   return <div>Component ${i}-${j}</div>;
759 | }`;
760 |         await fs.writeFile(path.join(dirPath, fileName), content);
761 |       }
762 |     }
763 | 
764 |     await fs.writeFile(
765 |       path.join(repoPath, "README.md"),
766 |       "# Large Repository\n\nThis repository has 1200+ files for performance testing.",
767 |     );
768 | 
769 |     return repoPath;
770 |   }
771 | });
772 | 
```

--------------------------------------------------------------------------------
/tests/memory/freshness-kg-integration.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Tests for Documentation Freshness Knowledge Graph Integration
  3 |  */
  4 | 
  5 | import { promises as fs } from "fs";
  6 | import path from "path";
  7 | import { tmpdir } from "os";
  8 | import {
  9 |   storeFreshnessEvent,
 10 |   updateFreshnessEvent,
 11 |   getFreshnessHistory,
 12 |   getStalenessInsights,
 13 |   compareFreshnessAcrossProjects,
 14 | } from "../../src/memory/freshness-kg-integration.js";
 15 | import type { FreshnessScanReport } from "../../src/utils/freshness-tracker.js";
 16 | 
 17 | describe("Freshness Knowledge Graph Integration", () => {
 18 |   let testDir: string;
 19 | 
 20 |   beforeEach(async () => {
 21 |     // Create temporary test directory
 22 |     testDir = path.join(tmpdir(), `freshness-kg-test-${Date.now()}`);
 23 |     await fs.mkdir(testDir, { recursive: true });
 24 | 
 25 |     // Set storage directory to test directory
 26 |     process.env.DOCUMCP_STORAGE_DIR = path.join(testDir, ".documcp/memory");
 27 |   });
 28 | 
 29 |   afterEach(async () => {
 30 |     // Clean up test directory
 31 |     try {
 32 |       await fs.rm(testDir, { recursive: true, force: true });
 33 |     } catch (error) {
 34 |       // Ignore cleanup errors
 35 |     }
 36 |     delete process.env.DOCUMCP_STORAGE_DIR;
 37 |   });
 38 | 
 39 |   describe("storeFreshnessEvent", () => {
 40 |     it("should store a freshness scan event in KG", async () => {
 41 |       const projectPath = path.join(testDir, "test-project");
 42 |       const docsPath = path.join(projectPath, "docs");
 43 | 
 44 |       const report: FreshnessScanReport = {
 45 |         docsPath,
 46 |         scannedAt: new Date().toISOString(),
 47 |         totalFiles: 10,
 48 |         filesWithMetadata: 8,
 49 |         filesWithoutMetadata: 2,
 50 |         freshFiles: 6,
 51 |         warningFiles: 2,
 52 |         staleFiles: 1,
 53 |         criticalFiles: 1,
 54 |         files: [
 55 |           {
 56 |             filePath: path.join(docsPath, "page1.md"),
 57 |             relativePath: "page1.md",
 58 |             hasMetadata: true,
 59 |             isStale: false,
 60 |             stalenessLevel: "fresh",
 61 |             ageInMs: 1000 * 60 * 60 * 24, // 1 day
 62 |             ageFormatted: "1 day",
 63 |           },
 64 |           {
 65 |             filePath: path.join(docsPath, "page2.md"),
 66 |             relativePath: "page2.md",
 67 |             hasMetadata: true,
 68 |             isStale: true,
 69 |             stalenessLevel: "critical",
 70 |             ageInMs: 1000 * 60 * 60 * 24 * 100, // 100 days
 71 |             ageFormatted: "100 days",
 72 |           },
 73 |         ],
 74 |         thresholds: {
 75 |           warning: { value: 7, unit: "days" },
 76 |           stale: { value: 30, unit: "days" },
 77 |           critical: { value: 90, unit: "days" },
 78 |         },
 79 |       };
 80 | 
 81 |       const eventId = await storeFreshnessEvent(
 82 |         projectPath,
 83 |         docsPath,
 84 |         report,
 85 |         "scan",
 86 |       );
 87 | 
 88 |       expect(eventId).toBeDefined();
 89 |       expect(eventId).toContain("freshness_event:");
 90 |     });
 91 | 
 92 |     it("should store event with different event types", async () => {
 93 |       const projectPath = path.join(testDir, "test-project");
 94 |       const docsPath = path.join(projectPath, "docs");
 95 | 
 96 |       const report: FreshnessScanReport = {
 97 |         docsPath,
 98 |         scannedAt: new Date().toISOString(),
 99 |         totalFiles: 5,
100 |         filesWithMetadata: 5,
101 |         filesWithoutMetadata: 0,
102 |         freshFiles: 5,
103 |         warningFiles: 0,
104 |         staleFiles: 0,
105 |         criticalFiles: 0,
106 |         files: [],
107 |         thresholds: {
108 |           warning: { value: 7, unit: "days" },
109 |           stale: { value: 30, unit: "days" },
110 |           critical: { value: 90, unit: "days" },
111 |         },
112 |       };
113 | 
114 |       const initEventId = await storeFreshnessEvent(
115 |         projectPath,
116 |         docsPath,
117 |         report,
118 |         "initialization",
119 |       );
120 |       expect(initEventId).toBeDefined();
121 | 
122 |       const updateEventId = await storeFreshnessEvent(
123 |         projectPath,
124 |         docsPath,
125 |         report,
126 |         "update",
127 |       );
128 |       expect(updateEventId).toBeDefined();
129 | 
130 |       const validationEventId = await storeFreshnessEvent(
131 |         projectPath,
132 |         docsPath,
133 |         report,
134 |         "validation",
135 |       );
136 |       expect(validationEventId).toBeDefined();
137 |     });
138 |   });
139 | 
140 |   describe("getFreshnessHistory", () => {
141 |     it("should retrieve freshness event history", async () => {
142 |       const projectPath = path.join(testDir, "test-project");
143 |       const docsPath = path.join(projectPath, "docs");
144 | 
145 |       const report: FreshnessScanReport = {
146 |         docsPath,
147 |         scannedAt: new Date().toISOString(),
148 |         totalFiles: 5,
149 |         filesWithMetadata: 5,
150 |         filesWithoutMetadata: 0,
151 |         freshFiles: 5,
152 |         warningFiles: 0,
153 |         staleFiles: 0,
154 |         criticalFiles: 0,
155 |         files: [],
156 |         thresholds: {
157 |           warning: { value: 7, unit: "days" },
158 |           stale: { value: 30, unit: "days" },
159 |           critical: { value: 90, unit: "days" },
160 |         },
161 |       };
162 | 
163 |       // Store multiple events
164 |       await storeFreshnessEvent(projectPath, docsPath, report, "scan");
165 |       await new Promise((resolve) => setTimeout(resolve, 10)); // Small delay
166 |       await storeFreshnessEvent(projectPath, docsPath, report, "update");
167 | 
168 |       const history = await getFreshnessHistory(projectPath, 10);
169 | 
170 |       expect(history).toBeDefined();
171 |       expect(history.length).toBeGreaterThanOrEqual(0);
172 |     });
173 | 
174 |     it("should return empty array for project with no history", async () => {
175 |       const projectPath = path.join(testDir, "new-project");
176 | 
177 |       const history = await getFreshnessHistory(projectPath, 10);
178 | 
179 |       expect(history).toEqual([]);
180 |     });
181 |   });
182 | 
183 |   describe("getStalenessInsights", () => {
184 |     it("should return insights for project with no history", async () => {
185 |       const projectPath = path.join(testDir, "new-project");
186 | 
187 |       const insights = await getStalenessInsights(projectPath);
188 | 
189 |       expect(insights).toBeDefined();
190 |       expect(insights.totalEvents).toBe(0);
191 |       expect(insights.averageImprovementScore).toBe(0);
192 |       expect(insights.trend).toBe("stable");
193 |       expect(insights.currentStatus).toBeNull();
194 |       expect(insights.recommendations.length).toBeGreaterThan(0);
195 |       expect(insights.recommendations[0]).toContain(
196 |         "No freshness tracking history found",
197 |       );
198 |     });
199 | 
200 |     it("should calculate insights from event history", async () => {
201 |       const projectPath = path.join(testDir, "test-project");
202 |       const docsPath = path.join(projectPath, "docs");
203 | 
204 |       const report: FreshnessScanReport = {
205 |         docsPath,
206 |         scannedAt: new Date().toISOString(),
207 |         totalFiles: 10,
208 |         filesWithMetadata: 10,
209 |         filesWithoutMetadata: 0,
210 |         freshFiles: 8,
211 |         warningFiles: 1,
212 |         staleFiles: 1,
213 |         criticalFiles: 0,
214 |         files: [],
215 |         thresholds: {
216 |           warning: { value: 7, unit: "days" },
217 |           stale: { value: 30, unit: "days" },
218 |           critical: { value: 90, unit: "days" },
219 |         },
220 |       };
221 | 
222 |       await storeFreshnessEvent(projectPath, docsPath, report, "scan");
223 | 
224 |       const insights = await getStalenessInsights(projectPath);
225 | 
226 |       expect(insights).toBeDefined();
227 |       expect(insights.trend).toMatch(/improving|declining|stable/);
228 |       expect(insights.recommendations).toBeDefined();
229 |       expect(Array.isArray(insights.recommendations)).toBe(true);
230 |     });
231 | 
232 |     it("should detect improving trend", async () => {
233 |       const projectPath = path.join(testDir, "test-project");
234 |       const docsPath = path.join(projectPath, "docs");
235 | 
236 |       // Store older event with worse metrics
237 |       const olderReport: FreshnessScanReport = {
238 |         docsPath,
239 |         scannedAt: new Date(Date.now() - 1000 * 60 * 60 * 24).toISOString(),
240 |         totalFiles: 10,
241 |         filesWithMetadata: 10,
242 |         filesWithoutMetadata: 0,
243 |         freshFiles: 5,
244 |         warningFiles: 2,
245 |         staleFiles: 2,
246 |         criticalFiles: 1,
247 |         files: [],
248 |         thresholds: {
249 |           warning: { value: 7, unit: "days" },
250 |           stale: { value: 30, unit: "days" },
251 |           critical: { value: 90, unit: "days" },
252 |         },
253 |       };
254 | 
255 |       await storeFreshnessEvent(projectPath, docsPath, olderReport, "scan");
256 |       await new Promise((resolve) => setTimeout(resolve, 10));
257 | 
258 |       // Store newer event with better metrics
259 |       const newerReport: FreshnessScanReport = {
260 |         ...olderReport,
261 |         scannedAt: new Date().toISOString(),
262 |         freshFiles: 9,
263 |         warningFiles: 1,
264 |         staleFiles: 0,
265 |         criticalFiles: 0,
266 |       };
267 | 
268 |       await storeFreshnessEvent(projectPath, docsPath, newerReport, "scan");
269 | 
270 |       const insights = await getStalenessInsights(projectPath);
271 | 
272 |       expect(insights.trend).toMatch(/improving|stable/);
273 |     });
274 | 
275 |     it("should generate recommendations for critical files", async () => {
276 |       const projectPath = path.join(testDir, "test-project");
277 |       const docsPath = path.join(projectPath, "docs");
278 | 
279 |       const report: FreshnessScanReport = {
280 |         docsPath,
281 |         scannedAt: new Date().toISOString(),
282 |         totalFiles: 10,
283 |         filesWithMetadata: 10,
284 |         filesWithoutMetadata: 0,
285 |         freshFiles: 5,
286 |         warningFiles: 2,
287 |         staleFiles: 1,
288 |         criticalFiles: 2,
289 |         files: [],
290 |         thresholds: {
291 |           warning: { value: 7, unit: "days" },
292 |           stale: { value: 30, unit: "days" },
293 |           critical: { value: 90, unit: "days" },
294 |         },
295 |       };
296 | 
297 |       await storeFreshnessEvent(projectPath, docsPath, report, "scan");
298 | 
299 |       const insights = await getStalenessInsights(projectPath);
300 | 
301 |       expect(insights).toBeDefined();
302 |       expect(insights.recommendations).toBeDefined();
303 |       expect(Array.isArray(insights.recommendations)).toBe(true);
304 |     });
305 | 
306 |     it("should recommend validation for files without metadata", async () => {
307 |       const projectPath = path.join(testDir, "test-project");
308 |       const docsPath = path.join(projectPath, "docs");
309 | 
310 |       const report: FreshnessScanReport = {
311 |         docsPath,
312 |         scannedAt: new Date().toISOString(),
313 |         totalFiles: 10,
314 |         filesWithMetadata: 7,
315 |         filesWithoutMetadata: 3,
316 |         freshFiles: 7,
317 |         warningFiles: 0,
318 |         staleFiles: 0,
319 |         criticalFiles: 0,
320 |         files: [],
321 |         thresholds: {
322 |           warning: { value: 7, unit: "days" },
323 |           stale: { value: 30, unit: "days" },
324 |           critical: { value: 90, unit: "days" },
325 |         },
326 |       };
327 | 
328 |       await storeFreshnessEvent(projectPath, docsPath, report, "scan");
329 | 
330 |       const insights = await getStalenessInsights(projectPath);
331 | 
332 |       expect(insights).toBeDefined();
333 |       expect(insights.recommendations).toBeDefined();
334 |       expect(Array.isArray(insights.recommendations)).toBe(true);
335 |     });
336 |   });
337 | 
338 |   describe("compareFreshnessAcrossProjects", () => {
339 |     it("should handle project with no history", async () => {
340 |       const projectPath = path.join(testDir, "new-project");
341 | 
342 |       const comparison = await compareFreshnessAcrossProjects(projectPath);
343 | 
344 |       expect(comparison).toBeDefined();
345 |       expect(comparison.currentProject.path).toBe(projectPath);
346 |       expect(comparison.currentProject.improvementScore).toBe(0);
347 |       expect(comparison.similarProjects).toEqual([]);
348 |     });
349 | 
350 |     it("should calculate ranking for project", async () => {
351 |       const projectPath = path.join(testDir, "test-project");
352 |       const docsPath = path.join(projectPath, "docs");
353 | 
354 |       const report: FreshnessScanReport = {
355 |         docsPath,
356 |         scannedAt: new Date().toISOString(),
357 |         totalFiles: 10,
358 |         filesWithMetadata: 10,
359 |         filesWithoutMetadata: 0,
360 |         freshFiles: 8,
361 |         warningFiles: 1,
362 |         staleFiles: 1,
363 |         criticalFiles: 0,
364 |         files: [],
365 |         thresholds: {
366 |           warning: { value: 7, unit: "days" },
367 |           stale: { value: 30, unit: "days" },
368 |           critical: { value: 90, unit: "days" },
369 |         },
370 |       };
371 | 
372 |       await storeFreshnessEvent(projectPath, docsPath, report, "scan");
373 | 
374 |       const comparison = await compareFreshnessAcrossProjects(projectPath);
375 | 
376 |       expect(comparison.ranking).toBeGreaterThan(0);
377 |     });
378 | 
379 |     it("should compare with similar projects", async () => {
380 |       const projectPath1 = path.join(testDir, "project1");
381 |       const docsPath1 = path.join(projectPath1, "docs");
382 |       const projectPath2 = path.join(testDir, "project2");
383 |       const docsPath2 = path.join(projectPath2, "docs");
384 | 
385 |       // Store events for both projects
386 |       const report1: FreshnessScanReport = {
387 |         docsPath: docsPath1,
388 |         scannedAt: new Date().toISOString(),
389 |         totalFiles: 10,
390 |         filesWithMetadata: 10,
391 |         filesWithoutMetadata: 0,
392 |         freshFiles: 9,
393 |         warningFiles: 1,
394 |         staleFiles: 0,
395 |         criticalFiles: 0,
396 |         files: [],
397 |         thresholds: {
398 |           warning: { value: 7, unit: "days" },
399 |           stale: { value: 30, unit: "days" },
400 |           critical: { value: 90, unit: "days" },
401 |         },
402 |       };
403 | 
404 |       const report2: FreshnessScanReport = {
405 |         ...report1,
406 |         docsPath: docsPath2,
407 |         freshFiles: 7,
408 |         warningFiles: 2,
409 |         staleFiles: 1,
410 |       };
411 | 
412 |       await storeFreshnessEvent(projectPath1, docsPath1, report1, "scan");
413 |       await storeFreshnessEvent(projectPath2, docsPath2, report2, "scan");
414 | 
415 |       // The function should work even if there are no similar_to edges
416 |       // (it will just return empty similarProjects array)
417 |       const comparison = await compareFreshnessAcrossProjects(projectPath1);
418 | 
419 |       expect(comparison.currentProject.path).toBe(projectPath1);
420 |       expect(comparison.similarProjects).toBeDefined();
421 |       expect(Array.isArray(comparison.similarProjects)).toBe(true);
422 |     });
423 |   });
424 | 
425 |   describe("updateFreshnessEvent", () => {
426 |     it("should update a freshness event with new data", async () => {
427 |       const projectPath = path.join(testDir, "test-project");
428 |       const docsPath = path.join(projectPath, "docs");
429 | 
430 |       const report: FreshnessScanReport = {
431 |         docsPath,
432 |         scannedAt: new Date().toISOString(),
433 |         totalFiles: 10,
434 |         filesWithMetadata: 8,
435 |         filesWithoutMetadata: 2,
436 |         freshFiles: 8,
437 |         warningFiles: 0,
438 |         staleFiles: 0,
439 |         criticalFiles: 0,
440 |         files: [],
441 |         thresholds: {
442 |           warning: { value: 7, unit: "days" },
443 |           stale: { value: 30, unit: "days" },
444 |           critical: { value: 90, unit: "days" },
445 |         },
446 |       };
447 | 
448 |       const eventId = await storeFreshnessEvent(
449 |         projectPath,
450 |         docsPath,
451 |         report,
452 |         "scan",
453 |       );
454 | 
455 |       await updateFreshnessEvent(eventId, {
456 |         filesInitialized: 2,
457 |         filesUpdated: 5,
458 |         eventType: "update",
459 |       });
460 | 
461 |       // Verify the update by checking history
462 |       const history = await getFreshnessHistory(projectPath, 10);
463 |       expect(history.length).toBeGreaterThan(0);
464 |     });
465 | 
466 |     it("should throw error for non-existent event", async () => {
467 |       await expect(
468 |         updateFreshnessEvent("freshness_event:nonexistent", {
469 |           filesInitialized: 1,
470 |         }),
471 |       ).rejects.toThrow();
472 |     });
473 |   });
474 | 
475 |   describe("Edge cases and additional coverage", () => {
476 |     it("should handle more than 10 stale files", async () => {
477 |       const projectPath = path.join(testDir, "test-project");
478 |       const docsPath = path.join(projectPath, "docs");
479 | 
480 |       // Create 15 stale files
481 |       const staleFiles = Array.from({ length: 15 }, (_, i) => ({
482 |         filePath: path.join(docsPath, `stale${i}.md`),
483 |         relativePath: `stale${i}.md`,
484 |         hasMetadata: true,
485 |         isStale: true,
486 |         stalenessLevel: "stale" as const,
487 |         ageInMs: 1000 * 60 * 60 * 24 * (40 + i), // 40+ days
488 |         ageFormatted: `${40 + i} days`,
489 |       }));
490 | 
491 |       const report: FreshnessScanReport = {
492 |         docsPath,
493 |         scannedAt: new Date().toISOString(),
494 |         totalFiles: 15,
495 |         filesWithMetadata: 15,
496 |         filesWithoutMetadata: 0,
497 |         freshFiles: 0,
498 |         warningFiles: 0,
499 |         staleFiles: 15,
500 |         criticalFiles: 0,
501 |         files: staleFiles,
502 |         thresholds: {
503 |           warning: { value: 7, unit: "days" },
504 |           stale: { value: 30, unit: "days" },
505 |           critical: { value: 90, unit: "days" },
506 |         },
507 |       };
508 | 
509 |       const eventId = await storeFreshnessEvent(
510 |         projectPath,
511 |         docsPath,
512 |         report,
513 |         "scan",
514 |       );
515 |       expect(eventId).toBeDefined();
516 | 
517 |       const history = await getFreshnessHistory(projectPath, 1);
518 |       expect(history[0].event.mostStaleFiles.length).toBeLessThanOrEqual(10);
519 |     });
520 | 
521 |     it("should recommend action for 30%+ stale files", async () => {
522 |       const projectPath = path.join(testDir, "test-project");
523 |       const docsPath = path.join(projectPath, "docs");
524 | 
525 |       const report: FreshnessScanReport = {
526 |         docsPath,
527 |         scannedAt: new Date().toISOString(),
528 |         totalFiles: 10,
529 |         filesWithMetadata: 10,
530 |         filesWithoutMetadata: 0,
531 |         freshFiles: 6,
532 |         warningFiles: 0,
533 |         staleFiles: 4, // 40% stale
534 |         criticalFiles: 0,
535 |         files: [],
536 |         thresholds: {
537 |           warning: { value: 7, unit: "days" },
538 |           stale: { value: 30, unit: "days" },
539 |           critical: { value: 90, unit: "days" },
540 |         },
541 |       };
542 | 
543 |       await storeFreshnessEvent(projectPath, docsPath, report, "scan");
544 | 
545 |       const insights = await getStalenessInsights(projectPath);
546 |       expect(insights.recommendations).toBeDefined();
547 |       expect(insights.recommendations.length).toBeGreaterThan(0);
548 |       // Check that we get recommendations about stale files
549 |       const hasStaleRecommendation = insights.recommendations.some(
550 |         (r) => r.includes("30%") || r.includes("stale"),
551 |       );
552 |       expect(hasStaleRecommendation).toBe(true);
553 |     });
554 | 
555 |     it("should detect declining trend", async () => {
556 |       const projectPath = path.join(testDir, "test-project");
557 |       const docsPath = path.join(projectPath, "docs");
558 | 
559 |       // Store older event with good metrics
560 |       const olderReport: FreshnessScanReport = {
561 |         docsPath,
562 |         scannedAt: new Date(Date.now() - 1000 * 60 * 60 * 24 * 7).toISOString(),
563 |         totalFiles: 10,
564 |         filesWithMetadata: 10,
565 |         filesWithoutMetadata: 0,
566 |         freshFiles: 9,
567 |         warningFiles: 1,
568 |         staleFiles: 0,
569 |         criticalFiles: 0,
570 |         files: [],
571 |         thresholds: {
572 |           warning: { value: 7, unit: "days" },
573 |           stale: { value: 30, unit: "days" },
574 |           critical: { value: 90, unit: "days" },
575 |         },
576 |       };
577 | 
578 |       await storeFreshnessEvent(projectPath, docsPath, olderReport, "scan");
579 |       await new Promise((resolve) => setTimeout(resolve, 10));
580 | 
581 |       // Store newer event with worse metrics
582 |       const newerReport: FreshnessScanReport = {
583 |         ...olderReport,
584 |         scannedAt: new Date().toISOString(),
585 |         freshFiles: 5,
586 |         warningFiles: 2,
587 |         staleFiles: 2,
588 |         criticalFiles: 1,
589 |       };
590 | 
591 |       await storeFreshnessEvent(projectPath, docsPath, newerReport, "scan");
592 | 
593 |       const insights = await getStalenessInsights(projectPath);
594 |       expect(insights.trend).toMatch(/declining|stable/);
595 |     });
596 | 
597 |     it("should identify chronically stale files", async () => {
598 |       const projectPath = path.join(testDir, "test-project");
599 |       const docsPath = path.join(projectPath, "docs");
600 | 
601 |       // Create multiple events with same critical/stale files
602 |       // Need to create enough events so files appear repeatedly
603 |       for (let i = 0; i < 6; i++) {
604 |         const report: FreshnessScanReport = {
605 |           docsPath,
606 |           scannedAt: new Date(
607 |             Date.now() - 1000 * 60 * 60 * 24 * (6 - i),
608 |           ).toISOString(),
609 |           totalFiles: 10,
610 |           filesWithMetadata: 10,
611 |           filesWithoutMetadata: 0,
612 |           freshFiles: 6,
613 |           warningFiles: 0,
614 |           staleFiles: 2,
615 |           criticalFiles: 2,
616 |           files: [
617 |             {
618 |               filePath: path.join(docsPath, "always-stale.md"),
619 |               relativePath: "always-stale.md",
620 |               hasMetadata: true,
621 |               isStale: true,
622 |               stalenessLevel: "critical",
623 |               ageInMs: 1000 * 60 * 60 * 24 * 100,
624 |               ageFormatted: "100 days",
625 |             },
626 |             {
627 |               filePath: path.join(docsPath, "also-stale.md"),
628 |               relativePath: "also-stale.md",
629 |               hasMetadata: true,
630 |               isStale: true,
631 |               stalenessLevel: "critical",
632 |               ageInMs: 1000 * 60 * 60 * 24 * 95,
633 |               ageFormatted: "95 days",
634 |             },
635 |             {
636 |               filePath: path.join(docsPath, "stale-doc.md"),
637 |               relativePath: "stale-doc.md",
638 |               hasMetadata: true,
639 |               isStale: true,
640 |               stalenessLevel: "stale",
641 |               ageInMs: 1000 * 60 * 60 * 24 * 40,
642 |               ageFormatted: "40 days",
643 |             },
644 |             {
645 |               filePath: path.join(docsPath, "another-stale.md"),
646 |               relativePath: "another-stale.md",
647 |               hasMetadata: true,
648 |               isStale: true,
649 |               stalenessLevel: "stale",
650 |               ageInMs: 1000 * 60 * 60 * 24 * 35,
651 |               ageFormatted: "35 days",
652 |             },
653 |           ],
654 |           thresholds: {
655 |             warning: { value: 7, unit: "days" },
656 |             stale: { value: 30, unit: "days" },
657 |             critical: { value: 90, unit: "days" },
658 |           },
659 |         };
660 | 
661 |         await storeFreshnessEvent(projectPath, docsPath, report, "scan");
662 |         await new Promise((resolve) => setTimeout(resolve, 10));
663 |       }
664 | 
665 |       const insights = await getStalenessInsights(projectPath);
666 |       // With 6 events and files appearing in all of them,
667 |       // should trigger chronically stale recommendation
668 |       const hasChronicallyStale = insights.recommendations.some(
669 |         (r) => r.includes("chronically") || r.includes("critical"),
670 |       );
671 |       expect(hasChronicallyStale).toBe(true);
672 |     });
673 | 
674 |     it("should handle files without age information", async () => {
675 |       const projectPath = path.join(testDir, "test-project");
676 |       const docsPath = path.join(projectPath, "docs");
677 | 
678 |       const report: FreshnessScanReport = {
679 |         docsPath,
680 |         scannedAt: new Date().toISOString(),
681 |         totalFiles: 5,
682 |         filesWithMetadata: 3,
683 |         filesWithoutMetadata: 2,
684 |         freshFiles: 3,
685 |         warningFiles: 0,
686 |         staleFiles: 0,
687 |         criticalFiles: 0,
688 |         files: [
689 |           {
690 |             filePath: path.join(docsPath, "no-metadata.md"),
691 |             relativePath: "no-metadata.md",
692 |             hasMetadata: false,
693 |             isStale: false,
694 |             stalenessLevel: "unknown",
695 |           },
696 |         ],
697 |         thresholds: {
698 |           warning: { value: 7, unit: "days" },
699 |           stale: { value: 30, unit: "days" },
700 |           critical: { value: 90, unit: "days" },
701 |         },
702 |       };
703 | 
704 |       const eventId = await storeFreshnessEvent(
705 |         projectPath,
706 |         docsPath,
707 |         report,
708 |         "scan",
709 |       );
710 |       expect(eventId).toBeDefined();
711 | 
712 |       const history = await getFreshnessHistory(projectPath, 1);
713 |       expect(history.length).toBeGreaterThan(0);
714 |       if (history.length > 0) {
715 |         expect(history[0].event.averageAge).toBeUndefined();
716 |         expect(history[0].event.oldestFile).toBeUndefined();
717 |       }
718 |     });
719 |   });
720 | });
721 | 
```

--------------------------------------------------------------------------------
/src/memory/multi-agent-sharing.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Multi-Agent Memory Sharing System for DocuMCP
  3 |  * Implements Issue #50: Multi-Agent Memory Sharing
  4 |  *
  5 |  * Enables multiple DocuMCP instances to share and synchronize memory,
  6 |  * creating a collaborative knowledge network for enhanced learning and recommendations.
  7 |  */
  8 | 
  9 | import { MemoryManager } from "./manager.js";
 10 | import { MemoryEntry } from "./storage.js";
 11 | import { EventEmitter } from "events";
 12 | import * as crypto from "crypto";
 13 | 
 14 | export interface AgentIdentity {
 15 |   id: string;
 16 |   name: string;
 17 |   version: string;
 18 |   capabilities: string[];
 19 |   lastSeen: string;
 20 |   trustLevel: "untrusted" | "low" | "medium" | "high" | "trusted";
 21 |   specializations: string[];
 22 | }
 23 | 
 24 | export interface SharedMemory {
 25 |   originalEntry: MemoryEntry;
 26 |   sharingMetadata: {
 27 |     sourceAgent: string;
 28 |     sharedAt: string;
 29 |     accessCount: number;
 30 |     trustScore: number;
 31 |     validatedBy: string[];
 32 |     conflicts: string[];
 33 |   };
 34 |   transformations: Array<{
 35 |     agentId: string;
 36 |     transformationType:
 37 |       | "anonymization"
 38 |       | "aggregation"
 39 |       | "enrichment"
 40 |       | "validation";
 41 |     appliedAt: string;
 42 |     details: Record<string, any>;
 43 |   }>;
 44 | }
 45 | 
 46 | export interface SyncRequest {
 47 |   id: string;
 48 |   fromAgent: string;
 49 |   toAgent: string;
 50 |   requestType: "full_sync" | "incremental" | "selective" | "validation";
 51 |   criteria?: {
 52 |     types?: string[];
 53 |     tags?: string[];
 54 |     timeRange?: { start: string; end: string };
 55 |     minTrustLevel?: number;
 56 |   };
 57 |   requestedAt: string;
 58 |   status: "pending" | "in_progress" | "completed" | "failed";
 59 | }
 60 | 
 61 | export interface ConflictResolution {
 62 |   conflictId: string;
 63 |   conflictType:
 64 |     | "duplicate"
 65 |     | "contradiction"
 66 |     | "version_mismatch"
 67 |     | "trust_dispute";
 68 |   involvedEntries: string[];
 69 |   involvedAgents: string[];
 70 |   resolutionStrategy:
 71 |     | "merge"
 72 |     | "prioritize_trusted"
 73 |     | "manual_review"
 74 |     | "temporal_precedence";
 75 |   resolvedAt?: string;
 76 |   resolution?: {
 77 |     action: string;
 78 |     resultingEntry?: MemoryEntry;
 79 |     metadata: Record<string, any>;
 80 |   };
 81 | }
 82 | 
 83 | export interface CollaborativeInsight {
 84 |   id: string;
 85 |   type: "trend" | "pattern" | "anomaly" | "consensus" | "disagreement";
 86 |   description: string;
 87 |   evidence: string[];
 88 |   contributingAgents: string[];
 89 |   confidence: number;
 90 |   generatedAt: string;
 91 |   actionable: boolean;
 92 |   recommendations?: string[];
 93 | }
 94 | 
 95 | export class MultiAgentMemorySharing extends EventEmitter {
 96 |   private memoryManager: MemoryManager;
 97 |   private agentId: string;
 98 |   private knownAgents: Map<string, AgentIdentity>;
 99 |   private sharedMemories: Map<string, SharedMemory>;
100 |   private syncRequests: Map<string, SyncRequest>;
101 |   private conflicts: Map<string, ConflictResolution>;
102 |   private collaborativeInsights: Map<string, CollaborativeInsight>;
103 |   private syncInterval: NodeJS.Timeout | null = null;
104 | 
105 |   constructor(
106 |     memoryManager: MemoryManager,
107 |     agentIdentity: Partial<AgentIdentity>,
108 |   ) {
109 |     super();
110 |     this.memoryManager = memoryManager;
111 |     this.agentId = agentIdentity.id || this.generateAgentId();
112 |     this.knownAgents = new Map();
113 |     this.sharedMemories = new Map();
114 |     this.syncRequests = new Map();
115 |     this.conflicts = new Map();
116 |     this.collaborativeInsights = new Map();
117 | 
118 |     // Register self
119 |     this.registerAgent({
120 |       id: this.agentId,
121 |       name: agentIdentity.name || "DocuMCP Agent",
122 |       version: agentIdentity.version || "1.0.0",
123 |       capabilities: agentIdentity.capabilities || [
124 |         "analysis",
125 |         "recommendation",
126 |         "deployment",
127 |       ],
128 |       lastSeen: new Date().toISOString(),
129 |       trustLevel: "trusted",
130 |       specializations: agentIdentity.specializations || [],
131 |     });
132 |   }
133 | 
134 |   /**
135 |    * Initialize multi-agent sharing
136 |    */
137 |   async initialize(): Promise<void> {
138 |     await this.loadSharedMemories();
139 |     await this.loadKnownAgents();
140 |     await this.loadPendingSyncRequests();
141 | 
142 |     // Start periodic sync
143 |     this.startPeriodicSync();
144 | 
145 |     this.emit("initialized", { agentId: this.agentId });
146 |   }
147 | 
148 |   /**
149 |    * Register a new agent in the network
150 |    */
151 |   async registerAgent(agent: AgentIdentity): Promise<void> {
152 |     this.knownAgents.set(agent.id, {
153 |       ...agent,
154 |       lastSeen: new Date().toISOString(),
155 |     });
156 | 
157 |     await this.persistAgentRegistry();
158 |     this.emit("agent-registered", agent);
159 |   }
160 | 
161 |   /**
162 |    * Share a memory entry with other agents
163 |    */
164 |   async shareMemory(
165 |     memoryId: string,
166 |     targetAgents?: string[],
167 |     options?: {
168 |       anonymize?: boolean;
169 |       requireValidation?: boolean;
170 |       trustLevel?: number;
171 |     },
172 |   ): Promise<SharedMemory> {
173 |     const memory = await this.memoryManager.recall(memoryId);
174 |     if (!memory) {
175 |       throw new Error(`Memory ${memoryId} not found`);
176 |     }
177 | 
178 |     // Create shared memory wrapper
179 |     const sharedMemory: SharedMemory = {
180 |       originalEntry: this.anonymizeIfRequired(memory, options?.anonymize),
181 |       sharingMetadata: {
182 |         sourceAgent: this.agentId,
183 |         sharedAt: new Date().toISOString(),
184 |         accessCount: 0,
185 |         trustScore: this.calculateInitialTrustScore(memory),
186 |         validatedBy: [],
187 |         conflicts: [],
188 |       },
189 |       transformations: [],
190 |     };
191 | 
192 |     // Apply anonymization transformation if required
193 |     if (options?.anonymize) {
194 |       sharedMemory.transformations.push({
195 |         agentId: this.agentId,
196 |         transformationType: "anonymization",
197 |         appliedAt: new Date().toISOString(),
198 |         details: { level: "standard", preserveStructure: true },
199 |       });
200 |     }
201 | 
202 |     this.sharedMemories.set(memoryId, sharedMemory);
203 | 
204 |     // Create sync requests for target agents
205 |     if (targetAgents) {
206 |       for (const targetAgent of targetAgents) {
207 |         await this.createSyncRequest(targetAgent, "selective", {
208 |           memoryIds: [memoryId],
209 |         });
210 |       }
211 |     } else {
212 |       // Broadcast to all trusted agents
213 |       await this.broadcastToTrustedAgents(sharedMemory);
214 |     }
215 | 
216 |     await this.persistSharedMemories();
217 |     this.emit("memory-shared", { memoryId, sharedMemory });
218 | 
219 |     return sharedMemory;
220 |   }
221 | 
222 |   /**
223 |    * Receive shared memory from another agent
224 |    */
225 |   async receiveSharedMemory(
226 |     sharedMemory: SharedMemory,
227 |     sourceAgent: string,
228 |   ): Promise<{
229 |     accepted: boolean;
230 |     conflicts?: ConflictResolution[];
231 |     integrationResult?: string;
232 |   }> {
233 |     // Validate source agent trust level
234 |     const sourceAgentInfo = this.knownAgents.get(sourceAgent);
235 |     if (!sourceAgentInfo || sourceAgentInfo.trustLevel === "untrusted") {
236 |       return { accepted: false };
237 |     }
238 | 
239 |     // Check for conflicts with existing memories
240 |     const conflicts = await this.detectConflicts(sharedMemory);
241 | 
242 |     if (conflicts.length > 0) {
243 |       // Store conflicts for resolution
244 |       for (const conflict of conflicts) {
245 |         this.conflicts.set(conflict.conflictId, conflict);
246 |         await this.resolveConflict(conflict);
247 |       }
248 | 
249 |       this.emit("conflict-detected", { conflicts, sharedMemory });
250 |     }
251 | 
252 |     // Integrate the shared memory
253 |     const integrationResult = await this.integrateSharedMemory(
254 |       sharedMemory,
255 |       sourceAgent,
256 |     );
257 | 
258 |     // Update sharing metadata
259 |     sharedMemory.sharingMetadata.accessCount++;
260 | 
261 |     this.emit("memory-received", {
262 |       sharedMemory,
263 |       sourceAgent,
264 |       integrationResult,
265 |     });
266 | 
267 |     return {
268 |       accepted: true,
269 |       conflicts: conflicts.length > 0 ? conflicts : undefined,
270 |       integrationResult,
271 |     };
272 |   }
273 | 
274 |   /**
275 |    * Request synchronization with another agent
276 |    */
277 |   async requestSync(
278 |     targetAgent: string,
279 |     syncType: SyncRequest["requestType"] = "incremental",
280 |     criteria?: SyncRequest["criteria"],
281 |   ): Promise<SyncRequest> {
282 |     const syncRequest: SyncRequest = {
283 |       id: this.generateSyncId(),
284 |       fromAgent: this.agentId,
285 |       toAgent: targetAgent,
286 |       requestType: syncType,
287 |       criteria,
288 |       requestedAt: new Date().toISOString(),
289 |       status: "pending",
290 |     };
291 | 
292 |     this.syncRequests.set(syncRequest.id, syncRequest);
293 |     await this.persistSyncRequests();
294 | 
295 |     this.emit("sync-requested", syncRequest);
296 |     return syncRequest;
297 |   }
298 | 
299 |   /**
300 |    * Process incoming sync request
301 |    */
302 |   async processSyncRequest(syncRequest: SyncRequest): Promise<{
303 |     approved: boolean;
304 |     memories?: SharedMemory[];
305 |     reason?: string;
306 |   }> {
307 |     // Validate requesting agent
308 |     const requestingAgent = this.knownAgents.get(syncRequest.fromAgent);
309 |     if (!requestingAgent || requestingAgent.trustLevel === "untrusted") {
310 |       return { approved: false, reason: "Untrusted agent" };
311 |     }
312 | 
313 |     // Update request status
314 |     syncRequest.status = "in_progress";
315 |     this.syncRequests.set(syncRequest.id, syncRequest);
316 | 
317 |     try {
318 |       // Get memories based on sync type and criteria
319 |       const memories = await this.getMemoriesForSync(syncRequest);
320 | 
321 |       syncRequest.status = "completed";
322 |       this.emit("sync-completed", {
323 |         syncRequest,
324 |         memoriesCount: memories.length,
325 |       });
326 | 
327 |       return { approved: true, memories };
328 |     } catch (error) {
329 |       syncRequest.status = "failed";
330 |       this.emit("sync-failed", { syncRequest, error });
331 | 
332 |       return {
333 |         approved: false,
334 |         reason: error instanceof Error ? error.message : "Unknown error",
335 |       };
336 |     }
337 |   }
338 | 
339 |   /**
340 |    * Generate collaborative insights from shared memories
341 |    */
342 |   async generateCollaborativeInsights(): Promise<CollaborativeInsight[]> {
343 |     const insights: CollaborativeInsight[] = [];
344 | 
345 |     // Analyze trends across agents
346 |     const trends = await this.analyzeTrends();
347 |     insights.push(...trends);
348 | 
349 |     // Find consensus patterns
350 |     const consensus = await this.findConsensusPatterns();
351 |     insights.push(...consensus);
352 | 
353 |     // Identify disagreements that need attention
354 |     const disagreements = await this.identifyDisagreements();
355 |     insights.push(...disagreements);
356 | 
357 |     // Detect anomalies
358 |     const anomalies = await this.detectAnomalies();
359 |     insights.push(...anomalies);
360 | 
361 |     // Store insights
362 |     for (const insight of insights) {
363 |       this.collaborativeInsights.set(insight.id, insight);
364 |     }
365 | 
366 |     await this.persistCollaborativeInsights();
367 |     this.emit("insights-generated", { count: insights.length });
368 | 
369 |     return insights;
370 |   }
371 | 
372 |   /**
373 |    * Validate shared memory against local knowledge
374 |    */
375 |   async validateSharedMemory(sharedMemory: SharedMemory): Promise<{
376 |     isValid: boolean;
377 |     confidence: number;
378 |     issues: string[];
379 |     recommendations: string[];
380 |   }> {
381 |     const issues: string[] = [];
382 |     const recommendations: string[] = [];
383 |     let confidence = 1.0;
384 | 
385 |     // Check data consistency
386 |     if (!this.validateDataStructure(sharedMemory.originalEntry)) {
387 |       issues.push("Invalid data structure");
388 |       confidence *= 0.7;
389 |     }
390 | 
391 |     // Cross-validate with local memories
392 |     const similarMemories = await this.findSimilarLocalMemories(
393 |       sharedMemory.originalEntry,
394 |     );
395 |     if (similarMemories.length > 0) {
396 |       const consistencyScore = this.calculateConsistency(
397 |         sharedMemory.originalEntry,
398 |         similarMemories,
399 |       );
400 |       if (consistencyScore < 0.8) {
401 |         issues.push("Inconsistent with local knowledge");
402 |         confidence *= consistencyScore;
403 |       }
404 |     }
405 | 
406 |     // Check source agent reliability
407 |     const sourceAgent = this.knownAgents.get(
408 |       sharedMemory.sharingMetadata.sourceAgent,
409 |     );
410 |     if (sourceAgent) {
411 |       if (sourceAgent.trustLevel === "low") {
412 |         confidence *= 0.8;
413 |         recommendations.push("Verify with additional sources");
414 |       } else if (
415 |         sourceAgent.trustLevel === "high" ||
416 |         sourceAgent.trustLevel === "trusted"
417 |       ) {
418 |         confidence *= 1.1;
419 |       }
420 |     }
421 | 
422 |     // Validate transformations
423 |     for (const transformation of sharedMemory.transformations) {
424 |       if (transformation.transformationType === "anonymization") {
425 |         // Check if anonymization preserved essential information
426 |         if (!this.validateAnonymization(transformation)) {
427 |           issues.push("Anonymization may have removed critical information");
428 |           confidence *= 0.9;
429 |         }
430 |       }
431 |     }
432 | 
433 |     return {
434 |       isValid: issues.length === 0 || confidence > 0.6,
435 |       confidence: Math.min(confidence, 1.0),
436 |       issues,
437 |       recommendations,
438 |     };
439 |   }
440 | 
441 |   /**
442 |    * Get network statistics
443 |    */
444 |   getNetworkStatistics(): {
445 |     connectedAgents: number;
446 |     sharedMemories: number;
447 |     activeSyncs: number;
448 |     resolvedConflicts: number;
449 |     trustDistribution: Record<string, number>;
450 |     collaborativeInsights: number;
451 |     networkHealth: number;
452 |   } {
453 |     const trustDistribution: Record<string, number> = {};
454 |     for (const agent of this.knownAgents.values()) {
455 |       trustDistribution[agent.trustLevel] =
456 |         (trustDistribution[agent.trustLevel] || 0) + 1;
457 |     }
458 | 
459 |     const activeSyncs = Array.from(this.syncRequests.values()).filter(
460 |       (req) => req.status === "pending" || req.status === "in_progress",
461 |     ).length;
462 | 
463 |     const resolvedConflicts = Array.from(this.conflicts.values()).filter(
464 |       (conflict) => conflict.resolvedAt,
465 |     ).length;
466 | 
467 |     // Calculate network health (0-1)
468 |     const trustedAgents = Array.from(this.knownAgents.values()).filter(
469 |       (agent) => agent.trustLevel === "high" || agent.trustLevel === "trusted",
470 |     ).length;
471 |     const totalAgents = this.knownAgents.size;
472 |     const networkHealth = totalAgents > 0 ? trustedAgents / totalAgents : 0;
473 | 
474 |     return {
475 |       connectedAgents: this.knownAgents.size,
476 |       sharedMemories: this.sharedMemories.size,
477 |       activeSyncs,
478 |       resolvedConflicts,
479 |       trustDistribution,
480 |       collaborativeInsights: this.collaborativeInsights.size,
481 |       networkHealth,
482 |     };
483 |   }
484 | 
485 |   // Private helper methods
486 | 
487 |   private generateAgentId(): string {
488 |     return `agent_${crypto.randomBytes(8).toString("hex")}`;
489 |   }
490 | 
491 |   private generateSyncId(): string {
492 |     return `sync_${Date.now()}_${crypto.randomBytes(4).toString("hex")}`;
493 |   }
494 | 
495 |   private anonymizeIfRequired(
496 |     memory: MemoryEntry,
497 |     anonymize?: boolean,
498 |   ): MemoryEntry {
499 |     if (!anonymize) return memory;
500 | 
501 |     // Create anonymized copy
502 |     const anonymized = JSON.parse(JSON.stringify(memory));
503 | 
504 |     // Remove/hash sensitive information
505 |     if (anonymized.metadata.repository) {
506 |       anonymized.metadata.repository = this.hashSensitiveData(
507 |         anonymized.metadata.repository,
508 |       );
509 |     }
510 | 
511 |     if (anonymized.metadata.projectId) {
512 |       anonymized.metadata.projectId = this.hashSensitiveData(
513 |         anonymized.metadata.projectId,
514 |       );
515 |     }
516 | 
517 |     // Remove file paths and specific identifiers
518 |     if (anonymized.data.files) {
519 |       delete anonymized.data.files;
520 |     }
521 | 
522 |     return anonymized;
523 |   }
524 | 
525 |   private hashSensitiveData(data: string): string {
526 |     return crypto
527 |       .createHash("sha256")
528 |       .update(data)
529 |       .digest("hex")
530 |       .substring(0, 16);
531 |   }
532 | 
533 |   private calculateInitialTrustScore(memory: MemoryEntry): number {
534 |     let score = 0.5; // Base score
535 | 
536 |     // Boost for successful outcomes
537 |     if (memory.data.status === "success") score += 0.2;
538 | 
539 |     // Boost for rich metadata
540 |     if (memory.metadata.tags && memory.metadata.tags.length > 2) score += 0.1;
541 | 
542 |     // Boost for recent data
543 |     const daysSince =
544 |       (Date.now() - new Date(memory.timestamp).getTime()) /
545 |       (1000 * 60 * 60 * 24);
546 |     if (daysSince <= 30) score += 0.1;
547 | 
548 |     // Boost for complete data
549 |     if (memory.checksum) score += 0.1;
550 | 
551 |     return Math.min(score, 1.0);
552 |   }
553 | 
554 |   private async detectConflicts(
555 |     sharedMemory: SharedMemory,
556 |   ): Promise<ConflictResolution[]> {
557 |     const conflicts: ConflictResolution[] = [];
558 | 
559 |     // Check for duplicates
560 |     const similarLocal = await this.findSimilarLocalMemories(
561 |       sharedMemory.originalEntry,
562 |     );
563 |     for (const similar of similarLocal) {
564 |       if (this.isLikelyDuplicate(sharedMemory.originalEntry, similar)) {
565 |         conflicts.push({
566 |           conflictId: `conflict_${Date.now()}_${crypto
567 |             .randomBytes(4)
568 |             .toString("hex")}`,
569 |           conflictType: "duplicate",
570 |           involvedEntries: [sharedMemory.originalEntry.id, similar.id],
571 |           involvedAgents: [
572 |             sharedMemory.sharingMetadata.sourceAgent,
573 |             this.agentId,
574 |           ],
575 |           resolutionStrategy: "merge",
576 |         });
577 |       }
578 |     }
579 | 
580 |     return conflicts;
581 |   }
582 | 
583 |   private async resolveConflict(conflict: ConflictResolution): Promise<void> {
584 |     // Implement conflict resolution based on strategy
585 |     switch (conflict.resolutionStrategy) {
586 |       case "merge":
587 |         await this.mergeConflictingEntries(conflict);
588 |         break;
589 |       case "prioritize_trusted":
590 |         await this.prioritizeTrustedSource(conflict);
591 |         break;
592 |       case "temporal_precedence":
593 |         await this.useTemporalPrecedence(conflict);
594 |         break;
595 |       default:
596 |         // Mark for manual review
597 |         conflict.resolutionStrategy = "manual_review";
598 |     }
599 | 
600 |     conflict.resolvedAt = new Date().toISOString();
601 |     this.conflicts.set(conflict.conflictId, conflict);
602 |   }
603 | 
604 |   private async integrateSharedMemory(
605 |     sharedMemory: SharedMemory,
606 |     sourceAgent: string,
607 |   ): Promise<string> {
608 |     // Add transformation for integration
609 |     sharedMemory.transformations.push({
610 |       agentId: this.agentId,
611 |       transformationType: "enrichment",
612 |       appliedAt: new Date().toISOString(),
613 |       details: { integratedFrom: sourceAgent },
614 |     });
615 | 
616 |     // Store in local memory with special metadata
617 |     const enrichedEntry = {
618 |       ...sharedMemory.originalEntry,
619 |       metadata: {
620 |         ...sharedMemory.originalEntry.metadata,
621 |         sharedFrom: sourceAgent,
622 |         integratedAt: new Date().toISOString(),
623 |         tags: [
624 |           ...(sharedMemory.originalEntry.metadata.tags || []),
625 |           "shared",
626 |           "collaborative",
627 |         ],
628 |       },
629 |     };
630 | 
631 |     await this.memoryManager.remember(
632 |       enrichedEntry.type,
633 |       enrichedEntry.data,
634 |       enrichedEntry.metadata,
635 |     );
636 | 
637 |     return "integrated_successfully";
638 |   }
639 | 
640 |   private async getMemoriesForSync(
641 |     syncRequest: SyncRequest,
642 |   ): Promise<SharedMemory[]> {
643 |     const allMemories = await this.memoryManager.search("", {
644 |       sortBy: "timestamp",
645 |     });
646 |     let filteredMemories = allMemories;
647 | 
648 |     // Apply criteria filtering
649 |     if (syncRequest.criteria) {
650 |       if (syncRequest.criteria.types) {
651 |         filteredMemories = filteredMemories.filter((m) =>
652 |           syncRequest.criteria!.types!.includes(m.type),
653 |         );
654 |       }
655 | 
656 |       if (syncRequest.criteria.tags) {
657 |         filteredMemories = filteredMemories.filter(
658 |           (m) =>
659 |             m.metadata.tags?.some((tag) =>
660 |               syncRequest.criteria!.tags!.includes(tag),
661 |             ),
662 |         );
663 |       }
664 | 
665 |       if (syncRequest.criteria.timeRange) {
666 |         const start = new Date(syncRequest.criteria.timeRange.start);
667 |         const end = new Date(syncRequest.criteria.timeRange.end);
668 |         filteredMemories = filteredMemories.filter((m) => {
669 |           const memTime = new Date(m.timestamp);
670 |           return memTime >= start && memTime <= end;
671 |         });
672 |       }
673 |     }
674 | 
675 |     // Convert to shared memories
676 |     return filteredMemories.map((memory) => ({
677 |       originalEntry: memory,
678 |       sharingMetadata: {
679 |         sourceAgent: this.agentId,
680 |         sharedAt: new Date().toISOString(),
681 |         accessCount: 0,
682 |         trustScore: this.calculateInitialTrustScore(memory),
683 |         validatedBy: [],
684 |         conflicts: [],
685 |       },
686 |       transformations: [],
687 |     }));
688 |   }
689 | 
690 |   private async analyzeTrends(): Promise<CollaborativeInsight[]> {
691 |     // Analyze shared memories to identify trends
692 |     return []; // Placeholder implementation
693 |   }
694 | 
695 |   private async findConsensusPatterns(): Promise<CollaborativeInsight[]> {
696 |     // Find patterns where multiple agents agree
697 |     return []; // Placeholder implementation
698 |   }
699 | 
700 |   private async identifyDisagreements(): Promise<CollaborativeInsight[]> {
701 |     // Find areas where agents disagree
702 |     return []; // Placeholder implementation
703 |   }
704 | 
705 |   private async detectAnomalies(): Promise<CollaborativeInsight[]> {
706 |     // Detect unusual patterns in shared data
707 |     return []; // Placeholder implementation
708 |   }
709 | 
710 |   private validateDataStructure(entry: MemoryEntry): boolean {
711 |     return Boolean(entry.id && entry.timestamp && entry.type && entry.data);
712 |   }
713 | 
714 |   private async findSimilarLocalMemories(
715 |     entry: MemoryEntry,
716 |   ): Promise<MemoryEntry[]> {
717 |     // Find similar memories in local storage
718 |     return this.memoryManager.search(entry.metadata.projectId || "", {
719 |       sortBy: "timestamp",
720 |     });
721 |   }
722 | 
723 |   private calculateConsistency(
724 |     _entry: MemoryEntry,
725 |     _similar: MemoryEntry[],
726 |   ): number {
727 |     // Calculate consistency score (placeholder)
728 |     return 0.8;
729 |   }
730 | 
731 |   private validateAnonymization(_transformation: any): boolean {
732 |     // Validate anonymization transformation (placeholder)
733 |     return true;
734 |   }
735 | 
736 |   private isLikelyDuplicate(entry1: MemoryEntry, entry2: MemoryEntry): boolean {
737 |     // Simple duplicate detection
738 |     return (
739 |       entry1.type === entry2.type &&
740 |       entry1.metadata.projectId === entry2.metadata.projectId &&
741 |       Math.abs(
742 |         new Date(entry1.timestamp).getTime() -
743 |           new Date(entry2.timestamp).getTime(),
744 |       ) < 60000
745 |     ); // 1 minute
746 |   }
747 | 
748 |   private async mergeConflictingEntries(
749 |     _conflict: ConflictResolution,
750 |   ): Promise<void> {
751 |     // Merge conflicting entries (placeholder)
752 |   }
753 | 
754 |   private async prioritizeTrustedSource(
755 |     _conflict: ConflictResolution,
756 |   ): Promise<void> {
757 |     // Prioritize trusted source (placeholder)
758 |   }
759 | 
760 |   private async useTemporalPrecedence(
761 |     _conflict: ConflictResolution,
762 |   ): Promise<void> {
763 |     // Use temporal precedence (placeholder)
764 |   }
765 | 
766 |   private async broadcastToTrustedAgents(
767 |     _sharedMemory: SharedMemory,
768 |   ): Promise<void> {
769 |     // Broadcast to trusted agents (placeholder)
770 |   }
771 | 
772 |   private startPeriodicSync(): void {
773 |     this.syncInterval = setInterval(
774 |       async () => {
775 |         await this.performPeriodicSync();
776 |       },
777 |       5 * 60 * 1000,
778 |     ); // Every 5 minutes
779 |   }
780 | 
781 |   private async performPeriodicSync(): Promise<void> {
782 |     // Perform periodic synchronization with trusted agents
783 |   }
784 | 
785 |   private async loadSharedMemories(): Promise<void> {
786 |     // Load shared memories from persistence
787 |   }
788 | 
789 |   private async loadKnownAgents(): Promise<void> {
790 |     // Load known agents from persistence
791 |   }
792 | 
793 |   private async loadPendingSyncRequests(): Promise<void> {
794 |     // Load pending sync requests from persistence
795 |   }
796 | 
797 |   private async persistSharedMemories(): Promise<void> {
798 |     // Persist shared memories
799 |   }
800 | 
801 |   private async persistAgentRegistry(): Promise<void> {
802 |     // Persist agent registry
803 |   }
804 | 
805 |   private async persistSyncRequests(): Promise<void> {
806 |     // Persist sync requests
807 |   }
808 | 
809 |   private async persistCollaborativeInsights(): Promise<void> {
810 |     // Persist collaborative insights
811 |   }
812 | 
813 |   private async createSyncRequest(
814 |     _targetAgent: string,
815 |     _type: SyncRequest["requestType"],
816 |     _options: any,
817 |   ): Promise<void> {
818 |     // Create sync request (placeholder)
819 |   }
820 | 
821 |   /**
822 |    * Cleanup and shutdown
823 |    */
824 |   async shutdown(): Promise<void> {
825 |     if (this.syncInterval) {
826 |       clearInterval(this.syncInterval);
827 |     }
828 | 
829 |     await this.persistSharedMemories();
830 |     await this.persistAgentRegistry();
831 |     await this.persistSyncRequests();
832 |     await this.persistCollaborativeInsights();
833 | 
834 |     this.emit("shutdown", { agentId: this.agentId });
835 |   }
836 | }
837 | 
838 | export default MultiAgentMemorySharing;
839 | 
```

--------------------------------------------------------------------------------
/docs/how-to/usage-examples.md:
--------------------------------------------------------------------------------

```markdown
  1 | ---
  2 | documcp:
  3 |   last_updated: "2025-11-20T00:46:21.957Z"
  4 |   last_validated: "2025-11-20T00:46:21.957Z"
  5 |   auto_updated: false
  6 |   update_frequency: monthly
  7 | ---
  8 | 
  9 | # DocuMCP Usage Examples
 10 | 
 11 | This guide provides comprehensive usage examples for DocuMCP functions, organized by common use cases and scenarios.
 12 | 
 13 | ## 🎯 Repository Analysis Examples
 14 | 
 15 | ### Basic Repository Analysis
 16 | 
 17 | ```typescript
 18 | import { analyzeRepository } from "./dist/tools/analyze-repository.js";
 19 | 
 20 | // Analyze a simple project
 21 | const analysis = await analyzeRepository({
 22 |   path: "/path/to/my-project",
 23 |   depth: "standard",
 24 | });
 25 | 
 26 | console.log(`Found ${analysis.data.structure.totalFiles} files`);
 27 | console.log(
 28 |   `Primary language: ${analysis.data.recommendations.primaryLanguage}`,
 29 | );
 30 | ```
 31 | 
 32 | ### Advanced Repository Analysis
 33 | 
 34 | ```typescript
 35 | // Deep analysis with historical context
 36 | const deepAnalysis = await analyzeRepository({
 37 |   path: "/path/to/complex-project",
 38 |   depth: "deep",
 39 | });
 40 | 
 41 | // Access detailed information
 42 | const { structure, dependencies, documentation } = deepAnalysis.data;
 43 | 
 44 | console.log(`Languages: ${Object.keys(structure.languages).join(", ")}`);
 45 | console.log(`Has tests: ${structure.hasTests}`);
 46 | console.log(`Has CI: ${structure.hasCI}`);
 47 | console.log(`Documentation complexity: ${documentation.estimatedComplexity}`);
 48 | ```
 49 | 
 50 | ## 🔧 SSG Recommendation Examples
 51 | 
 52 | ### Basic SSG Recommendation
 53 | 
 54 | ```typescript
 55 | import { recommendSSG } from "./dist/tools/recommend-ssg.js";
 56 | 
 57 | // Get recommendation based on analysis
 58 | const recommendation = await recommendSSG({
 59 |   analysisId: "analysis_abc123_def456",
 60 |   userId: "developer123",
 61 | });
 62 | 
 63 | console.log(`Recommended SSG: ${recommendation.data.recommended}`);
 64 | console.log(`Confidence: ${recommendation.data.confidence * 100}%`);
 65 | console.log(`Reasoning: ${recommendation.data.reasoning.join(", ")}`);
 66 | ```
 67 | 
 68 | ### Personalized SSG Recommendation
 69 | 
 70 | ```typescript
 71 | // With user preferences
 72 | const personalized = await recommendSSG({
 73 |   analysisId: "analysis_abc123_def456",
 74 |   userId: "developer123",
 75 |   preferences: {
 76 |     priority: "performance",
 77 |     ecosystem: "javascript",
 78 |   },
 79 | });
 80 | 
 81 | // Compare alternatives
 82 | personalized.data.alternatives.forEach((alt) => {
 83 |   console.log(`${alt.name}: ${alt.score} (${alt.pros.join(", ")})`);
 84 | });
 85 | ```
 86 | 
 87 | ### Enterprise SSG Recommendation
 88 | 
 89 | ```typescript
 90 | // Enterprise-focused recommendation
 91 | const enterprise = await recommendSSG({
 92 |   analysisId: "analysis_abc123_def456",
 93 |   userId: "enterprise_user",
 94 |   preferences: {
 95 |     priority: "simplicity",
 96 |     ecosystem: "any",
 97 |   },
 98 | });
 99 | 
100 | // Check historical data
101 | if (enterprise.data.historicalData) {
102 |   const { similarProjectCount, successRates } =
103 |     enterprise.data.historicalData;
104 |   console.log(`Based on ${similarProjectCount} similar projects`);
105 |   console.log(`Success rates: ${JSON.stringify(successRates, null, 2)}`);
106 | }
107 | ```
108 | 
109 | ## 📁 Documentation Structure Examples
110 | 
111 | ### Basic Structure Setup
112 | 
113 | ```typescript
114 | import { setupStructure } from "./dist/tools/setup-structure.js";
115 | 
116 | // Set up Docusaurus structure with Diataxis framework
117 | const structure = await setupStructure({
118 |   path: "./docs",
119 |   ssg: "docusaurus",
120 |   includeExamples: true,
121 | });
122 | 
123 | console.log(`Created ${structure.data.directoriesCreated.length} directories`);
124 | console.log(`Created ${structure.data.filesCreated.length} files`);
125 | console.log(
126 |   `Diataxis structure: ${Object.keys(structure.data.diataxisStructure).join(
127 |     ", ",
128 |   )}`,
129 | );
130 | ```
131 | 
132 | ### Minimal Structure Setup
133 | 
134 | ```typescript
135 | // Minimal structure for existing projects
136 | const minimal = await setupStructure({
137 |   path: "./site",
138 |   ssg: "hugo",
139 |   includeExamples: false,
140 | });
141 | 
142 | // Check what was created
143 | minimal.data.directoriesCreated.forEach((dir) => {
144 |   console.log(`Created directory: ${dir}`);
145 | });
146 | ```
147 | 
148 | ### Custom Structure Setup
149 | 
150 | ```typescript
151 | // Custom structure with specific categories
152 | const custom = await setupStructure({
153 |   path: "./custom-docs",
154 |   ssg: "mkdocs",
155 |   includeExamples: true,
156 | });
157 | 
158 | // Access structure details
159 | const { diataxisStructure, ssgSpecificFiles } = custom.data;
160 | console.log(
161 |   `Diataxis categories: ${Object.keys(diataxisStructure).join(", ")}`,
162 | );
163 | ```
164 | 
165 | ## ⚙️ Configuration Generation Examples
166 | 
167 | ### Docusaurus Configuration
168 | 
169 | ```typescript
170 | import { generateConfig } from "documcp";
171 | 
172 | // Generate Docusaurus configuration
173 | const config = await generateConfig({
174 |   ssg: "docusaurus",
175 |   projectName: "My Awesome Project",
176 |   projectDescription: "A comprehensive documentation project",
177 |   outputPath: "./docs",
178 | });
179 | 
180 | console.log(`Generated ${config.data.filesCreated.length} configuration files`);
181 | config.data.filesCreated.forEach((file) => {
182 |   console.log(`Created: ${file}`);
183 | });
184 | ```
185 | 
186 | ### Hugo Configuration
187 | 
188 | ```typescript
189 | // Generate Hugo configuration
190 | const hugoConfig = await generateConfig({
191 |   ssg: "hugo",
192 |   projectName: "My Hugo Site",
193 |   outputPath: "./site",
194 | });
195 | 
196 | // Check configuration details
197 | const { ssg, projectName, filesCreated } = hugoConfig.data;
198 | console.log(`Generated ${ssg} configuration for ${projectName}`);
199 | ```
200 | 
201 | ### Multi-SSG Configuration
202 | 
203 | ```typescript
204 | // Generate configurations for multiple SSGs
205 | const ssgs = ["docusaurus", "hugo", "mkdocs"];
206 | 
207 | for (const ssg of ssgs) {
208 |   const config = await generateConfig({
209 |     ssg: ssg as any,
210 |     projectName: "Multi-SSG Project",
211 |     outputPath: `./docs-${ssg}`,
212 |   });
213 | 
214 |   console.log(
215 |     `Generated ${ssg} configuration: ${config.data.filesCreated.length} files`,
216 |   );
217 | }
218 | ```
219 | 
220 | ## 📝 Content Population Examples
221 | 
222 | ### Basic Content Population
223 | 
224 | ```typescript
225 | import { handlePopulateDiataxisContent } from "documcp";
226 | 
227 | // Populate documentation content
228 | const population = await handlePopulateDiataxisContent({
229 |   analysisId: "analysis_abc123_def456",
230 |   docsPath: "./docs",
231 |   populationLevel: "comprehensive",
232 | });
233 | 
234 | console.log(
235 |   `Generated ${population.data.contentGenerated.length} content files`,
236 | );
237 | console.log(
238 |   `Extracted ${population.data.contentExtracted.length} content pieces`,
239 | );
240 | ```
241 | 
242 | ### Focused Content Population
243 | 
244 | ```typescript
245 | // Populate with specific focus areas
246 | const focused = await handlePopulateDiataxisContent({
247 |   analysisId: "analysis_abc123_def456",
248 |   docsPath: "./docs",
249 |   populationLevel: "intelligent",
250 |   focusAreas: ["api", "examples", "tutorials"],
251 | });
252 | 
253 | // Check what was generated
254 | focused.data.contentGenerated.forEach((content) => {
255 |   console.log(`Generated: ${content.category}/${content.filename}`);
256 | });
257 | ```
258 | 
259 | ### Technology-Focused Population
260 | 
261 | ```typescript
262 | // Populate with technology focus
263 | const techFocused = await handlePopulateDiataxisContent({
264 |   analysisId: "analysis_abc123_def456",
265 |   docsPath: "./docs",
266 |   technologyFocus: ["React", "TypeScript", "Node.js"],
267 | });
268 | 
269 | // Access generated content
270 | techFocused.data.contentGenerated.forEach((content) => {
271 |   if (content.technology) {
272 |     console.log(
273 |       `Technology-specific content: ${content.filename} (${content.technology})`,
274 |     );
275 |   }
276 | });
277 | ```
278 | 
279 | ## 🚀 Deployment Examples
280 | 
281 | ### Basic GitHub Pages Deployment
282 | 
283 | ```typescript
284 | import { handleDeployPages } from "documcp";
285 | 
286 | // Deploy to GitHub Pages
287 | const deployment = await handleDeployPages({
288 |   repository: "user/repository",
289 |   ssg: "docusaurus",
290 | });
291 | 
292 | console.log(`Deployment URL: ${deployment.data.url}`);
293 | console.log(`Status: ${deployment.data.status}`);
294 | ```
295 | 
296 | ### Custom Domain Deployment
297 | 
298 | ```typescript
299 | // Deploy with custom domain
300 | const customDomain = await handleDeployPages({
301 |   repository: "user/repository",
302 |   ssg: "docusaurus",
303 |   customDomain: "docs.example.com",
304 | });
305 | 
306 | // Check deployment details
307 | const { url, status, configuration } = customDomain.data;
308 | console.log(`Deployed to: ${url}`);
309 | console.log(`Custom domain: ${configuration.customDomain}`);
310 | ```
311 | 
312 | ### Branch-Specific Deployment
313 | 
314 | ```typescript
315 | // Deploy to specific branch
316 | const branchDeployment = await handleDeployPages({
317 |   repository: "user/repository",
318 |   ssg: "docusaurus",
319 |   branch: "gh-pages",
320 | });
321 | 
322 | // Monitor deployment
323 | if (branchDeployment.data.status === "success") {
324 |   console.log(`Successfully deployed to ${branchDeployment.data.branch}`);
325 | } else {
326 |   console.log(`Deployment failed: ${branchDeployment.data.error}`);
327 | }
328 | ```
329 | 
330 | ## 🧠 Memory System Examples
331 | 
332 | ### Memory Initialization
333 | 
334 | ```typescript
335 | import { initializeMemory } from "documcp/memory";
336 | 
337 | // Initialize memory system
338 | const memory = await initializeMemory("./custom-memory-storage");
339 | 
340 | console.log("Memory system initialized");
341 | console.log(`Storage directory: ${memory.storageDir}`);
342 | ```
343 | 
344 | ### Storing Analysis Data
345 | 
346 | ```typescript
347 | import { rememberAnalysis } from "documcp/memory";
348 | 
349 | // Store analysis data
350 | const memoryId = await rememberAnalysis("/path/to/project", {
351 |   id: "analysis_123",
352 |   structure: { totalFiles: 150, languages: { ".ts": 100 } },
353 |   dependencies: { ecosystem: "javascript", packages: ["react"] },
354 | });
355 | 
356 | console.log(`Stored analysis with ID: ${memoryId}`);
357 | ```
358 | 
359 | ### Retrieving Project Insights
360 | 
361 | ```typescript
362 | import { getProjectInsights } from "documcp/memory";
363 | 
364 | // Get project insights
365 | const insights = await getProjectInsights("project_abc123");
366 | 
367 | insights.forEach((insight) => {
368 |   console.log(`💡 ${insight}`);
369 | });
370 | ```
371 | 
372 | ### Finding Similar Projects
373 | 
374 | ```typescript
375 | import { getSimilarProjects } from "documcp/memory";
376 | 
377 | // Find similar projects
378 | const similar = await getSimilarProjects(analysisData, 5);
379 | 
380 | console.log(`Found ${similar.length} similar projects:`);
381 | similar.forEach((project) => {
382 |   console.log(
383 |     `- ${project.metadata.projectId} (${project.similarity}% similar)`,
384 |   );
385 | });
386 | ```
387 | 
388 | ## 📊 README Analysis Examples
389 | 
390 | ### Basic README Analysis
391 | 
392 | ```typescript
393 | import { analyzeReadme } from "documcp";
394 | 
395 | // Analyze README
396 | const analysis = await analyzeReadme({
397 |   project_path: "/path/to/project",
398 | });
399 | 
400 | const { analysis: readmeAnalysis } = analysis.data;
401 | console.log(`README Score: ${readmeAnalysis.overallScore}/100`);
402 | console.log(
403 |   `Current length: ${readmeAnalysis.lengthAnalysis.currentLines} lines`,
404 | );
405 | console.log(
406 |   `Target length: ${readmeAnalysis.lengthAnalysis.targetLines} lines`,
407 | );
408 | ```
409 | 
410 | ### Community-Focused Analysis
411 | 
412 | ```typescript
413 | // Analyze for community contributors
414 | const communityAnalysis = await analyzeReadme({
415 |   project_path: "/path/to/project",
416 |   target_audience: "community_contributors",
417 |   optimization_level: "moderate",
418 | });
419 | 
420 | const { communityReadiness } = communityAnalysis.data.analysis;
421 | console.log(`Has contributing guide: ${communityReadiness.hasContributing}`);
422 | console.log(`Has code of conduct: ${communityReadiness.hasCodeOfConduct}`);
423 | console.log(`Badge count: ${communityReadiness.badgeCount}`);
424 | ```
425 | 
426 | ### Enterprise README Analysis
427 | 
428 | ```typescript
429 | // Analyze for enterprise users
430 | const enterpriseAnalysis = await analyzeReadme({
431 |   project_path: "/path/to/project",
432 |   target_audience: "enterprise_users",
433 |   optimization_level: "aggressive",
434 |   max_length_target: 200,
435 | });
436 | 
437 | const { optimizationOpportunities } = enterpriseAnalysis.data.analysis;
438 | optimizationOpportunities.forEach((opportunity) => {
439 |   console.log(
440 |     `${opportunity.type}: ${opportunity.description} (${opportunity.priority})`,
441 |   );
442 | });
443 | ```
444 | 
445 | ## 🔧 README Optimization Examples
446 | 
447 | ### Basic README Optimization
448 | 
449 | ```typescript
450 | import { optimizeReadme } from "documcp";
451 | 
452 | // Optimize README
453 | const optimization = await optimizeReadme({
454 |   readme_path: "./README.md",
455 |   strategy: "community_focused",
456 | });
457 | 
458 | const { optimization: result } = optimization.data;
459 | console.log(
460 |   `Reduced from ${result.originalLength} to ${result.optimizedLength} lines`,
461 | );
462 | console.log(`Reduction: ${result.reductionPercentage}%`);
463 | ```
464 | 
465 | ### Enterprise README Optimization
466 | 
467 | ```typescript
468 | // Optimize for enterprise
469 | const enterpriseOptimization = await optimizeReadme({
470 |   readme_path: "./README.md",
471 |   strategy: "enterprise_focused",
472 |   max_length: 200,
473 |   preserve_existing: true,
474 | });
475 | 
476 | // Check restructuring changes
477 | enterpriseOptimization.data.optimization.restructuringChanges.forEach(
478 |   (change) => {
479 |     console.log(`${change.type}: ${change.section} - ${change.description}`);
480 |   },
481 | );
482 | ```
483 | 
484 | ### README Template Generation
485 | 
486 | ```typescript
487 | import { generateReadmeTemplate } from "documcp";
488 | 
489 | // Generate README template
490 | const template = await generateReadmeTemplate({
491 |   projectName: "MyAwesomeProject",
492 |   description: "A powerful utility library",
493 |   templateType: "library",
494 |   author: "Your Name",
495 |   license: "MIT",
496 |   includeBadges: true,
497 |   outputPath: "./README.md",
498 | });
499 | 
500 | console.log(`Generated ${template.metadata.estimatedLength} line README`);
501 | console.log(`Sections included: ${template.metadata.sectionsIncluded}`);
502 | ```
503 | 
504 | ## 🔗 Link Checking Examples
505 | 
506 | ### Basic Link Checking
507 | 
508 | ```typescript
509 | import { checkDocumentationLinks } from "documcp";
510 | 
511 | // Check documentation links
512 | const linkCheck = await checkDocumentationLinks({
513 |   documentation_path: "./docs",
514 |   check_external_links: true,
515 |   check_internal_links: true,
516 |   check_anchor_links: true,
517 | });
518 | 
519 | console.log(`Total links checked: ${linkCheck.data.totalLinks}`);
520 | console.log(`Broken links: ${linkCheck.data.brokenLinks}`);
521 | console.log(`Success rate: ${linkCheck.data.successRate}%`);
522 | ```
523 | 
524 | ### Comprehensive Link Checking
525 | 
526 | ```typescript
527 | // Comprehensive link checking
528 | const comprehensive = await checkDocumentationLinks({
529 |   documentation_path: "./docs",
530 |   check_external_links: true,
531 |   check_internal_links: true,
532 |   check_anchor_links: true,
533 |   timeout_ms: 10000,
534 |   max_concurrent_checks: 10,
535 |   fail_on_broken_links: false,
536 | });
537 | 
538 | // Check specific link types
539 | comprehensive.data.linkTypes.forEach((linkType) => {
540 |   console.log(
541 |     `${linkType.type}: ${linkType.total} total, ${linkType.broken} broken`,
542 |   );
543 | });
544 | ```
545 | 
546 | ## 🧪 Local Testing Examples
547 | 
548 | ### Local Documentation Testing
549 | 
550 | ```typescript
551 | import { handleTestLocalDeployment } from "documcp";
552 | 
553 | // Test local deployment
554 | const test = await handleTestLocalDeployment({
555 |   repositoryPath: "/path/to/project",
556 |   ssg: "docusaurus",
557 |   port: 3000,
558 |   timeout: 60,
559 | });
560 | 
561 | console.log(`Test status: ${test.data.status}`);
562 | console.log(`Local URL: ${test.data.localUrl}`);
563 | console.log(`Build time: ${test.data.buildTime}ms`);
564 | ```
565 | 
566 | ### Docker Testing
567 | 
568 | ```typescript
569 | // Test with Docker
570 | const dockerTest = await handleTestLocalDeployment({
571 |   repositoryPath: "/path/to/project",
572 |   ssg: "hugo",
573 |   port: 8080,
574 |   timeout: 120,
575 | });
576 | 
577 | if (dockerTest.data.status === "success") {
578 |   console.log(`Docker test successful: ${dockerTest.data.localUrl}`);
579 | } else {
580 |   console.log(`Docker test failed: ${dockerTest.data.error}`);
581 | }
582 | ```
583 | 
584 | ## 🔄 Workflow Examples
585 | 
586 | ### Complete Documentation Workflow
587 | 
588 | ```typescript
589 | // Complete workflow from analysis to deployment
590 | async function completeDocumentationWorkflow(projectPath: string) {
591 |   try {
592 |     // 1. Analyze repository
593 |     const analysis = await analyzeRepository({
594 |       path: projectPath,
595 |       depth: "standard",
596 |     });
597 | 
598 |     const analysisId = analysis.data.id;
599 |     console.log(`Analysis complete: ${analysisId}`);
600 | 
601 |     // 2. Get SSG recommendation
602 |     const recommendation = await recommendSSG({
603 |       analysisId: analysisId,
604 |       userId: "developer123",
605 |     });
606 | 
607 |     const ssg = recommendation.data.recommended;
608 |     console.log(`Recommended SSG: ${ssg}`);
609 | 
610 |     // 3. Set up structure
611 |     const structure = await setupStructure({
612 |       path: "./docs",
613 |       ssg: ssg,
614 |       includeExamples: true,
615 |     });
616 | 
617 |     console.log(
618 |       `Structure created: ${structure.data.directoriesCreated.length} directories`,
619 |     );
620 | 
621 |     // 4. Generate configuration
622 |     const config = await generateConfig({
623 |       ssg: ssg,
624 |       projectName: "My Project",
625 |       outputPath: "./docs",
626 |     });
627 | 
628 |     console.log(
629 |       `Configuration generated: ${config.data.filesCreated.length} files`,
630 |     );
631 | 
632 |     // 5. Populate content
633 |     const content = await handlePopulateDiataxisContent({
634 |       analysisId: analysisId,
635 |       docsPath: "./docs",
636 |       populationLevel: "comprehensive",
637 |     });
638 | 
639 |     console.log(
640 |       `Content populated: ${content.data.contentGenerated.length} files`,
641 |     );
642 | 
643 |     // 6. Deploy to GitHub Pages
644 |     const deployment = await handleDeployPages({
645 |       repository: "user/repository",
646 |       ssg: ssg,
647 |     });
648 | 
649 |     console.log(`Deployed to: ${deployment.data.url}`);
650 | 
651 |     return {
652 |       analysisId,
653 |       ssg,
654 |       deploymentUrl: deployment.data.url,
655 |     };
656 |   } catch (error) {
657 |     console.error("Workflow failed:", error);
658 |     throw error;
659 |   }
660 | }
661 | 
662 | // Usage
663 | completeDocumentationWorkflow("/path/to/project")
664 |   .then((result) => {
665 |     console.log("Workflow completed successfully:", result);
666 |   })
667 |   .catch((error) => {
668 |     console.error("Workflow failed:", error);
669 |   });
670 | ```
671 | 
672 | ### Batch Processing Example
673 | 
674 | ```typescript
675 | // Process multiple repositories
676 | async function batchProcessRepositories(repositories: string[]) {
677 |   const results = [];
678 | 
679 |   for (const repoPath of repositories) {
680 |     try {
681 |       console.log(`Processing: ${repoPath}`);
682 | 
683 |       const analysis = await analyzeRepository({
684 |         path: repoPath,
685 |         depth: "quick",
686 |       });
687 | 
688 |       const recommendation = await recommendSSG({
689 |         analysisId: analysis.data.id,
690 |       });
691 | 
692 |       results.push({
693 |         path: repoPath,
694 |         analysisId: analysis.data.id,
695 |         recommendedSSG: recommendation.data.recommended,
696 |         confidence: recommendation.data.confidence,
697 |       });
698 |     } catch (error) {
699 |       console.error(`Failed to process ${repoPath}:`, error);
700 |       results.push({
701 |         path: repoPath,
702 |         error: error.message,
703 |       });
704 |     }
705 |   }
706 | 
707 |   return results;
708 | }
709 | 
710 | // Usage
711 | const repositories = [
712 |   "/path/to/project1",
713 |   "/path/to/project2",
714 |   "/path/to/project3",
715 | ];
716 | 
717 | batchProcessRepositories(repositories).then((results) => {
718 |   console.log("Batch processing complete:", results);
719 | });
720 | ```
721 | 
722 | ## 🎯 Error Handling Examples
723 | 
724 | ### Comprehensive Error Handling
725 | 
726 | ```typescript
727 | async function robustAnalysis(projectPath: string) {
728 |   try {
729 |     const analysis = await analyzeRepository({
730 |       path: projectPath,
731 |       depth: "standard",
732 |     });
733 | 
734 |     if (analysis.success) {
735 |       return analysis.data;
736 |     } else {
737 |       throw new Error(analysis.error.message);
738 |     }
739 |   } catch (error) {
740 |     if (error.code === "EACCES") {
741 |       console.error("Permission denied. Check file permissions.");
742 |     } else if (error.code === "ENOENT") {
743 |       console.error("Directory not found. Check the path.");
744 |     } else if (error.message.includes("analysis failed")) {
745 |       console.error("Analysis failed. Check repository structure.");
746 |     } else {
747 |       console.error("Unexpected error:", error);
748 |     }
749 | 
750 |     throw error;
751 |   }
752 | }
753 | ```
754 | 
755 | ### Retry Logic Example
756 | 
757 | ```typescript
758 | async function retryAnalysis(projectPath: string, maxRetries: number = 3) {
759 |   let lastError;
760 | 
761 |   for (let attempt = 1; attempt <= maxRetries; attempt++) {
762 |     try {
763 |       console.log(`Attempt ${attempt} of ${maxRetries}`);
764 | 
765 |       const analysis = await analyzeRepository({
766 |         path: projectPath,
767 |         depth: "standard",
768 |       });
769 | 
770 |       return analysis.data;
771 |     } catch (error) {
772 |       lastError = error;
773 |       console.log(`Attempt ${attempt} failed:`, error.message);
774 | 
775 |       if (attempt < maxRetries) {
776 |         const delay = Math.pow(2, attempt) * 1000; // Exponential backoff
777 |         console.log(`Retrying in ${delay}ms...`);
778 |         await new Promise((resolve) => setTimeout(resolve, delay));
779 |       }
780 |     }
781 |   }
782 | 
783 |   throw new Error(
784 |     `Analysis failed after ${maxRetries} attempts: ${lastError.message}`,
785 |   );
786 | }
787 | ```
788 | 
789 | ## 🔌 MCP Integration Examples
790 | 
791 | ### Complete MCP Workflow
792 | 
793 | ```typescript
794 | // Real-world example: Complete documentation deployment via MCP
795 | import { analyzeRepository } from "./dist/tools/analyze-repository.js";
796 | import { recommendSSG } from "./dist/tools/recommend-ssg.js";
797 | import { generateConfig } from "./dist/tools/generate-config.js";
798 | import { setupStructure } from "./dist/tools/setup-structure.js";
799 | import { deployPages } from "./dist/tools/deploy-pages.js";
800 | 
801 | async function completeMCPWorkflow(projectPath: string, githubRepo: string) {
802 |   console.log("🔍 Starting DocuMCP workflow...");
803 | 
804 |   // Step 1: Analyze repository
805 |   const analysis = await analyzeRepository({
806 |     path: projectPath,
807 |     depth: "comprehensive",
808 |   });
809 | 
810 |   console.log(
811 |     `📊 Analysis complete: ${analysis.data.structure.totalFiles} files analyzed`,
812 |   );
813 |   console.log(`🏗️ Project type: ${analysis.data.recommendations.projectType}`);
814 |   console.log(
815 |     `💻 Primary language: ${analysis.data.recommendations.primaryLanguage}`,
816 |   );
817 | 
818 |   // Step 2: Get SSG recommendation
819 |   const ssgRecommendation = await recommendSSG({
820 |     analysisId: analysis.data.id,
821 |     preferences: {
822 |       ecosystem: "javascript",
823 |       priority: "features",
824 |     },
825 |   });
826 | 
827 |   console.log(`🎯 Recommended SSG: ${ssgRecommendation.data.recommended}`);
828 |   console.log(
829 |     `✅ Confidence: ${Math.round(ssgRecommendation.data.confidence * 100)}%`,
830 |   );
831 | 
832 |   // Step 3: Generate configuration
833 |   const config = await generateConfig({
834 |     ssg: ssgRecommendation.data.recommended,
835 |     projectName: analysis.data.structure.projectName,
836 |     outputPath: "./docs",
837 |   });
838 | 
839 |   console.log(
840 |     `⚙️ Configuration generated: ${config.data.filesGenerated.length} files`,
841 |   );
842 | 
843 |   // Step 4: Setup Diataxis structure
844 |   const structure = await setupStructure({
845 |     path: "./docs",
846 |     ssg: ssgRecommendation.data.recommended,
847 |     includeExamples: true,
848 |   });
849 | 
850 |   console.log(
851 |     `📁 Structure created: ${structure.data.directoriesCreated.length} directories`,
852 |   );
853 |   console.log(`📄 Files created: ${structure.data.filesCreated.length} files`);
854 | 
855 |   // Step 5: Deploy to GitHub Pages
856 |   const deployment = await deployPages({
857 |     repository: githubRepo,
858 |     ssg: ssgRecommendation.data.recommended,
859 |     projectPath: projectPath,
860 |   });
861 | 
862 |   console.log(`🚀 Deployment initiated: ${deployment.data.workflowUrl}`);
863 |   console.log(`🌐 Site URL: ${deployment.data.siteUrl}`);
864 | 
865 |   return {
866 |     analysis: analysis.data,
867 |     recommendation: ssgRecommendation.data,
868 |     deployment: deployment.data,
869 |   };
870 | }
871 | 
872 | // Usage
873 | completeMCPWorkflow("/path/to/your/project", "username/repository-name")
874 |   .then((result) => {
875 |     console.log("🎉 DocuMCP workflow completed successfully!");
876 |     console.log(`📊 Final result:`, result);
877 |   })
878 |   .catch((error) => {
879 |     console.error("❌ Workflow failed:", error.message);
880 |   });
881 | ```
882 | 
883 | ### MCP Memory Integration
884 | 
885 | ```typescript
886 | // Example showing DocuMCP's memory system for learning user preferences
887 | import { analyzeRepository } from "./dist/tools/analyze-repository.js";
888 | import { recommendSSG } from "./dist/tools/recommend-ssg.js";
889 | 
890 | async function intelligentRecommendation(projectPath: string, userId: string) {
891 |   // DocuMCP automatically learns from previous successful deployments
892 |   const analysis = await analyzeRepository({
893 |     path: projectPath,
894 |     depth: "standard",
895 |   });
896 | 
897 |   // Memory system provides personalized recommendations
898 |   const recommendation = await recommendSSG({
899 |     analysisId: analysis.data.id,
900 |     userId: userId, // Memory system tracks user preferences
901 |   });
902 | 
903 |   console.log(
904 |     `🧠 Memory-enhanced recommendation: ${recommendation.data.recommended}`,
905 |   );
906 |   console.log(
907 |     `📈 Based on ${
908 |       recommendation.data.memoryInsights?.similarProjects || 0
909 |     } similar projects`,
910 |   );
911 |   console.log(
912 |     `🎯 Success rate: ${Math.round(
913 |       (recommendation.data.memoryInsights?.successRate || 0) * 100,
914 |     )}%`,
915 |   );
916 | 
917 |   return recommendation;
918 | }
919 | ```
920 | 
921 | These examples demonstrate the comprehensive capabilities of DocuMCP and provide practical patterns for common use cases. Use them as starting points and adapt them to your specific needs.
922 | 
```

--------------------------------------------------------------------------------
/src/tools/detect-gaps.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { z } from "zod";
  2 | import { promises as fs } from "fs";
  3 | import path from "path";
  4 | import { MCPToolResponse, formatMCPResponse } from "../types/api.js";
  5 | import { analyzeRepository } from "./analyze-repository.js";
  6 | import { handleValidateDiataxisContent } from "./validate-content.js";
  7 | import { CodeScanner, CodeAnalysisResult } from "../utils/code-scanner.js";
  8 | 
  9 | const inputSchema = z.object({
 10 |   repositoryPath: z.string().describe("Path to the repository to analyze"),
 11 |   documentationPath: z
 12 |     .string()
 13 |     .optional()
 14 |     .describe("Path to existing documentation (if any)"),
 15 |   analysisId: z
 16 |     .string()
 17 |     .optional()
 18 |     .describe("Optional existing analysis ID to reuse"),
 19 |   depth: z
 20 |     .enum(["quick", "standard", "comprehensive"])
 21 |     .optional()
 22 |     .default("standard"),
 23 | });
 24 | 
 25 | interface DocumentationGap {
 26 |   category: "tutorials" | "how-to" | "reference" | "explanation" | "general";
 27 |   gapType:
 28 |     | "missing_section"
 29 |     | "incomplete_content"
 30 |     | "outdated_info"
 31 |     | "missing_examples"
 32 |     | "poor_structure";
 33 |   description: string;
 34 |   priority: "critical" | "high" | "medium" | "low";
 35 |   recommendation: string;
 36 |   suggestedContent?: string;
 37 |   relatedFiles?: string[];
 38 |   estimatedEffort: "minimal" | "moderate" | "substantial";
 39 | }
 40 | 
 41 | interface GapAnalysisResult {
 42 |   repositoryPath: string;
 43 |   documentationPath?: string;
 44 |   analysisId: string;
 45 |   overallScore: number;
 46 |   gaps: DocumentationGap[];
 47 |   strengths: string[];
 48 |   recommendations: {
 49 |     immediate: string[];
 50 |     shortTerm: string[];
 51 |     longTerm: string[];
 52 |   };
 53 |   missingStructure: {
 54 |     directories: string[];
 55 |     files: string[];
 56 |   };
 57 |   contentCoverage: {
 58 |     tutorials: number;
 59 |     howTo: number;
 60 |     reference: number;
 61 |     explanation: number;
 62 |   };
 63 | }
 64 | 
 65 | export async function detectDocumentationGaps(
 66 |   args: unknown,
 67 | ): Promise<{ content: any[] }> {
 68 |   const startTime = Date.now();
 69 |   const {
 70 |     repositoryPath,
 71 |     documentationPath,
 72 |     analysisId: existingAnalysisId,
 73 |     depth,
 74 |   } = inputSchema.parse(args);
 75 | 
 76 |   try {
 77 |     // Step 1: Get or perform repository analysis
 78 |     let analysisId = existingAnalysisId;
 79 |     let repositoryAnalysis: any;
 80 | 
 81 |     if (!analysisId) {
 82 |       const analysisResult = await analyzeRepository({
 83 |         path: repositoryPath,
 84 |         depth,
 85 |       });
 86 | 
 87 |       if (analysisResult.content && analysisResult.content[0]) {
 88 |         // The analyze_repository tool returns the analysis data directly as JSON text
 89 |         repositoryAnalysis = JSON.parse(analysisResult.content[0].text);
 90 | 
 91 |         // Check if the analysis was successful
 92 |         if (repositoryAnalysis.success === false) {
 93 |           throw new Error("Repository analysis failed");
 94 |         }
 95 | 
 96 |         analysisId = repositoryAnalysis.id; // Use the 'id' field from the analysis
 97 |       } else {
 98 |         throw new Error("Repository analysis failed - no content returned");
 99 |       }
100 |     } else {
101 |       // Try to retrieve existing analysis (simplified for this implementation)
102 |       // In a full implementation, this would retrieve from persistent storage
103 |     }
104 | 
105 |     // Step 2: Perform deep code analysis
106 |     const codeScanner = new CodeScanner(repositoryPath);
107 |     const codeAnalysis = await codeScanner.analyzeRepository();
108 | 
109 |     // Step 3: Analyze existing documentation structure
110 |     const documentationAnalysis = await analyzeExistingDocumentation(
111 |       documentationPath || path.join(repositoryPath, "docs"),
112 |     );
113 | 
114 |     // Step 4: Perform content validation if documentation exists
115 |     let validationResult: any = null;
116 |     if (documentationAnalysis.exists && documentationPath) {
117 |       try {
118 |         const validation = await handleValidateDiataxisContent({
119 |           contentPath: documentationPath,
120 |           analysisId: analysisId,
121 |           validationType: "all",
122 |           includeCodeValidation: true,
123 |           confidence: "moderate",
124 |         });
125 | 
126 |         if (
127 |           validation &&
128 |           (validation as any).content &&
129 |           (validation as any).content[0]
130 |         ) {
131 |           const validationData = JSON.parse(
132 |             (validation as any).content[0].text,
133 |           );
134 |           if (validationData.success) {
135 |             validationResult = validationData.data;
136 |           }
137 |         }
138 |       } catch (error) {
139 |         // Validation errors are non-fatal - continue without validation data
140 |         console.warn(
141 |           "Content validation failed, continuing without validation data:",
142 |           error,
143 |         );
144 |       }
145 |     }
146 | 
147 |     // Step 5: Identify gaps based on project and code analysis
148 |     const gaps = identifyDocumentationGaps(
149 |       repositoryAnalysis,
150 |       documentationAnalysis,
151 |       validationResult,
152 |       codeAnalysis,
153 |     );
154 | 
155 |     // Step 6: Generate recommendations
156 |     const recommendations = generateRecommendations(
157 |       gaps,
158 |       repositoryAnalysis,
159 |       codeAnalysis,
160 |     );
161 | 
162 |     // Step 7: Calculate coverage scores
163 |     const contentCoverage = calculateContentCoverage(
164 |       documentationAnalysis,
165 |       gaps,
166 |     );
167 | 
168 |     const gapAnalysis: GapAnalysisResult = {
169 |       repositoryPath,
170 |       documentationPath,
171 |       analysisId: analysisId || "unknown",
172 |       overallScore: calculateOverallScore(gaps, contentCoverage),
173 |       gaps,
174 |       strengths: identifyStrengths(documentationAnalysis, validationResult),
175 |       recommendations,
176 |       missingStructure: identifyMissingStructure(documentationAnalysis),
177 |       contentCoverage,
178 |     };
179 | 
180 |     const response: MCPToolResponse<typeof gapAnalysis> = {
181 |       success: true,
182 |       data: gapAnalysis,
183 |       metadata: {
184 |         toolVersion: "1.0.0",
185 |         executionTime: Date.now() - startTime,
186 |         timestamp: new Date().toISOString(),
187 |       },
188 |       recommendations: [
189 |         {
190 |           type:
191 |             gapAnalysis.overallScore < 60
192 |               ? "critical"
193 |               : gapAnalysis.overallScore < 80
194 |                 ? "warning"
195 |                 : "info",
196 |           title: "Documentation Gap Analysis Complete",
197 |           description: `Found ${gaps.length} gaps. Overall documentation score: ${gapAnalysis.overallScore}%`,
198 |         },
199 |       ],
200 |       nextSteps: recommendations.immediate.map((rec) => ({
201 |         action: rec,
202 |         toolRequired: getRecommendedTool(rec),
203 |         description: rec,
204 |         priority: "high" as const,
205 |       })),
206 |     };
207 | 
208 |     return formatMCPResponse(response);
209 |   } catch (error) {
210 |     const errorResponse: MCPToolResponse = {
211 |       success: false,
212 |       error: {
213 |         code: "GAP_DETECTION_FAILED",
214 |         message: `Failed to detect documentation gaps: ${error}`,
215 |         resolution: "Ensure repository and documentation paths are accessible",
216 |       },
217 |       metadata: {
218 |         toolVersion: "1.0.0",
219 |         executionTime: Date.now() - startTime,
220 |         timestamp: new Date().toISOString(),
221 |       },
222 |     };
223 |     return formatMCPResponse(errorResponse);
224 |   }
225 | }
226 | 
227 | async function analyzeExistingDocumentation(docsPath: string) {
228 |   try {
229 |     const stats = await fs.stat(docsPath);
230 |     if (!stats.isDirectory()) {
231 |       return { exists: false, structure: {}, files: [] };
232 |     }
233 | 
234 |     const structure = {
235 |       tutorials: { exists: false, files: [] as string[] },
236 |       "how-to": { exists: false, files: [] as string[] },
237 |       reference: { exists: false, files: [] as string[] },
238 |       explanation: { exists: false, files: [] as string[] },
239 |     };
240 | 
241 |     const allFiles: string[] = [];
242 | 
243 |     // Check for Diataxis structure
244 |     for (const [category] of Object.entries(structure)) {
245 |       const categoryPath = path.join(docsPath, category);
246 |       try {
247 |         const categoryStats = await fs.stat(categoryPath);
248 |         if (categoryStats.isDirectory()) {
249 |           structure[category as keyof typeof structure].exists = true;
250 |           const files = await fs.readdir(categoryPath);
251 |           const mdFiles = files.filter(
252 |             (f) => f.endsWith(".md") || f.endsWith(".mdx"),
253 |           );
254 |           structure[category as keyof typeof structure].files = mdFiles;
255 |           allFiles.push(...mdFiles.map((f) => path.join(category, f)));
256 |         }
257 |       } catch {
258 |         // Category doesn't exist
259 |       }
260 |     }
261 | 
262 |     // Also check root level files
263 |     const rootFiles = await fs.readdir(docsPath);
264 |     const rootMdFiles = rootFiles.filter(
265 |       (f) => f.endsWith(".md") || f.endsWith(".mdx"),
266 |     );
267 |     allFiles.push(...rootMdFiles);
268 | 
269 |     return {
270 |       exists: true,
271 |       structure,
272 |       files: allFiles,
273 |       hasRootIndex:
274 |         rootFiles.includes("index.md") || rootFiles.includes("README.md"),
275 |     };
276 |   } catch {
277 |     return { exists: false, structure: {}, files: [] };
278 |   }
279 | }
280 | 
281 | function identifyDocumentationGaps(
282 |   repoAnalysis: any,
283 |   docsAnalysis: any,
284 |   validationResult: any,
285 |   codeAnalysis: CodeAnalysisResult,
286 | ): DocumentationGap[] {
287 |   const gaps: DocumentationGap[] = [];
288 | 
289 |   // Check for missing Diataxis structure
290 |   if (!docsAnalysis.exists) {
291 |     gaps.push({
292 |       category: "general",
293 |       gapType: "missing_section",
294 |       description: "No documentation directory found",
295 |       priority: "critical",
296 |       recommendation:
297 |         "Create documentation structure using setup_structure tool",
298 |       estimatedEffort: "moderate",
299 |     });
300 |     return gaps; // If no docs exist, return early
301 |   }
302 | 
303 |   const diataxisCategories = [
304 |     "tutorials",
305 |     "how-to",
306 |     "reference",
307 |     "explanation",
308 |   ];
309 |   for (const category of diataxisCategories) {
310 |     if (!docsAnalysis.structure[category]?.exists) {
311 |       gaps.push({
312 |         category: category as any,
313 |         gapType: "missing_section",
314 |         description: `Missing ${category} documentation section`,
315 |         priority:
316 |           category === "tutorials" || category === "reference"
317 |             ? "high"
318 |             : "medium",
319 |         recommendation: `Create ${category} directory and add relevant content`,
320 |         estimatedEffort: "moderate",
321 |       });
322 |     } else if (docsAnalysis.structure[category].files.length === 0) {
323 |       gaps.push({
324 |         category: category as any,
325 |         gapType: "incomplete_content",
326 |         description: `${category} section exists but has no content`,
327 |         priority: "high",
328 |         recommendation: `Add content to ${category} section using populate_diataxis_content tool`,
329 |         estimatedEffort: "substantial",
330 |       });
331 |     }
332 |   }
333 | 
334 |   // Code-based gaps using actual code analysis
335 |   // Check for API documentation gaps based on actual endpoints found
336 |   if (
337 |     codeAnalysis.apiEndpoints.length > 0 &&
338 |     !hasApiDocumentation(docsAnalysis)
339 |   ) {
340 |     gaps.push({
341 |       category: "reference",
342 |       gapType: "missing_section",
343 |       description: `Found ${codeAnalysis.apiEndpoints.length} API endpoints but no API documentation`,
344 |       priority: "critical",
345 |       recommendation:
346 |         "Create API reference documentation for discovered endpoints",
347 |       estimatedEffort: "substantial",
348 |       relatedFiles: [
349 |         ...new Set(codeAnalysis.apiEndpoints.map((ep) => ep.filePath)),
350 |       ],
351 |     });
352 |   }
353 | 
354 |   // Check for undocumented API endpoints
355 |   const undocumentedEndpoints = codeAnalysis.apiEndpoints.filter(
356 |     (ep) => !ep.hasDocumentation,
357 |   );
358 |   if (undocumentedEndpoints.length > 0) {
359 |     gaps.push({
360 |       category: "reference",
361 |       gapType: "missing_examples",
362 |       description: `${undocumentedEndpoints.length} API endpoints lack inline documentation`,
363 |       priority: "high",
364 |       recommendation: "Add JSDoc comments to API endpoint handlers",
365 |       estimatedEffort: "moderate",
366 |       relatedFiles: [
367 |         ...new Set(undocumentedEndpoints.map((ep) => ep.filePath)),
368 |       ],
369 |     });
370 |   }
371 | 
372 |   // Check for class/interface documentation
373 |   const undocumentedClasses = codeAnalysis.classes.filter(
374 |     (cls) => cls.exported && !cls.hasJSDoc,
375 |   );
376 |   if (undocumentedClasses.length > 0) {
377 |     gaps.push({
378 |       category: "reference",
379 |       gapType: "incomplete_content",
380 |       description: `${undocumentedClasses.length} exported classes lack documentation`,
381 |       priority: "medium",
382 |       recommendation:
383 |         "Add JSDoc comments to exported classes and create API reference",
384 |       estimatedEffort: "moderate",
385 |       relatedFiles: [
386 |         ...new Set(undocumentedClasses.map((cls) => cls.filePath)),
387 |       ],
388 |     });
389 |   }
390 | 
391 |   // Check for interface documentation
392 |   const undocumentedInterfaces = codeAnalysis.interfaces.filter(
393 |     (iface) => iface.exported && !iface.hasJSDoc,
394 |   );
395 |   if (undocumentedInterfaces.length > 0) {
396 |     gaps.push({
397 |       category: "reference",
398 |       gapType: "incomplete_content",
399 |       description: `${undocumentedInterfaces.length} exported interfaces lack documentation`,
400 |       priority: "medium",
401 |       recommendation:
402 |         "Add JSDoc comments to exported interfaces and create type documentation",
403 |       estimatedEffort: "moderate",
404 |       relatedFiles: [
405 |         ...new Set(undocumentedInterfaces.map((iface) => iface.filePath)),
406 |       ],
407 |     });
408 |   }
409 | 
410 |   // Check for function documentation
411 |   const undocumentedFunctions = codeAnalysis.functions.filter(
412 |     (func) => func.exported && !func.hasJSDoc,
413 |   );
414 |   if (undocumentedFunctions.length > 0) {
415 |     gaps.push({
416 |       category: "reference",
417 |       gapType: "incomplete_content",
418 |       description: `${undocumentedFunctions.length} exported functions lack documentation`,
419 |       priority: "medium",
420 |       recommendation:
421 |         "Add JSDoc comments to exported functions and create API reference",
422 |       estimatedEffort: "substantial",
423 |       relatedFiles: [
424 |         ...new Set(undocumentedFunctions.map((func) => func.filePath)),
425 |       ],
426 |     });
427 |   }
428 | 
429 |   // Framework-specific documentation gaps
430 |   if (
431 |     codeAnalysis.frameworks.includes("React") &&
432 |     !hasFrameworkDocumentation(docsAnalysis, "react")
433 |   ) {
434 |     gaps.push({
435 |       category: "how-to",
436 |       gapType: "missing_section",
437 |       description:
438 |         "React framework detected but no React-specific documentation found",
439 |       priority: "medium",
440 |       recommendation: "Create React component usage and development guides",
441 |       estimatedEffort: "moderate",
442 |     });
443 |   }
444 | 
445 |   if (
446 |     codeAnalysis.frameworks.includes("Express") &&
447 |     !hasFrameworkDocumentation(docsAnalysis, "express")
448 |   ) {
449 |     gaps.push({
450 |       category: "how-to",
451 |       gapType: "missing_section",
452 |       description:
453 |         "Express framework detected but no Express-specific documentation found",
454 |       priority: "medium",
455 |       recommendation: "Create Express server setup and API development guides",
456 |       estimatedEffort: "moderate",
457 |     });
458 |   }
459 | 
460 |   // Test documentation gaps
461 |   if (codeAnalysis.hasTests && !hasTestingDocumentation(docsAnalysis)) {
462 |     gaps.push({
463 |       category: "how-to",
464 |       gapType: "missing_section",
465 |       description: "Test files found but no testing documentation",
466 |       priority: "medium",
467 |       recommendation: "Create testing setup and contribution guides",
468 |       estimatedEffort: "moderate",
469 |       relatedFiles: codeAnalysis.testFiles,
470 |     });
471 |   }
472 | 
473 |   // Technology-specific gaps based on repository analysis (fallback)
474 |   if (repoAnalysis) {
475 |     // Check for setup/installation guides
476 |     if (repoAnalysis.packageManager && !hasInstallationGuide(docsAnalysis)) {
477 |       gaps.push({
478 |         category: "tutorials",
479 |         gapType: "missing_section",
480 |         description: "Package manager detected but no installation guide found",
481 |         priority: "high",
482 |         recommendation: "Create installation and setup tutorial",
483 |         estimatedEffort: "moderate",
484 |       });
485 |     }
486 | 
487 |     // Check for Docker documentation
488 |     if (repoAnalysis.hasDocker && !hasDockerDocumentation(docsAnalysis)) {
489 |       gaps.push({
490 |         category: "how-to",
491 |         gapType: "missing_section",
492 |         description: "Docker configuration found but no Docker documentation",
493 |         priority: "medium",
494 |         recommendation: "Add Docker deployment and development guides",
495 |         estimatedEffort: "moderate",
496 |       });
497 |     }
498 | 
499 |     // Check for CI/CD documentation
500 |     if (repoAnalysis.hasCICD && !hasCICDDocumentation(docsAnalysis)) {
501 |       gaps.push({
502 |         category: "explanation",
503 |         gapType: "missing_section",
504 |         description: "CI/CD configuration found but no related documentation",
505 |         priority: "medium",
506 |         recommendation: "Document CI/CD processes and deployment workflows",
507 |         estimatedEffort: "moderate",
508 |       });
509 |     }
510 |   }
511 | 
512 |   // Add validation-based gaps
513 |   if (validationResult?.validationResults) {
514 |     for (const result of validationResult.validationResults) {
515 |       if (result.status === "fail") {
516 |         gaps.push({
517 |           category: "general",
518 |           gapType: "poor_structure",
519 |           description: result.message,
520 |           priority: "medium",
521 |           recommendation: result.recommendation || "Fix validation issue",
522 |           estimatedEffort: "minimal",
523 |         });
524 |       }
525 |     }
526 |   }
527 | 
528 |   return gaps;
529 | }
530 | 
531 | function hasApiDocumentation(docsAnalysis: any): boolean {
532 |   const allFiles = docsAnalysis.files || [];
533 |   return allFiles.some(
534 |     (file: string) =>
535 |       file.toLowerCase().includes("api") ||
536 |       file.toLowerCase().includes("endpoint") ||
537 |       file.toLowerCase().includes("swagger") ||
538 |       file.toLowerCase().includes("openapi"),
539 |   );
540 | }
541 | 
542 | function hasInstallationGuide(docsAnalysis: any): boolean {
543 |   const allFiles = docsAnalysis.files || [];
544 |   return allFiles.some(
545 |     (file: string) =>
546 |       file.toLowerCase().includes("install") ||
547 |       file.toLowerCase().includes("setup") ||
548 |       file.toLowerCase().includes("getting-started") ||
549 |       file.toLowerCase().includes("quickstart"),
550 |   );
551 | }
552 | 
553 | function hasDockerDocumentation(docsAnalysis: any): boolean {
554 |   const allFiles = docsAnalysis.files || [];
555 |   return allFiles.some(
556 |     (file: string) =>
557 |       file.toLowerCase().includes("docker") ||
558 |       file.toLowerCase().includes("container") ||
559 |       file.toLowerCase().includes("compose"),
560 |   );
561 | }
562 | 
563 | function hasCICDDocumentation(docsAnalysis: any): boolean {
564 |   const allFiles = docsAnalysis.files || [];
565 |   return allFiles.some(
566 |     (file: string) =>
567 |       file.toLowerCase().includes("ci") ||
568 |       file.toLowerCase().includes("cd") ||
569 |       file.toLowerCase().includes("deploy") ||
570 |       file.toLowerCase().includes("workflow") ||
571 |       file.toLowerCase().includes("pipeline"),
572 |   );
573 | }
574 | 
575 | function hasFrameworkDocumentation(
576 |   docsAnalysis: any,
577 |   framework: string,
578 | ): boolean {
579 |   const allFiles = docsAnalysis.files || [];
580 |   return allFiles.some(
581 |     (file: string) =>
582 |       file.toLowerCase().includes(framework.toLowerCase()) ||
583 |       file.toLowerCase().includes(`${framework.toLowerCase()}-guide`) ||
584 |       file.toLowerCase().includes(`${framework.toLowerCase()}-setup`),
585 |   );
586 | }
587 | 
588 | function hasTestingDocumentation(docsAnalysis: any): boolean {
589 |   const allFiles = docsAnalysis.files || [];
590 |   return allFiles.some(
591 |     (file: string) =>
592 |       file.toLowerCase().includes("test") ||
593 |       file.toLowerCase().includes("testing") ||
594 |       file.toLowerCase().includes("jest") ||
595 |       file.toLowerCase().includes("spec") ||
596 |       file.toLowerCase().includes("unit-test") ||
597 |       file.toLowerCase().includes("integration-test"),
598 |   );
599 | }
600 | 
601 | function generateRecommendations(
602 |   gaps: DocumentationGap[],
603 |   _repoAnalysis: any,
604 |   codeAnalysis: CodeAnalysisResult,
605 | ) {
606 |   const immediate: string[] = [];
607 |   const shortTerm: string[] = [];
608 |   const longTerm: string[] = [];
609 | 
610 |   const criticalGaps = gaps.filter((g) => g.priority === "critical");
611 |   const highGaps = gaps.filter((g) => g.priority === "high");
612 |   const mediumGaps = gaps.filter((g) => g.priority === "medium");
613 | 
614 |   // Immediate (Critical gaps)
615 |   criticalGaps.forEach((gap) => {
616 |     immediate.push(gap.recommendation);
617 |   });
618 | 
619 |   // Short-term (High priority gaps)
620 |   highGaps.forEach((gap) => {
621 |     shortTerm.push(gap.recommendation);
622 |   });
623 | 
624 |   // Long-term (Medium/Low priority gaps)
625 |   mediumGaps.forEach((gap) => {
626 |     longTerm.push(gap.recommendation);
627 |   });
628 | 
629 |   // Add code-analysis-based recommendations
630 |   if (codeAnalysis.apiEndpoints.length > 0) {
631 |     longTerm.push(
632 |       `Consider generating OpenAPI/Swagger documentation for ${codeAnalysis.apiEndpoints.length} API endpoints`,
633 |     );
634 |   }
635 | 
636 |   if (
637 |     codeAnalysis.functions.length +
638 |       codeAnalysis.classes.length +
639 |       codeAnalysis.interfaces.length >
640 |     50
641 |   ) {
642 |     longTerm.push(
643 |       "Consider using automated documentation tools like TypeDoc for large codebases",
644 |     );
645 |   }
646 | 
647 |   // Add general recommendations
648 |   if (immediate.length === 0 && shortTerm.length === 0) {
649 |     immediate.push(
650 |       "Documentation structure looks good - consider content enhancement",
651 |     );
652 |   }
653 | 
654 |   return { immediate, shortTerm, longTerm };
655 | }
656 | 
657 | function calculateContentCoverage(docsAnalysis: any, gaps: DocumentationGap[]) {
658 |   const categories = ["tutorials", "howTo", "reference", "explanation"];
659 |   const coverage: any = {};
660 | 
661 |   categories.forEach((category) => {
662 |     const categoryKey = category === "howTo" ? "how-to" : category;
663 |     const hasContent =
664 |       docsAnalysis.structure?.[categoryKey]?.exists &&
665 |       docsAnalysis.structure[categoryKey].files.length > 0;
666 |     const hasGaps = gaps.some((g) => g.category === categoryKey);
667 | 
668 |     if (hasContent && !hasGaps) {
669 |       coverage[category] = 100;
670 |     } else if (hasContent && hasGaps) {
671 |       coverage[category] = 60;
672 |     } else {
673 |       coverage[category] = 0;
674 |     }
675 |   });
676 | 
677 |   return coverage;
678 | }
679 | 
680 | function calculateOverallScore(
681 |   gaps: DocumentationGap[],
682 |   contentCoverage: any,
683 | ): number {
684 |   const coverageScore =
685 |     Object.values(contentCoverage).reduce(
686 |       (acc: number, val: any) => acc + val,
687 |       0,
688 |     ) / 4;
689 |   const gapPenalty = gaps.length * 5; // Each gap reduces score by 5
690 |   const criticalPenalty =
691 |     gaps.filter((g) => g.priority === "critical").length * 15; // Critical gaps have higher penalty
692 | 
693 |   return Math.max(
694 |     0,
695 |     Math.min(100, coverageScore - gapPenalty - criticalPenalty),
696 |   );
697 | }
698 | 
699 | function identifyStrengths(docsAnalysis: any, validationResult: any): string[] {
700 |   const strengths: string[] = [];
701 | 
702 |   if (docsAnalysis.hasRootIndex) {
703 |     strengths.push("Has main documentation index file");
704 |   }
705 | 
706 |   const existingSections = Object.entries(docsAnalysis.structure || {})
707 |     .filter(([_, data]: [string, any]) => data.exists && data.files.length > 0)
708 |     .map(([section]) => section);
709 | 
710 |   if (existingSections.length > 0) {
711 |     strengths.push(`Well-organized sections: ${existingSections.join(", ")}`);
712 |   }
713 | 
714 |   if (validationResult?.overallScore > 80) {
715 |     strengths.push("High-quality existing content");
716 |   }
717 | 
718 |   return strengths;
719 | }
720 | 
721 | function identifyMissingStructure(docsAnalysis: any) {
722 |   const missingDirectories: string[] = [];
723 |   const missingFiles: string[] = [];
724 | 
725 |   const expectedDirectories = [
726 |     "tutorials",
727 |     "how-to",
728 |     "reference",
729 |     "explanation",
730 |   ];
731 |   expectedDirectories.forEach((dir) => {
732 |     if (!docsAnalysis.structure?.[dir]?.exists) {
733 |       missingDirectories.push(dir);
734 |     }
735 |   });
736 | 
737 |   if (!docsAnalysis.hasRootIndex) {
738 |     missingFiles.push("index.md");
739 |   }
740 | 
741 |   return { directories: missingDirectories, files: missingFiles };
742 | }
743 | 
744 | function getRecommendedTool(recommendation: string): string {
745 |   if (recommendation.includes("setup_structure")) return "setup_structure";
746 |   if (recommendation.includes("populate_diataxis_content"))
747 |     return "populate_diataxis_content";
748 |   if (recommendation.includes("validate_diataxis_content"))
749 |     return "validate_diataxis_content";
750 |   if (recommendation.includes("generate_config")) return "generate_config";
751 |   if (recommendation.includes("deploy_pages")) return "deploy_pages";
752 |   return "manual";
753 | }
754 | 
```

--------------------------------------------------------------------------------
/tests/benchmarks/performance.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { PerformanceBenchmarker } from "../../src/benchmarks/performance";
  2 | import * as fs from "fs/promises";
  3 | import * as path from "path";
  4 | import * as os from "os";
  5 | 
  6 | describe("Performance Benchmarking System", () => {
  7 |   let benchmarker: PerformanceBenchmarker;
  8 |   let tempDir: string;
  9 | 
  // Fresh benchmarker instance and an isolated temp workspace per test.
  beforeEach(async () => {
    benchmarker = new PerformanceBenchmarker();
    tempDir = await fs.mkdtemp(path.join(os.tmpdir(), "perf-test-"));
  });
 13 |   });
 14 | 
  // Best-effort removal of the temp workspace; failures are non-fatal so a
  // cleanup problem never masks the real test outcome.
  afterEach(async () => {
    try {
      await fs.rm(tempDir, { recursive: true, force: true });
    } catch (error) {
      // Ignore cleanup errors
    }
  });
 21 |   });
 22 | 
 23 |   // Helper function to create test repositories
 24 |   async function createTestRepo(
 25 |     name: string,
 26 |     fileCount: number,
 27 |   ): Promise<string> {
 28 |     const repoPath = path.join(tempDir, name);
 29 |     await fs.mkdir(repoPath, { recursive: true });
 30 | 
 31 |     // Create package.json to make it look like a real project
 32 |     await fs.writeFile(
 33 |       path.join(repoPath, "package.json"),
 34 |       JSON.stringify({ name, version: "1.0.0" }, null, 2),
 35 |     );
 36 | 
 37 |     // Create additional files to reach the target count
 38 |     for (let i = 1; i < fileCount; i++) {
 39 |       const fileName = `file${i}.js`;
 40 |       await fs.writeFile(
 41 |         path.join(repoPath, fileName),
 42 |         `// Test file ${i}\nconsole.log('Hello from file ${i}');\n`,
 43 |       );
 44 |     }
 45 | 
 46 |     return repoPath;
 47 |   }
 48 | 
  // Verifies the file-count → size-category mapping reported by the
  // benchmarker, and that fileCount reflects the exact number created.
  describe("Repository Size Categorization", () => {
    // A 25-file repo is reported as "small".
    it("should categorize small repositories correctly", async () => {
      const smallRepoPath = await createTestRepo("small-repo", 25);
      const result = await benchmarker.benchmarkRepository(smallRepoPath);

      expect(result.repoSize).toBe("small");
      expect(result.fileCount).toBe(25);
    });

    // A 250-file repo is reported as "medium".
    it("should categorize medium repositories correctly", async () => {
      const mediumRepoPath = await createTestRepo("medium-repo", 250);
      const result = await benchmarker.benchmarkRepository(mediumRepoPath);

      expect(result.repoSize).toBe("medium");
      expect(result.fileCount).toBe(250);
    });

    // A 1200-file repo is reported as "large".
    it("should categorize large repositories correctly", async () => {
      const largeRepoPath = await createTestRepo("large-repo", 1200);
      const result = await benchmarker.benchmarkRepository(largeRepoPath);

      expect(result.repoSize).toBe("large");
      expect(result.fileCount).toBe(1200);
    });
  });
 74 | 
  // Checks the timing/ratio/memory fields reported for a single run.
  describe("Performance Measurement", () => {
    // Execution time is present and a non-negative number.
    it("should measure execution time accurately", async () => {
      const testRepoPath = await createTestRepo("timing-test", 10);
      const result = await benchmarker.benchmarkRepository(testRepoPath);

      expect(result.executionTime).toBeGreaterThanOrEqual(0);
      expect(typeof result.executionTime).toBe("number");
    });

    // Performance ratio is reported as a 0-100 value.
    it("should calculate performance ratios correctly", async () => {
      const testRepoPath = await createTestRepo("ratio-test", 50);
      const result = await benchmarker.benchmarkRepository(testRepoPath);

      expect(result.performanceRatio).toBeGreaterThanOrEqual(0);
      expect(result.performanceRatio).toBeLessThanOrEqual(100);
    });

    // Memory usage fields are populated; sign is not asserted because GC
    // can make the heapUsed delta negative.
    it("should track memory usage", async () => {
      const testRepoPath = await createTestRepo("memory-test", 30);
      const result = await benchmarker.benchmarkRepository(testRepoPath);

      expect(result.details.memoryUsage).toBeDefined();
      // Memory delta can be negative due to GC, just verify it's tracked
      expect(typeof result.details.memoryUsage.heapUsed).toBe("number");
      expect(result.details.memoryUsage.rss).toBeDefined();
      expect(result.details.memoryUsage.heapTotal).toBeDefined();
    });
  });
103 | 
  // PERF-001: analysis must finish within size-dependent time budgets.
  describe("PERF-001 Compliance", () => {
    // A tiny repo must complete within the 1s small-repo budget and be
    // marked as passing.
    it("should pass for small repositories under 1 second", async () => {
      const testRepoPath = await createTestRepo("perf-test", 10);
      const result = await benchmarker.benchmarkRepository(testRepoPath);

      expect(result.passed).toBe(true);
      expect(result.executionTime).toBeLessThan(1000);
    });

    // Target budgets per size class: 1s (small) / 10s (medium) / 60s (large).
    it("should have correct performance targets", async () => {
      const smallRepo = await createTestRepo("small-perf", 50);
      const mediumRepo = await createTestRepo("medium-perf", 500);
      const largeRepo = await createTestRepo("large-perf", 1500);

      const smallResult = await benchmarker.benchmarkRepository(smallRepo);
      const mediumResult = await benchmarker.benchmarkRepository(mediumRepo);
      const largeResult = await benchmarker.benchmarkRepository(largeRepo);

      expect(smallResult.targetTime).toBe(1000); // 1 second for small
      expect(mediumResult.targetTime).toBe(10000); // 10 seconds for medium
      expect(largeResult.targetTime).toBe(60000); // 60 seconds for large
    });
  });
127 | 
  // Multi-repository suite runs and their summary aggregation.
  describe("Benchmark Suite", () => {
    // Each provided repo yields one result; suite metadata is populated.
    it("should run multiple repository benchmarks", async () => {
      const testRepos = [
        {
          path: await createTestRepo("suite-test-1", 25),
          name: "Suite Test 1",
        },
        {
          path: await createTestRepo("suite-test-2", 75),
          name: "Suite Test 2",
        },
      ];

      const suite = await benchmarker.runBenchmarkSuite(testRepos);

      expect(suite.results.length).toBe(2);
      expect(suite.testName).toBeDefined();
      expect(suite.overallPassed).toBeDefined();
    });

    // Per-size-class counts in the summary must add up to the number of
    // repos benchmarked.
    it("should generate accurate summaries", async () => {
      const testRepos = [
        { path: await createTestRepo("small-repo", 25), name: "Small Repo" },
        { path: await createTestRepo("medium-repo", 250), name: "Medium Repo" },
      ];

      const suite = await benchmarker.runBenchmarkSuite(testRepos);

      expect(suite.summary).toBeDefined();
      const totalRepos =
        suite.summary.smallRepos.count +
        suite.summary.mediumRepos.count +
        suite.summary.largeRepos.count;
      expect(totalRepos).toBe(2);
      const totalPassed =
        suite.summary.smallRepos.passed +
        suite.summary.mediumRepos.passed +
        suite.summary.largeRepos.passed;
      expect(totalPassed).toBeGreaterThanOrEqual(0);
    });
  });
169 | 
  // JSON export of suite results plus environment metadata.
  describe("Result Export", () => {
    // Exported file must round-trip through disk as valid JSON containing
    // the suite, system info, targets, and a timestamp.
    it("should export benchmark results to JSON", async () => {
      const testRepos = [
        { path: await createTestRepo("export-test", 20), name: "Export Test" },
      ];

      const suite = await benchmarker.runBenchmarkSuite(testRepos);
      const exportPath = path.join(tempDir, "benchmark-results.json");

      await benchmarker.exportResults(suite, exportPath);

      const exportedContent = await fs.readFile(exportPath, "utf-8");
      const exportedData = JSON.parse(exportedContent);

      expect(exportedData.suite).toBeDefined();
      expect(exportedData.systemInfo).toBeDefined();
      expect(exportedData.performanceTargets).toBeDefined();
      expect(exportedData.timestamp).toBeDefined();
    });
  });
190 | 
191 |   describe("Error Handling", () => {
    // A missing path degrades to an empty (0-file, "small") result rather
    // than rejecting.
    it("should handle non-existent repository paths gracefully", async () => {
      const nonExistentPath = path.join(tempDir, "does-not-exist");

      const result = await benchmarker.benchmarkRepository(nonExistentPath);

      // Should handle gracefully with 0 files
      expect(result.fileCount).toBe(0);
      expect(result.repoSize).toBe("small");
      expect(result.executionTime).toBeGreaterThanOrEqual(0);
      expect(result.passed).toBe(true); // Fast execution passes performance target
    });
203 | 
    // POSIX-only: a chmod-000 directory should degrade to a 0-file result
    // instead of throwing; permissions are restored in finally so afterEach
    // cleanup can remove the directory.
    // NOTE(review): likely misbehaves when run as root (root ignores mode
    // bits, so the directory stays readable) — confirm CI never runs as root.
    it("should handle permission denied scenarios gracefully", async () => {
      if (process.platform === "win32") {
        // Skip on Windows as permission handling is different
        return;
      }

      const restrictedPath = path.join(tempDir, "restricted");
      await fs.mkdir(restrictedPath, { recursive: true });

      try {
        await fs.chmod(restrictedPath, 0o000);

        const result = await benchmarker.benchmarkRepository(restrictedPath);

        // Should handle gracefully with 0 files
        expect(result.fileCount).toBe(0);
        expect(result.repoSize).toBe("small");
        expect(result.executionTime).toBeGreaterThanOrEqual(0);
      } finally {
        // Restore permissions for cleanup
        await fs.chmod(restrictedPath, 0o755);
      }
    });
227 | 
228 |     it("should handle empty repositories", async () => {
229 |       const emptyRepoPath = path.join(tempDir, "empty-repo");
230 |       await fs.mkdir(emptyRepoPath, { recursive: true });
231 | 
232 |       const result = await benchmarker.benchmarkRepository(emptyRepoPath);
233 | 
234 |       expect(result.fileCount).toBe(0);
235 |       expect(result.repoSize).toBe("small");
236 |       expect(result.executionTime).toBeGreaterThanOrEqual(0);
237 |     });
238 | 
239 |     it("should handle suite with all valid repositories", async () => {
240 |       const validRepo1 = await createTestRepo("valid-repo-1", 10);
241 |       const validRepo2 = await createTestRepo("valid-repo-2", 20);
242 | 
243 |       const testRepos = [
244 |         { path: validRepo1, name: "Valid Repo 1" },
245 |         { path: validRepo2, name: "Valid Repo 2" },
246 |       ];
247 | 
248 |       const suite = await benchmarker.runBenchmarkSuite(testRepos);
249 | 
250 |       expect(suite.results.length).toBe(2);
251 |       expect(suite.overallPassed).toBeDefined();
252 |       expect(typeof suite.averagePerformance).toBe("number");
253 |     });
254 | 
255 |     it("should handle benchmark execution errors in try-catch", async () => {
256 |       // Test the error handling path by mocking analyzeRepository to throw
257 |       const originalAnalyze =
258 |         require("../../src/tools/analyze-repository").analyzeRepository;
259 |       const mockAnalyze = jest.fn().mockRejectedValue(new Error("Mock error"));
260 | 
261 |       // Replace the function temporarily
262 |       require("../../src/tools/analyze-repository").analyzeRepository =
263 |         mockAnalyze;
264 | 
265 |       try {
266 |         const testRepoPath = await createTestRepo("error-test", 10);
267 | 
268 |         await expect(
269 |           benchmarker.benchmarkRepository(testRepoPath),
270 |         ).rejects.toThrow("Mock error");
271 | 
272 |         // Should still record the failed benchmark
273 |         const results = benchmarker.getResults();
274 |         expect(results.length).toBe(1);
275 |         expect(results[0].passed).toBe(false);
276 |       } finally {
277 |         // Restore original function
278 |         require("../../src/tools/analyze-repository").analyzeRepository =
279 |           originalAnalyze;
280 |       }
281 |     });
282 |   });
283 | 
284 |   describe("Utility Methods", () => {
    |     // reset() clears accumulated results; getResults() returns a defensive
    |     // copy (equal contents, distinct array instance).
285 |     it("should reset benchmark results", async () => {
286 |       const testRepoPath = await createTestRepo("reset-test", 10);
287 |       await benchmarker.benchmarkRepository(testRepoPath);
288 | 
289 |       expect(benchmarker.getResults().length).toBe(1);
290 | 
291 |       benchmarker.reset();
292 | 
293 |       expect(benchmarker.getResults().length).toBe(0);
294 |     });
295 | 
296 |     it("should return copy of results array", async () => {
297 |       const testRepoPath = await createTestRepo("copy-test", 15);
298 |       await benchmarker.benchmarkRepository(testRepoPath);
299 | 
300 |       const results1 = benchmarker.getResults();
301 |       const results2 = benchmarker.getResults();
302 | 
303 |       expect(results1).toEqual(results2);
304 |       expect(results1).not.toBe(results2); // Different array instances
305 |     });
306 | 
307 |     it("should handle different analysis depths", async () => {
    |       // Only checks both depths complete with a non-negative timing; depth
    |       // semantics are covered by the analyzer's own tests.
308 |       const testRepoPath = await createTestRepo("depth-test", 20);
309 | 
310 |       // Test with quick analysis
311 |       const quickResult = await benchmarker.benchmarkRepository(
312 |         testRepoPath,
313 |         "quick",
314 |       );
315 |       expect(quickResult.executionTime).toBeGreaterThanOrEqual(0);
316 | 
317 |       // Test with deep analysis
318 |       const deepResult = await benchmarker.benchmarkRepository(
319 |         testRepoPath,
320 |         "deep",
321 |       );
322 |       expect(deepResult.executionTime).toBeGreaterThanOrEqual(0);
323 |     });
324 |   });
325 | 
326 |   describe("Report Generation", () => {
    |     // Report output is observed by temporarily replacing console.log with a
    |     // capturing stub; the original is always restored in a finally block so
    |     // a failing assertion cannot leave the stub installed.
327 |     it("should generate detailed reports without errors", async () => {
328 |       const testRepos = [
329 |         await createTestRepo("report-small", 25),
330 |         await createTestRepo("report-medium", 250),
331 |         await createTestRepo("report-large", 1200),
332 |       ];
333 | 
334 |       const results: any[] = [];
335 |       for (const repoPath of testRepos) {
336 |         const result = await benchmarker.benchmarkRepository(repoPath);
337 |         results.push(result);
338 |       }
339 | 
340 |       const suite = benchmarker.generateSuite("Report Test", results);
341 | 
342 |       // Capture console output
343 |       const originalLog = console.log;
344 |       const logOutput: string[] = [];
345 |       console.log = (...args) => {
346 |         logOutput.push(args.join(" "));
347 |       };
348 | 
349 |       try {
350 |         benchmarker.printDetailedReport(suite);
351 | 
352 |         expect(logOutput.length).toBeGreaterThan(0);
353 |         expect(
354 |           logOutput.some((line) =>
355 |             line.includes("Performance Benchmark Report"),
356 |           ),
357 |         ).toBe(true);
358 |       } finally {
359 |         console.log = originalLog;
360 |       }
361 |     });
362 | 
363 |     it("should handle empty suite reports", async () => {
364 |       const emptySuite = benchmarker.generateSuite("Empty Suite", []);
365 | 
366 |       // Should not throw when generating report for empty suite
367 |       expect(() => benchmarker.printDetailedReport(emptySuite)).not.toThrow();
368 |     });
369 | 
370 |     it("should calculate correct averages for mixed results", async () => {
    |       // averagePerformance is expected to be a percentage-like score in
    |       // [0, 100] regardless of the mix of repo sizes.
371 |       const repo1 = await createTestRepo("avg-test-1", 10);
372 |       const repo2 = await createTestRepo("avg-test-2", 20);
373 |       const repo3 = await createTestRepo("avg-test-3", 30);
374 | 
375 |       const results = [
376 |         await benchmarker.benchmarkRepository(repo1),
377 |         await benchmarker.benchmarkRepository(repo2),
378 |         await benchmarker.benchmarkRepository(repo3),
379 |       ];
380 | 
381 |       const suite = benchmarker.generateSuite("Average Test", results);
382 | 
383 |       expect(suite.averagePerformance).toBeGreaterThanOrEqual(0);
384 |       expect(suite.averagePerformance).toBeLessThanOrEqual(100);
385 |       expect(typeof suite.averagePerformance).toBe("number");
386 |     });
387 |   });
388 | 
389 |   describe("Memory Usage Tracking", () => {
    |     // Memory figures are deltas between process snapshots, so only the field
    |     // types are asserted — garbage collection can make heap deltas negative.
390 |     it("should track memory usage differences", async () => {
391 |       const testRepoPath = await createTestRepo("memory-tracking", 100);
392 |       const result = await benchmarker.benchmarkRepository(testRepoPath);
393 | 
394 |       expect(result.details.memoryUsage).toBeDefined();
395 |       // Memory differences can be negative due to garbage collection
396 |       expect(typeof result.details.memoryUsage.heapUsed).toBe("number");
397 |       expect(typeof result.details.memoryUsage.heapTotal).toBe("number");
398 |       expect(typeof result.details.memoryUsage.rss).toBe("number");
399 |     });
400 | 
401 |     it("should handle memory tracking in error scenarios", async () => {
402 |       const emptyRepoPath = path.join(tempDir, "empty-memory-test");
403 |       await fs.mkdir(emptyRepoPath, { recursive: true });
404 | 
405 |       const result = await benchmarker.benchmarkRepository(emptyRepoPath);
406 | 
407 |       // Even in error scenarios, memory tracking should work
408 |       expect(result.details.memoryUsage).toBeDefined();
409 |       expect(typeof result.details.memoryUsage.heapUsed).toBe("number");
410 |     });
411 |   });
412 | 
413 |   describe("Edge Cases", () => {
    |     // File-walk behavior of benchmarkRepository(): special characters in
    |     // paths, depth limits, concurrent runs, and directory/hidden-file skip
    |     // rules (node_modules, vendor, dotfiles except .github).
414 |     it("should handle repositories with special characters in paths", async () => {
415 |       const specialCharRepo = path.join(tempDir, "repo with spaces & symbols!");
416 |       await fs.mkdir(specialCharRepo, { recursive: true });
417 |       await fs.writeFile(
418 |         path.join(specialCharRepo, "test.js"),
419 |         'console.log("test");',
420 |       );
421 | 
422 |       const result = await benchmarker.benchmarkRepository(specialCharRepo);
423 | 
424 |       expect(result.fileCount).toBe(1);
425 |       expect(result.executionTime).toBeGreaterThanOrEqual(0);
426 |     });
427 | 
428 |     it("should handle very deep directory structures", async () => {
429 |       const deepRepoPath = path.join(tempDir, "deep-repo");
430 |       let currentPath = deepRepoPath;
431 | 
432 |       // Create a deep directory structure
433 |       for (let i = 0; i < 10; i++) {
434 |         currentPath = path.join(currentPath, `level-${i}`);
435 |         await fs.mkdir(currentPath, { recursive: true });
436 |         await fs.writeFile(
437 |           path.join(currentPath, `file-${i}.js`),
438 |           `// Level ${i}`,
439 |         );
440 |       }
441 | 
442 |       const result = await benchmarker.benchmarkRepository(deepRepoPath);
443 | 
444 |       expect(result.fileCount).toBe(10);
445 |       expect(result.executionTime).toBeGreaterThanOrEqual(0);
446 |     });
447 | 
448 |     it("should handle concurrent benchmarking", async () => {
    |       // All three benchmarks run in parallel against the same benchmarker
    |       // instance; each must still produce an independent, complete result.
449 |       const repo1 = await createTestRepo("concurrent-1", 15);
450 |       const repo2 = await createTestRepo("concurrent-2", 25);
451 |       const repo3 = await createTestRepo("concurrent-3", 35);
452 | 
453 |       // Run benchmarks concurrently
454 |       const promises = [
455 |         benchmarker.benchmarkRepository(repo1),
456 |         benchmarker.benchmarkRepository(repo2),
457 |         benchmarker.benchmarkRepository(repo3),
458 |       ];
459 | 
460 |       const results = await Promise.all(promises);
461 | 
462 |       expect(results.length).toBe(3);
463 |       results.forEach((result) => {
464 |         expect(result.executionTime).toBeGreaterThanOrEqual(0);
465 |         expect(result.fileCount).toBeGreaterThan(0);
466 |       });
467 |     });
468 | 
469 |     it("should handle extremely deep recursion limit", async () => {
    |       // Builds 15 nested levels while traversal caps at 10, so at least the
    |       // deepest files must be excluded from the count.
470 |       const deepRepoPath = path.join(tempDir, "extremely-deep");
471 |       let currentPath = deepRepoPath;
472 | 
473 |       // Create a structure deeper than the 10-level limit
474 |       for (let i = 0; i < 15; i++) {
475 |         currentPath = path.join(currentPath, `level-${i}`);
476 |         await fs.mkdir(currentPath, { recursive: true });
477 |         await fs.writeFile(
478 |           path.join(currentPath, `file-${i}.js`),
479 |           `// Level ${i}`,
480 |         );
481 |       }
482 | 
483 |       const result = await benchmarker.benchmarkRepository(deepRepoPath);
484 | 
485 |       // Should stop at recursion limit, so fewer than 15 files
486 |       expect(result.fileCount).toBeLessThanOrEqual(10);
487 |       expect(result.executionTime).toBeGreaterThanOrEqual(0);
488 |     });
489 | 
490 |     it("should skip node_modules and vendor directories", async () => {
491 |       const repoPath = await createTestRepo("skip-dirs", 5);
492 | 
493 |       // Add node_modules and vendor directories
494 |       const nodeModulesPath = path.join(repoPath, "node_modules");
495 |       const vendorPath = path.join(repoPath, "vendor");
496 | 
497 |       await fs.mkdir(nodeModulesPath, { recursive: true });
498 |       await fs.mkdir(vendorPath, { recursive: true });
499 | 
500 |       // Add files that should be skipped
501 |       await fs.writeFile(
502 |         path.join(nodeModulesPath, "package.js"),
503 |         "module.exports = {};",
504 |       );
505 |       await fs.writeFile(path.join(vendorPath, "library.js"), "var lib = {};");
506 | 
507 |       const result = await benchmarker.benchmarkRepository(repoPath);
508 | 
509 |       // Should only count the original 5 files, not the ones in node_modules/vendor
510 |       expect(result.fileCount).toBe(5);
511 |     });
512 | 
513 |     it("should skip hidden files except .github", async () => {
514 |       const repoPath = await createTestRepo("hidden-files", 3);
515 | 
516 |       // Add hidden files and .github directory
517 |       await fs.writeFile(path.join(repoPath, ".hidden"), "hidden content");
518 |       await fs.writeFile(path.join(repoPath, ".env"), "SECRET=value");
519 | 
520 |       const githubPath = path.join(repoPath, ".github");
521 |       await fs.mkdir(githubPath, { recursive: true });
522 |       await fs.writeFile(path.join(githubPath, "workflow.yml"), "name: CI");
523 | 
524 |       const result = await benchmarker.benchmarkRepository(repoPath);
525 | 
526 |       // Should count original 3 files + 1 .github file, but not other hidden files
527 |       expect(result.fileCount).toBe(4);
528 |     });
529 |   });
530 | 
531 |   describe("Factory Function", () => {
    |     // createBenchmarker() is a convenience factory; a fresh instance must be
    |     // a PerformanceBenchmarker with an empty result history.
532 |     it("should create benchmarker instance via factory", () => {
533 |       const { createBenchmarker } = require("../../src/benchmarks/performance");
534 |       const factoryBenchmarker = createBenchmarker();
535 | 
536 |       expect(factoryBenchmarker).toBeInstanceOf(PerformanceBenchmarker);
537 |       expect(factoryBenchmarker.getResults()).toEqual([]);
538 |     });
539 |   });
540 | 
541 |   describe("Export Results Error Handling", () => {
    |     // exportResults() must propagate filesystem errors (no silent catch) and
    |     // embed the host process metadata alongside fixed performance targets.
542 |     it("should handle export to invalid path gracefully", async () => {
    |       // NOTE(review): "/invalid/nonexistent/path" assumes a POSIX root; on
    |       // Windows this resolves under the current drive — confirm the write
    |       // still fails there, or gate the test by platform.
543 |       const testRepos = [
544 |         {
545 |           path: await createTestRepo("export-error-test", 10),
546 |           name: "Export Error Test",
547 |         },
548 |       ];
549 | 
550 |       const suite = await benchmarker.runBenchmarkSuite(testRepos);
551 |       const invalidPath = path.join(
552 |         "/invalid/nonexistent/path",
553 |         "results.json",
554 |       );
555 | 
556 |       await expect(
557 |         benchmarker.exportResults(suite, invalidPath),
558 |       ).rejects.toThrow();
559 |     });
560 | 
561 |     it("should export complete system information", async () => {
    |       // Pins the exported metadata to this process's runtime values and the
    |       // documented per-size execution-time targets in milliseconds.
562 |       const testRepos = [
563 |         {
564 |           path: await createTestRepo("system-info-test", 5),
565 |           name: "System Info Test",
566 |         },
567 |       ];
568 | 
569 |       const suite = await benchmarker.runBenchmarkSuite(testRepos);
570 |       const exportPath = path.join(tempDir, "system-info-results.json");
571 | 
572 |       await benchmarker.exportResults(suite, exportPath);
573 | 
574 |       const exportedContent = await fs.readFile(exportPath, "utf-8");
575 |       const exportedData = JSON.parse(exportedContent);
576 | 
577 |       expect(exportedData.systemInfo.node).toBe(process.version);
578 |       expect(exportedData.systemInfo.platform).toBe(process.platform);
579 |       expect(exportedData.systemInfo.arch).toBe(process.arch);
580 |       expect(exportedData.systemInfo.memoryUsage).toBeDefined();
581 |       expect(exportedData.performanceTargets).toEqual({
582 |         small: 1000,
583 |         medium: 10000,
584 |         large: 60000,
585 |       });
586 |     });
587 |   });
588 | 
589 |   describe("Detailed Report Coverage", () => {
    |     // Exercises every printDetailedReport() section (small/medium/large plus
    |     // detailed results and memory lines) and verifies that size categories
    |     // with zero repositories are omitted from the output.
590 |     it("should print detailed reports with all categories", async () => {
591 |       // Create repos of all sizes to test all report sections
592 |       const smallRepo = await createTestRepo("report-small", 25);
593 |       const mediumRepo = await createTestRepo("report-medium", 250);
594 |       const largeRepo = await createTestRepo("report-large", 1200);
595 | 
596 |       const results = [
597 |         await benchmarker.benchmarkRepository(smallRepo),
598 |         await benchmarker.benchmarkRepository(mediumRepo),
599 |         await benchmarker.benchmarkRepository(largeRepo),
600 |       ];
601 | 
602 |       const suite = benchmarker.generateSuite("Complete Report Test", results);
603 | 
604 |       // Capture console output
605 |       const originalLog = console.log;
606 |       const logOutput: string[] = [];
607 |       console.log = (...args) => {
608 |         logOutput.push(args.join(" "));
609 |       };
610 | 
611 |       try {
612 |         benchmarker.printDetailedReport(suite);
613 | 
614 |         // Verify all report sections are present
615 |         const fullOutput = logOutput.join("\n");
616 |         expect(fullOutput).toContain("Performance Benchmark Report");
617 |         expect(fullOutput).toContain("Overall Status:");
618 |         expect(fullOutput).toContain("Average Performance:");
619 |         expect(fullOutput).toContain("Small (<100 files)");
620 |         expect(fullOutput).toContain("Medium (100-1000 files)");
621 |         expect(fullOutput).toContain("Large (1000+ files)");
622 |         expect(fullOutput).toContain("Detailed Results:");
623 |         expect(fullOutput).toContain("Memory:");
624 |       } finally {
    |         // Always restore console.log, even when an assertion above fails.
625 |         console.log = originalLog;
626 |       }
627 |     });
628 | 
629 |     it("should handle report generation with no results in some categories", async () => {
630 |       // Only create small repos to test empty category handling
631 |       const results = [
632 |         await benchmarker.benchmarkRepository(
633 |           await createTestRepo("small-only-1", 10),
634 |         ),
635 |         await benchmarker.benchmarkRepository(
636 |           await createTestRepo("small-only-2", 20),
637 |         ),
638 |       ];
639 | 
640 |       const suite = benchmarker.generateSuite("Small Only Test", results);
641 | 
642 |       const originalLog = console.log;
643 |       const logOutput: string[] = [];
644 |       console.log = (...args) => {
645 |         logOutput.push(args.join(" "));
646 |       };
647 | 
648 |       try {
649 |         benchmarker.printDetailedReport(suite);
650 | 
651 |         const fullOutput = logOutput.join("\n");
652 |         expect(fullOutput).toContain("Small (<100 files)");
653 |         // Medium and Large categories should not appear since count is 0
654 |         expect(fullOutput).not.toContain("Medium (100-1000 files):");
655 |         expect(fullOutput).not.toContain("Large (1000+ files):");
656 |       } finally {
657 |         console.log = originalLog;
658 |       }
659 |     });
660 |   });
661 | });
662 | 
```
Page 16/29 · First · Prev · Next · Last