# tosin2013/documcp

This is page 20 of 33. Use http://codebase.md/tosin2013/documcp?lines=true&page={x} to view the full context.

# Directory Structure

```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│   ├── agents
│   │   ├── documcp-ast.md
│   │   ├── documcp-deploy.md
│   │   ├── documcp-memory.md
│   │   ├── documcp-test.md
│   │   └── documcp-tool.md
│   ├── copilot-instructions.md
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── automated-changelog.md
│   │   ├── bug_report.md
│   │   ├── bug_report.yml
│   │   ├── documentation_issue.md
│   │   ├── feature_request.md
│   │   ├── feature_request.yml
│   │   ├── npm-publishing-fix.md
│   │   └── release_improvements.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── release-drafter.yml
│   └── workflows
│       ├── auto-merge.yml
│       ├── ci.yml
│       ├── codeql.yml
│       ├── dependency-review.yml
│       ├── deploy-docs.yml
│       ├── README.md
│       ├── release-drafter.yml
│       └── release.yml
├── .gitignore
├── .husky
│   ├── commit-msg
│   └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── ARCHITECTURAL_CHANGES_SUMMARY.md
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│   ├── .docusaurus
│   │   ├── docusaurus-plugin-content-docs
│   │   │   └── default
│   │   │       └── __mdx-loader-dependency.json
│   │   └── docusaurus-plugin-content-pages
│   │       └── default
│   │           └── __plugin.json
│   ├── adrs
│   │   ├── adr-0001-mcp-server-architecture.md
│   │   ├── adr-0002-repository-analysis-engine.md
│   │   ├── adr-0003-static-site-generator-recommendation-engine.md
│   │   ├── adr-0004-diataxis-framework-integration.md
│   │   ├── adr-0005-github-pages-deployment-automation.md
│   │   ├── adr-0006-mcp-tools-api-design.md
│   │   ├── adr-0007-mcp-prompts-and-resources-integration.md
│   │   ├── adr-0008-intelligent-content-population-engine.md
│   │   ├── adr-0009-content-accuracy-validation-framework.md
│   │   ├── adr-0010-mcp-resource-pattern-redesign.md
│   │   ├── adr-0011-ce-mcp-compatibility.md
│   │   ├── adr-0012-priority-scoring-system-for-documentation-drift.md
│   │   ├── adr-0013-release-pipeline-and-package-distribution.md
│   │   └── README.md
│   ├── api
│   │   ├── .nojekyll
│   │   ├── assets
│   │   │   ├── hierarchy.js
│   │   │   ├── highlight.css
│   │   │   ├── icons.js
│   │   │   ├── icons.svg
│   │   │   ├── main.js
│   │   │   ├── navigation.js
│   │   │   ├── search.js
│   │   │   └── style.css
│   │   ├── hierarchy.html
│   │   ├── index.html
│   │   ├── modules.html
│   │   └── variables
│   │       └── TOOLS.html
│   ├── assets
│   │   └── logo.svg
│   ├── CE-MCP-FINDINGS.md
│   ├── development
│   │   └── MCP_INSPECTOR_TESTING.md
│   ├── docusaurus.config.js
│   ├── explanation
│   │   ├── architecture.md
│   │   └── index.md
│   ├── guides
│   │   ├── link-validation.md
│   │   ├── playwright-integration.md
│   │   └── playwright-testing-workflow.md
│   ├── how-to
│   │   ├── analytics-setup.md
│   │   ├── change-watcher.md
│   │   ├── custom-domains.md
│   │   ├── documentation-freshness-tracking.md
│   │   ├── drift-priority-scoring.md
│   │   ├── github-pages-deployment.md
│   │   ├── index.md
│   │   ├── llm-integration.md
│   │   ├── local-testing.md
│   │   ├── performance-optimization.md
│   │   ├── prompting-guide.md
│   │   ├── repository-analysis.md
│   │   ├── seo-optimization.md
│   │   ├── site-monitoring.md
│   │   ├── troubleshooting.md
│   │   └── usage-examples.md
│   ├── index.md
│   ├── knowledge-graph.md
│   ├── package-lock.json
│   ├── package.json
│   ├── phase-2-intelligence.md
│   ├── reference
│   │   ├── api-overview.md
│   │   ├── cli.md
│   │   ├── configuration.md
│   │   ├── deploy-pages.md
│   │   ├── index.md
│   │   ├── mcp-tools.md
│   │   └── prompt-templates.md
│   ├── research
│   │   ├── cross-domain-integration
│   │   │   └── README.md
│   │   ├── domain-1-mcp-architecture
│   │   │   ├── index.md
│   │   │   └── mcp-performance-research.md
│   │   ├── domain-2-repository-analysis
│   │   │   └── README.md
│   │   ├── domain-3-ssg-recommendation
│   │   │   ├── index.md
│   │   │   └── ssg-performance-analysis.md
│   │   ├── domain-4-diataxis-integration
│   │   │   └── README.md
│   │   ├── domain-5-github-deployment
│   │   │   ├── github-pages-security-analysis.md
│   │   │   └── index.md
│   │   ├── domain-6-api-design
│   │   │   └── README.md
│   │   ├── README.md
│   │   ├── research-integration-summary-2025-01-14.md
│   │   ├── research-progress-template.md
│   │   └── research-questions-2025-01-14.md
│   ├── robots.txt
│   ├── sidebars.js
│   ├── sitemap.xml
│   ├── src
│   │   └── css
│   │       └── custom.css
│   └── tutorials
│       ├── development-setup.md
│       ├── environment-setup.md
│       ├── first-deployment.md
│       ├── getting-started.md
│       ├── index.md
│       ├── memory-workflows.md
│       └── user-onboarding.md
├── ISSUE_IMPLEMENTATION_SUMMARY.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│   └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│   ├── benchmarks
│   │   └── performance.ts
│   ├── index.ts
│   ├── memory
│   │   ├── contextual-retrieval.ts
│   │   ├── deployment-analytics.ts
│   │   ├── enhanced-manager.ts
│   │   ├── export-import.ts
│   │   ├── freshness-kg-integration.ts
│   │   ├── index.ts
│   │   ├── integration.ts
│   │   ├── kg-code-integration.ts
│   │   ├── kg-health.ts
│   │   ├── kg-integration.ts
│   │   ├── kg-link-validator.ts
│   │   ├── kg-storage.ts
│   │   ├── knowledge-graph.ts
│   │   ├── learning.ts
│   │   ├── manager.ts
│   │   ├── multi-agent-sharing.ts
│   │   ├── pruning.ts
│   │   ├── schemas.ts
│   │   ├── storage.ts
│   │   ├── temporal-analysis.ts
│   │   ├── user-preferences.ts
│   │   └── visualization.ts
│   ├── prompts
│   │   └── technical-writer-prompts.ts
│   ├── scripts
│   │   └── benchmark.ts
│   ├── templates
│   │   └── playwright
│   │       ├── accessibility.spec.template.ts
│   │       ├── Dockerfile.template
│   │       ├── docs-e2e.workflow.template.yml
│   │       ├── link-validation.spec.template.ts
│   │       └── playwright.config.template.ts
│   ├── tools
│   │   ├── analyze-deployments.ts
│   │   ├── analyze-readme.ts
│   │   ├── analyze-repository.ts
│   │   ├── change-watcher.ts
│   │   ├── check-documentation-links.ts
│   │   ├── cleanup-agent-artifacts.ts
│   │   ├── deploy-pages.ts
│   │   ├── detect-gaps.ts
│   │   ├── evaluate-readme-health.ts
│   │   ├── generate-config.ts
│   │   ├── generate-contextual-content.ts
│   │   ├── generate-llm-context.ts
│   │   ├── generate-readme-template.ts
│   │   ├── generate-technical-writer-prompts.ts
│   │   ├── kg-health-check.ts
│   │   ├── manage-preferences.ts
│   │   ├── manage-sitemap.ts
│   │   ├── optimize-readme.ts
│   │   ├── populate-content.ts
│   │   ├── readme-best-practices.ts
│   │   ├── recommend-ssg.ts
│   │   ├── setup-playwright-tests.ts
│   │   ├── setup-structure.ts
│   │   ├── simulate-execution.ts
│   │   ├── sync-code-to-docs.ts
│   │   ├── test-local-deployment.ts
│   │   ├── track-documentation-freshness.ts
│   │   ├── update-existing-documentation.ts
│   │   ├── validate-content.ts
│   │   ├── validate-documentation-freshness.ts
│   │   ├── validate-readme-checklist.ts
│   │   └── verify-deployment.ts
│   ├── types
│   │   └── api.ts
│   ├── utils
│   │   ├── artifact-detector.ts
│   │   ├── ast-analyzer.ts
│   │   ├── change-watcher.ts
│   │   ├── code-scanner.ts
│   │   ├── content-extractor.ts
│   │   ├── drift-detector.ts
│   │   ├── execution-simulator.ts
│   │   ├── freshness-tracker.ts
│   │   ├── language-parsers-simple.ts
│   │   ├── llm-client.ts
│   │   ├── permission-checker.ts
│   │   ├── semantic-analyzer.ts
│   │   ├── sitemap-generator.ts
│   │   ├── usage-metadata.ts
│   │   └── user-feedback-integration.ts
│   └── workflows
│       └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│   ├── api
│   │   └── mcp-responses.test.ts
│   ├── benchmarks
│   │   └── performance.test.ts
│   ├── call-graph-builder.test.ts
│   ├── change-watcher-priority.integration.test.ts
│   ├── change-watcher.test.ts
│   ├── edge-cases
│   │   └── error-handling.test.ts
│   ├── execution-simulator.test.ts
│   ├── functional
│   │   └── tools.test.ts
│   ├── integration
│   │   ├── kg-documentation-workflow.test.ts
│   │   ├── knowledge-graph-workflow.test.ts
│   │   ├── mcp-readme-tools.test.ts
│   │   ├── memory-mcp-tools.test.ts
│   │   ├── readme-technical-writer.test.ts
│   │   └── workflow.test.ts
│   ├── memory
│   │   ├── contextual-retrieval.test.ts
│   │   ├── enhanced-manager.test.ts
│   │   ├── export-import.test.ts
│   │   ├── freshness-kg-integration.test.ts
│   │   ├── kg-code-integration.test.ts
│   │   ├── kg-health.test.ts
│   │   ├── kg-link-validator.test.ts
│   │   ├── kg-storage-validation.test.ts
│   │   ├── kg-storage.test.ts
│   │   ├── knowledge-graph-documentation-examples.test.ts
│   │   ├── knowledge-graph-enhanced.test.ts
│   │   ├── knowledge-graph.test.ts
│   │   ├── learning.test.ts
│   │   ├── manager-advanced.test.ts
│   │   ├── manager.test.ts
│   │   ├── mcp-resource-integration.test.ts
│   │   ├── mcp-tool-persistence.test.ts
│   │   ├── schemas-documentation-examples.test.ts
│   │   ├── schemas.test.ts
│   │   ├── storage.test.ts
│   │   ├── temporal-analysis.test.ts
│   │   └── user-preferences.test.ts
│   ├── performance
│   │   ├── memory-load-testing.test.ts
│   │   └── memory-stress-testing.test.ts
│   ├── prompts
│   │   ├── guided-workflow-prompts.test.ts
│   │   └── technical-writer-prompts.test.ts
│   ├── server.test.ts
│   ├── setup.ts
│   ├── tools
│   │   ├── all-tools.test.ts
│   │   ├── analyze-coverage.test.ts
│   │   ├── analyze-deployments.test.ts
│   │   ├── analyze-readme.test.ts
│   │   ├── analyze-repository.test.ts
│   │   ├── check-documentation-links.test.ts
│   │   ├── cleanup-agent-artifacts.test.ts
│   │   ├── deploy-pages-kg-retrieval.test.ts
│   │   ├── deploy-pages-tracking.test.ts
│   │   ├── deploy-pages.test.ts
│   │   ├── detect-gaps.test.ts
│   │   ├── evaluate-readme-health.test.ts
│   │   ├── generate-contextual-content.test.ts
│   │   ├── generate-llm-context.test.ts
│   │   ├── generate-readme-template.test.ts
│   │   ├── generate-technical-writer-prompts.test.ts
│   │   ├── kg-health-check.test.ts
│   │   ├── manage-sitemap.test.ts
│   │   ├── optimize-readme.test.ts
│   │   ├── readme-best-practices.test.ts
│   │   ├── recommend-ssg-historical.test.ts
│   │   ├── recommend-ssg-preferences.test.ts
│   │   ├── recommend-ssg.test.ts
│   │   ├── simple-coverage.test.ts
│   │   ├── sync-code-to-docs.test.ts
│   │   ├── test-local-deployment.test.ts
│   │   ├── tool-error-handling.test.ts
│   │   ├── track-documentation-freshness.test.ts
│   │   ├── validate-content.test.ts
│   │   ├── validate-documentation-freshness.test.ts
│   │   └── validate-readme-checklist.test.ts
│   ├── types
│   │   └── type-safety.test.ts
│   └── utils
│       ├── artifact-detector.test.ts
│       ├── ast-analyzer.test.ts
│       ├── content-extractor.test.ts
│       ├── drift-detector-diataxis.test.ts
│       ├── drift-detector-priority.test.ts
│       ├── drift-detector.test.ts
│       ├── freshness-tracker.test.ts
│       ├── llm-client.test.ts
│       ├── semantic-analyzer.test.ts
│       ├── sitemap-generator.test.ts
│       ├── usage-metadata.test.ts
│       └── user-feedback-integration.test.ts
├── tsconfig.json
└── typedoc.json
```

# Files

--------------------------------------------------------------------------------
/tests/integration/workflow.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | // Integration tests for complete documentation workflows
  2 | import { promises as fs } from "fs";
  3 | import path from "path";
  4 | import os from "os";
  5 | import { analyzeRepository } from "../../src/tools/analyze-repository";
  6 | import { recommendSSG } from "../../src/tools/recommend-ssg";
  7 | import { generateConfig } from "../../src/tools/generate-config";
  8 | import { setupStructure } from "../../src/tools/setup-structure";
  9 | import { deployPages } from "../../src/tools/deploy-pages";
 10 | import { verifyDeployment } from "../../src/tools/verify-deployment";
 11 | import { resetKnowledgeGraph } from "../../src/memory/kg-integration";
 12 | import { clearPreferenceManagerCache } from "../../src/memory/user-preferences";
 13 | 
 14 | describe("Integration Testing - Complete Workflows", () => {
 15 |   let tempDir: string;
 16 |   let testProject: string;
 17 |   let originalStorageDir: string | undefined;
 18 | 
 19 |   beforeAll(async () => {
 20 |     // Reset knowledge graph state to ensure clean test environment
 21 |     resetKnowledgeGraph();
 22 |     clearPreferenceManagerCache();
 23 | 
 24 |     tempDir = path.join(os.tmpdir(), "documcp-integration-tests");
 25 |     await fs.mkdir(tempDir, { recursive: true });
 26 | 
 27 |     // Use isolated storage directory for tests to avoid conflicts
 28 |     originalStorageDir = process.env.DOCUMCP_STORAGE_DIR;
 29 |     process.env.DOCUMCP_STORAGE_DIR = path.join(tempDir, ".documcp", "memory");
 30 |     await fs.mkdir(process.env.DOCUMCP_STORAGE_DIR, { recursive: true });
 31 | 
 32 |     testProject = await createRealisticProject();
 33 |   });
 34 | 
 35 |   afterAll(async () => {
 36 |     // Reset knowledge graph and preference managers to avoid state leakage
 37 |     resetKnowledgeGraph();
 38 |     clearPreferenceManagerCache();
 39 | 
 40 |     // Restore original storage directory
 41 |     if (originalStorageDir !== undefined) {
 42 |       process.env.DOCUMCP_STORAGE_DIR = originalStorageDir;
 43 |     } else {
 44 |       delete process.env.DOCUMCP_STORAGE_DIR;
 45 |     }
 46 | 
 47 |     try {
 48 |       await fs.rm(tempDir, { recursive: true, force: true });
 49 |     } catch (error) {
 50 |       console.warn("Failed to cleanup integration test directory:", error);
 51 |     }
 52 |   });
 53 | 
 54 |   describe("End-to-End Documentation Workflow", () => {
 55 |     it("should complete full documentation setup workflow", async () => {
 56 |       const workflowDir = path.join(tempDir, "e2e-workflow");
 57 |       await fs.mkdir(workflowDir, { recursive: true });
 58 | 
 59 |       // Step 1: Analyze Repository
 60 |       console.log("Step 1: Analyzing repository...");
 61 |       const analysisResult = await analyzeRepository({
 62 |         path: testProject,
 63 |         depth: "standard",
 64 |       });
 65 | 
 66 |       expect(analysisResult.content).toBeDefined();
 67 |       expect((analysisResult as any).isError).toBeFalsy();
 68 | 
 69 |       // Extract analysis ID for next step
 70 |       const analysisText = analysisResult.content.find((c) =>
 71 |         c.text.includes('"id"'),
 72 |       );
 73 |       const analysis = JSON.parse(analysisText!.text);
 74 |       const analysisId = analysis.id;
 75 | 
 76 |       expect(analysisId).toBeDefined();
 77 |       expect(analysis.dependencies.ecosystem).toBe("javascript");
 78 | 
 79 |       // Step 2: Get SSG Recommendation
 80 |       console.log("Step 2: Getting SSG recommendation...");
 81 |       const recommendationResult = await recommendSSG({
 82 |         analysisId: analysisId,
 83 |         preferences: {
 84 |           priority: "features",
 85 |           ecosystem: "javascript",
 86 |         },
 87 |       });
 88 | 
 89 |       expect(recommendationResult.content).toBeDefined();
 90 |       const recommendationText = recommendationResult.content.find((c) =>
 91 |         c.text.includes('"recommended"'),
 92 |       );
 93 |       const recommendation = JSON.parse(recommendationText!.text);
 94 | 
 95 |       expect(recommendation.recommended).toBeDefined();
 96 |       expect(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"]).toContain(
 97 |         recommendation.recommended,
 98 |       );
 99 | 
100 |       // Step 3: Generate Configuration
101 |       console.log("Step 3: Generating configuration...");
102 |       const configResult = await generateConfig({
103 |         ssg: recommendation.recommended,
104 |         projectName: "Integration Test Project",
105 |         projectDescription: "End-to-end integration test",
106 |         outputPath: workflowDir,
107 |       });
108 | 
109 |       expect(configResult.content).toBeDefined();
110 |       expect((configResult as any).isError).toBeFalsy();
111 | 
112 |       // Verify config files were created
113 |       const files = await fs.readdir(workflowDir);
114 |       expect(files.length).toBeGreaterThan(0);
115 | 
116 |       // Step 4: Setup Documentation Structure
117 |       console.log("Step 4: Setting up documentation structure...");
118 |       const docsDir = path.join(workflowDir, "docs");
119 |       const structureResult = await setupStructure({
120 |         path: docsDir,
121 |         ssg: recommendation.recommended,
122 |         includeExamples: true,
123 |       });
124 | 
125 |       expect(structureResult.content).toBeDefined();
126 |       expect((structureResult as any).isError).toBeFalsy();
127 | 
128 |       // Verify Diataxis structure was created
129 |       const diataxisCategories = [
130 |         "tutorials",
131 |         "how-to",
132 |         "reference",
133 |         "explanation",
134 |       ];
135 |       for (const category of diataxisCategories) {
136 |         const categoryPath = path.join(docsDir, category);
137 |         expect(
138 |           await fs
139 |             .access(categoryPath)
140 |             .then(() => true)
141 |             .catch(() => false),
142 |         ).toBe(true);
143 |       }
144 | 
145 |       // Step 5: Setup Deployment
146 |       console.log("Step 5: Setting up deployment...");
147 |       const deploymentResult = await deployPages({
148 |         repository: workflowDir,
149 |         ssg: recommendation.recommended,
150 |         branch: "gh-pages",
151 |         customDomain: "docs.example.com",
152 |       });
153 | 
154 |       expect(deploymentResult.content).toBeDefined();
155 |       expect((deploymentResult as any).isError).toBeFalsy();
156 | 
157 |       // Verify workflow and CNAME were created
158 |       const workflowPath = path.join(
159 |         workflowDir,
160 |         ".github",
161 |         "workflows",
162 |         "deploy-docs.yml",
163 |       );
164 |       const cnamePath = path.join(workflowDir, "CNAME");
165 | 
166 |       expect(
167 |         await fs
168 |           .access(workflowPath)
169 |           .then(() => true)
170 |           .catch(() => false),
171 |       ).toBe(true);
172 |       expect(
173 |         await fs
174 |           .access(cnamePath)
175 |           .then(() => true)
176 |           .catch(() => false),
177 |       ).toBe(true);
178 | 
179 |       // Step 6: Verify Deployment Setup
180 |       console.log("Step 6: Verifying deployment setup...");
181 |       const verificationResult = await verifyDeployment({
182 |         repository: workflowDir,
183 |         url: "https://docs.example.com",
184 |       });
185 | 
186 |       expect(verificationResult.content).toBeDefined();
187 | 
188 |       // Parse the JSON response to check actual verification data
189 |       const verificationData = JSON.parse(verificationResult.content[0].text);
190 |       const passCount = verificationData.summary.passed;
191 |       const failCount = verificationData.summary.failed;
192 | 
193 |       console.log("Pass count:", passCount, "Fail count:", failCount);
194 | 
195 |       // Should have at least some passing checks
196 |       expect(passCount).toBeGreaterThan(0);
197 |       expect(passCount).toBeGreaterThanOrEqual(failCount);
198 | 
199 |       console.log("✅ End-to-end workflow completed successfully!");
200 |     }, 30000); // 30 second timeout for full workflow
201 |   });
202 | 
203 |   describe("Workflow Variations", () => {
204 |     it("should handle Python project workflow", async () => {
205 |       const pythonProject = await createPythonProject();
206 | 
207 |       // Analyze Python project
208 |       const analysis = await analyzeRepository({
209 |         path: pythonProject,
210 |         depth: "standard",
211 |       });
212 |       const analysisData = JSON.parse(
213 |         analysis.content.find((c) => c.text.includes('"ecosystem"'))!.text,
214 |       );
215 | 
216 |       expect(analysisData.dependencies.ecosystem).toBe("python");
217 | 
218 |       // Get recommendation (likely MkDocs for Python)
219 |       const recommendation = await recommendSSG({
220 |         analysisId: analysisData.id,
221 |       });
222 |       // const recData = JSON.parse(recommendation.content.find(c => c.text.includes('"recommended"'))!.text);
223 | 
224 |       // Generate MkDocs config
225 |       const configDir = path.join(tempDir, "python-workflow");
226 |       await fs.mkdir(configDir, { recursive: true });
227 | 
228 |       const config = await generateConfig({
229 |         ssg: "mkdocs",
230 |         projectName: "Python Test Project",
231 |         outputPath: configDir,
232 |       });
233 | 
234 |       // Verify MkDocs-specific files
235 |       expect(
236 |         await fs
237 |           .access(path.join(configDir, "mkdocs.yml"))
238 |           .then(() => true)
239 |           .catch(() => false),
240 |       ).toBe(true);
241 |       expect(
242 |         await fs
243 |           .access(path.join(configDir, "requirements.txt"))
244 |           .then(() => true)
245 |           .catch(() => false),
246 |       ).toBe(true);
247 |     });
248 | 
249 |     it("should handle different SSG preferences", async () => {
250 |       const analysisId = "test-preferences-123";
251 | 
252 |       // Test simplicity preference
253 |       const simplicityRec = await recommendSSG({
254 |         analysisId,
255 |         preferences: { priority: "simplicity" },
256 |       });
257 | 
258 |       // Test performance preference
259 |       const performanceRec = await recommendSSG({
260 |         analysisId,
261 |         preferences: { priority: "performance" },
262 |       });
263 | 
264 |       // Test features preference
265 |       const featuresRec = await recommendSSG({
266 |         analysisId,
267 |         preferences: { priority: "features" },
268 |       });
269 | 
270 |       // All should provide valid recommendations
271 |       [simplicityRec, performanceRec, featuresRec].forEach((result) => {
272 |         expect(result.content).toBeDefined();
273 |         const rec = JSON.parse(
274 |           result.content.find((c) => c.text.includes('"recommended"'))!.text,
275 |         );
276 |         expect([
277 |           "jekyll",
278 |           "hugo",
279 |           "docusaurus",
280 |           "mkdocs",
281 |           "eleventy",
282 |         ]).toContain(rec.recommended);
283 |       });
284 |     });
285 | 
286 |     it("should handle deployment workflow variations", async () => {
287 |       const deploymentDir = path.join(tempDir, "deployment-variations");
288 |       await fs.mkdir(deploymentDir, { recursive: true });
289 | 
290 |       // Test different SSGs
291 |       const ssgs = [
292 |         "docusaurus",
293 |         "mkdocs",
294 |         "hugo",
295 |         "jekyll",
296 |         "eleventy",
297 |       ] as const;
298 | 
299 |       for (const ssg of ssgs) {
300 |         const ssgDir = path.join(deploymentDir, ssg);
301 |         await fs.mkdir(ssgDir, { recursive: true });
302 | 
303 |         const result = await deployPages({
304 |           repository: ssgDir,
305 |           ssg: ssg,
306 |           branch: "main",
307 |         });
308 | 
309 |         expect(result.content).toBeDefined();
310 | 
311 |         const workflowPath = path.join(
312 |           ssgDir,
313 |           ".github",
314 |           "workflows",
315 |           "deploy-docs.yml",
316 |         );
317 |         expect(
318 |           await fs
319 |             .access(workflowPath)
320 |             .then(() => true)
321 |             .catch(() => false),
322 |         ).toBe(true);
323 | 
324 |         const workflowContent = await fs.readFile(workflowPath, "utf-8");
325 |         // Handle different SSG name formats
326 |         const expectedName =
327 |           ssg === "mkdocs"
328 |             ? "Deploy MkDocs"
329 |             : `Deploy ${ssg.charAt(0).toUpperCase() + ssg.slice(1)}`;
330 |         expect(workflowContent).toContain(expectedName);
331 | 
332 |         // Verify SSG-specific workflow content
333 |         switch (ssg) {
334 |           case "docusaurus":
335 |             expect(workflowContent).toContain("npm run build");
336 |             expect(workflowContent).toContain("id-token: write"); // OIDC compliance
337 |             break;
338 |           case "mkdocs":
339 |             expect(workflowContent).toContain("mkdocs gh-deploy");
340 |             expect(workflowContent).toContain("python");
341 |             break;
342 |           case "hugo":
343 |             expect(workflowContent).toContain("peaceiris/actions-hugo");
344 |             expect(workflowContent).toContain("hugo --minify");
345 |             break;
346 |           case "jekyll":
347 |             expect(workflowContent).toContain("bundle exec jekyll build");
348 |             expect(workflowContent).toContain("ruby");
349 |             break;
350 |           case "eleventy":
351 |             expect(workflowContent).toContain("npm run build");
352 |             break;
353 |         }
354 |       }
355 |     });
356 |   });
357 | 
358 |   describe("Error Handling and Recovery", () => {
359 |     it("should handle missing repository gracefully", async () => {
360 |       const result = await analyzeRepository({
361 |         path: "/non/existent/path",
362 |         depth: "standard",
363 |       });
364 | 
365 |       expect((result as any).isError).toBe(true);
366 |       expect(result.content[0].text).toContain("Error:");
367 |     });
368 | 
369 |     it("should handle invalid configuration gracefully", async () => {
370 |       const invalidDir = "/invalid/write/path/that/should/fail";
371 | 
372 |       const result = await generateConfig({
373 |         ssg: "docusaurus",
374 |         projectName: "Test",
375 |         outputPath: invalidDir,
376 |       });
377 | 
378 |       expect((result as any).isError).toBe(true);
379 |       expect(result.content[0].text).toContain("Error:");
380 |     });
381 | 
382 |     it("should handle structure setup in non-existent directory", async () => {
383 |       // This should actually work because setupStructure creates directories
384 |       const result = await setupStructure({
385 |         path: path.join(tempDir, "new-structure-dir"),
386 |         ssg: "docusaurus",
387 |         includeExamples: false,
388 |       });
389 | 
390 |       expect((result as any).isError).toBeFalsy();
391 |       expect(result.content).toBeDefined();
392 |     });
393 | 
394 |     it("should provide helpful error messages and resolutions", async () => {
395 |       const errorResult = await analyzeRepository({
396 |         path: "/definitely/does/not/exist",
397 |         depth: "standard",
398 |       });
399 | 
400 |       expect((errorResult as any).isError).toBe(true);
401 |       const errorText = errorResult.content.map((c) => c.text).join(" ");
402 |       // Check for resolution in JSON format (lowercase) or formatted text (capitalized)
403 |       expect(errorText.toLowerCase()).toContain("resolution");
404 |       expect(errorText.toLowerCase()).toContain("ensure");
405 |     });
406 |   });
407 | 
408 |   describe("Performance and Resource Management", () => {
409 |     it("should handle large repository analysis within performance bounds", async () => {
410 |       const largeRepo = await createLargeRepository();
411 | 
412 |       const startTime = Date.now();
413 |       const result = await analyzeRepository({
414 |         path: largeRepo,
415 |         depth: "standard",
416 |       });
417 |       const executionTime = Date.now() - startTime;
418 | 
419 |       // Should complete within reasonable time (large repo target is 60s)
420 |       expect(executionTime).toBeLessThan(60000);
421 |       expect(result.content).toBeDefined();
422 | 
423 |       const analysisData = JSON.parse(
424 |         result.content.find((c) => c.text.includes('"totalFiles"'))!.text,
425 |       );
426 |       expect(analysisData.structure.totalFiles).toBeGreaterThan(1000);
427 |     }, 65000); // 65s timeout for large repo test
428 | 
429 |     it("should clean up resources properly", async () => {
430 |       const tempWorkflowDir = path.join(tempDir, "resource-cleanup");
431 | 
432 |       // Run multiple operations
433 |       await generateConfig({
434 |         ssg: "docusaurus",
435 |         projectName: "Cleanup Test",
436 |         outputPath: tempWorkflowDir,
437 |       });
438 | 
439 |       await setupStructure({
440 |         path: path.join(tempWorkflowDir, "docs"),
441 |         ssg: "docusaurus",
442 |         includeExamples: true,
443 |       });
444 | 
445 |       // Verify files were created
446 |       const files = await fs.readdir(tempWorkflowDir);
447 |       expect(files.length).toBeGreaterThan(0);
448 | 
449 |       // Cleanup should work
450 |       await fs.rm(tempWorkflowDir, { recursive: true, force: true });
451 |       expect(
452 |         await fs
453 |           .access(tempWorkflowDir)
454 |           .then(() => false)
455 |           .catch(() => true),
456 |       ).toBe(true);
457 |     });
458 |   });
459 | 
460 |   // Helper functions
461 |   async function createRealisticProject(): Promise<string> {
462 |     const projectPath = path.join(tempDir, "realistic-project");
463 |     await fs.mkdir(projectPath, { recursive: true });
464 | 
465 |     // package.json with realistic dependencies
466 |     const packageJson = {
467 |       name: "realistic-test-project",
468 |       version: "2.1.0",
469 |       description: "A realistic Node.js project for testing DocuMCP",
470 |       main: "src/index.js",
471 |       scripts: {
472 |         start: "node src/index.js",
473 |         dev: "nodemon src/index.js",
474 |         test: "jest",
475 |         build: "webpack --mode production",
476 |         lint: "eslint src/",
477 |         docs: "jsdoc src/ -d docs/",
478 |       },
479 |       dependencies: {
480 |         express: "^4.18.2",
481 |         lodash: "^4.17.21",
482 |         axios: "^1.4.0",
483 |         moment: "^2.29.4",
484 |         "body-parser": "^1.20.2",
485 |       },
486 |       devDependencies: {
487 |         jest: "^29.5.0",
488 |         nodemon: "^2.0.22",
489 |         eslint: "^8.42.0",
490 |         webpack: "^5.86.0",
491 |         jsdoc: "^4.0.2",
492 |       },
493 |       keywords: ["node", "express", "api", "web"],
494 |       author: "Test Author",
495 |       license: "MIT",
496 |     };
497 | 
498 |     await fs.writeFile(
499 |       path.join(projectPath, "package.json"),
500 |       JSON.stringify(packageJson, null, 2),
501 |     );
502 | 
503 |     // Source directory structure
504 |     await fs.mkdir(path.join(projectPath, "src"), { recursive: true });
505 |     await fs.mkdir(path.join(projectPath, "src", "controllers"), {
506 |       recursive: true,
507 |     });
508 |     await fs.mkdir(path.join(projectPath, "src", "models"), {
509 |       recursive: true,
510 |     });
511 |     await fs.mkdir(path.join(projectPath, "src", "routes"), {
512 |       recursive: true,
513 |     });
514 |     await fs.mkdir(path.join(projectPath, "src", "utils"), { recursive: true });
515 | 
516 |     // Main application files
517 |     await fs.writeFile(
518 |       path.join(projectPath, "src", "index.js"),
519 |       `const express = require('express');
520 | const bodyParser = require('body-parser');
521 | const routes = require('./routes');
522 | 
523 | const app = express();
524 | app.use(bodyParser.json());
525 | app.use('/api', routes);
526 | 
527 | const PORT = process.env.PORT || 3000;
528 | app.listen(PORT, () => {
529 |   console.log(\`Server running on port \${PORT}\`);
530 | });`,
531 |     );
532 | 
533 |     await fs.writeFile(
534 |       path.join(projectPath, "src", "routes", "index.js"),
535 |       `const express = require('express');
536 | const router = express.Router();
537 | 
538 | router.get('/health', (req, res) => {
539 |   res.json({ status: 'OK', timestamp: new Date().toISOString() });
540 | });
541 | 
542 | module.exports = router;`,
543 |     );
544 | 
545 |     await fs.writeFile(
546 |       path.join(projectPath, "src", "controllers", "userController.js"),
547 |       `const { getUserById, createUser } = require('../models/user');
548 | 
549 | async function getUser(req, res) {
550 |   const user = await getUserById(req.params.id);
551 |   res.json(user);
552 | }
553 | 
554 | module.exports = { getUser };`,
555 |     );
556 | 
557 |     await fs.writeFile(
558 |       path.join(projectPath, "src", "models", "user.js"),
559 |       `const users = [];
560 | 
561 | function getUserById(id) {
562 |   return users.find(user => user.id === id);
563 | }
564 | 
565 | function createUser(userData) {
566 |   const user = { id: Date.now(), ...userData };
567 |   users.push(user);
568 |   return user;
569 | }
570 | 
571 | module.exports = { getUserById, createUser };`,
572 |     );
573 | 
574 |     await fs.writeFile(
575 |       path.join(projectPath, "src", "utils", "helpers.js"),
576 |       `const _ = require('lodash');
577 | const moment = require('moment');
578 | 
579 | function formatDate(date) {
580 |   return moment(date).format('YYYY-MM-DD HH:mm:ss');
581 | }
582 | 
583 | function validateEmail(email) {
584 |   return /^[^\\s@]+@[^\\s@]+\\.[^\\s@]+$/.test(email);
585 | }
586 | 
587 | module.exports = { formatDate, validateEmail };`,
588 |     );
589 | 
590 |     // Test directory
591 |     await fs.mkdir(path.join(projectPath, "tests"), { recursive: true });
592 |     await fs.writeFile(
593 |       path.join(projectPath, "tests", "app.test.js"),
594 |       `const { formatDate, validateEmail } = require('../src/utils/helpers');
595 | 
596 | describe('Helper Functions', () => {
597 |   test('formatDate should format date correctly', () => {
598 |     const date = new Date('2023-01-01');
599 |     expect(formatDate(date)).toMatch(/\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}/);
600 |   });
601 | 
602 |   test('validateEmail should validate email correctly', () => {
603 |     expect(validateEmail('test@example.com')).toBe(true);
604 |     expect(validateEmail('invalid-email')).toBe(false);
605 |   });
606 | });`,
607 |     );
608 | 
609 |     // Configuration files
610 |     await fs.writeFile(
611 |       path.join(projectPath, ".eslintrc.js"),
612 |       `module.exports = {
613 |   env: { node: true, es2021: true },
614 |   extends: ['eslint:recommended'],
615 |   parserOptions: { ecmaVersion: 12, sourceType: 'module' },
616 |   rules: { 'no-unused-vars': 'warn' }
617 | };`,
618 |     );
619 | 
620 |     await fs.writeFile(
621 |       path.join(projectPath, "jest.config.js"),
622 |       `module.exports = {
623 |   testEnvironment: 'node',
624 |   collectCoverageFrom: ['src/**/*.js'],
625 |   testMatch: ['**/tests/**/*.test.js']
626 | };`,
627 |     );
628 | 
629 |     // Documentation
630 |     await fs.writeFile(
631 |       path.join(projectPath, "README.md"),
632 |       `# Realistic Test Project
633 | 
634 | A comprehensive Node.js application for testing DocuMCP functionality.
635 | 
636 | ## Features
637 | 
638 | - Express.js web server
639 | - RESTful API endpoints
640 | - User management system
641 | - Comprehensive test suite
642 | - ESLint code quality
643 | - JSDoc documentation
644 | 
645 | ## Getting Started
646 | 
647 | 1. Install dependencies: \`npm install\`
648 | 2. Start development server: \`npm run dev\`
649 | 3. Run tests: \`npm test\`
650 | 
651 | ## API Endpoints
652 | 
653 | - \`GET /api/health\` - Health check endpoint
654 | - \`GET /api/users/:id\` - Get user by ID
655 | 
656 | ## Contributing
657 | 
658 | Please read CONTRIBUTING.md for contribution guidelines.`,
659 |     );
660 | 
661 |     await fs.writeFile(
662 |       path.join(projectPath, "CONTRIBUTING.md"),
663 |       `# Contributing to Realistic Test Project
664 | 
665 | ## Development Setup
666 | 
667 | 1. Fork the repository
668 | 2. Clone your fork
669 | 3. Install dependencies
670 | 4. Create a feature branch
671 | 5. Make changes and test
672 | 6. Submit a pull request
673 | 
674 | ## Code Style
675 | 
676 | - Follow ESLint configuration
677 | - Write tests for new features
678 | - Update documentation as needed`,
679 |     );
680 | 
681 |     await fs.writeFile(
682 |       path.join(projectPath, "LICENSE"),
683 |       "MIT License\n\nCopyright (c) 2023 Test Author",
684 |     );
685 | 
686 |     // CI/CD workflow
687 |     await fs.mkdir(path.join(projectPath, ".github", "workflows"), {
688 |       recursive: true,
689 |     });
690 |     await fs.writeFile(
691 |       path.join(projectPath, ".github", "workflows", "ci.yml"),
692 |       `name: CI
693 | 
694 | on: [push, pull_request]
695 | 
696 | jobs:
697 |   test:
698 |     runs-on: ubuntu-latest
699 |     steps:
700 |       - uses: actions/checkout@v3
701 |       - uses: actions/setup-node@v3
702 |         with:
703 |           node-version: '20'
704 |       - run: npm ci
705 |       - run: npm run lint
706 |       - run: npm test
707 |       - run: npm run build`,
708 |     );
709 | 
710 |     return projectPath;
711 |   }
712 | 
713 |   async function createPythonProject(): Promise<string> {
714 |     const projectPath = path.join(tempDir, "python-project");
715 |     await fs.mkdir(projectPath, { recursive: true });
716 | 
717 |     // Python project structure
718 |     await fs.writeFile(
719 |       path.join(projectPath, "requirements.txt"),
720 |       `flask==2.3.2
721 | requests==2.31.0
722 | pytest==7.4.0
723 | black==23.3.0
724 | flake8==6.0.0`,
725 |     );
726 | 
727 |     await fs.mkdir(path.join(projectPath, "src"), { recursive: true });
728 |     await fs.writeFile(
729 |       path.join(projectPath, "src", "app.py"),
730 |       `from flask import Flask, jsonify
731 | import requests
732 | 
733 | app = Flask(__name__)
734 | 
735 | @app.route('/health')
736 | def health():
737 |     return jsonify({'status': 'OK'})
738 | 
739 | if __name__ == '__main__':
740 |     app.run(debug=True)`,
741 |     );
742 | 
743 |     await fs.mkdir(path.join(projectPath, "tests"), { recursive: true });
744 |     await fs.writeFile(
745 |       path.join(projectPath, "tests", "test_app.py"),
746 |       `import pytest
747 | from src.app import app
748 | 
749 | def test_health():
750 |     client = app.test_client()
751 |     response = client.get('/health')
752 |     assert response.status_code == 200`,
753 |     );
754 | 
755 |     await fs.writeFile(
756 |       path.join(projectPath, "README.md"),
757 |       "# Python Test Project\n\nA Flask application for testing Python project analysis.",
758 |     );
759 | 
760 |     return projectPath;
761 |   }
762 | 
763 |   async function createLargeRepository(): Promise<string> {
764 |     const repoPath = path.join(tempDir, "large-repository");
765 |     await fs.mkdir(repoPath, { recursive: true });
766 | 
767 |     // Create a repository with 1200+ files to trigger large repo categorization
768 |     await fs.writeFile(
769 |       path.join(repoPath, "package.json"),
770 |       '{"name": "large-repo"}',
771 |     );
772 | 
773 |     for (let i = 0; i < 30; i++) {
774 |       const dirPath = path.join(repoPath, `module-${i}`);
775 |       await fs.mkdir(dirPath, { recursive: true });
776 | 
777 |       for (let j = 0; j < 40; j++) {
778 |         const fileName = `component-${j}.js`;
779 |         const content = `// Component ${i}-${j}
780 | export default function Component${i}${j}() {
781 |   return <div>Component ${i}-${j}</div>;
782 | }`;
783 |         await fs.writeFile(path.join(dirPath, fileName), content);
784 |       }
785 |     }
786 | 
787 |     await fs.writeFile(
788 |       path.join(repoPath, "README.md"),
789 |       "# Large Repository\n\nThis repository has 1200+ files for performance testing.",
790 |     );
791 | 
792 |     return repoPath;
793 |   }
794 | });
795 | 
```
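
The workflow test above repeatedly unpacks MCP tool results with `result.content.find((c) => c.text.includes(...))` followed by `JSON.parse`. The sketch below shows the response shape those assertions assume; it is inferred from the test code rather than copied from the repository (the canonical types presumably live in `src/types/api.ts`), and `parseToolJson` is a hypothetical helper, not an existing export.

```typescript
// Minimal sketch of the tool response shape the assertions rely on.
// Inferred from how the tests consume results: `content` is an array of
// text items, and `isError` is read via `(result as any).isError`, so it
// is modeled here as optional.
interface ToolResult {
  content: Array<{ type: "text"; text: string }>;
  isError?: boolean;
}

// Hypothetical helper mirroring the repeated
// `JSON.parse(result.content.find(...)!.text)` pattern in the tests:
// find the first content item whose text contains `marker` and parse it.
function parseToolJson<T = unknown>(result: ToolResult, marker: string): T {
  const item = result.content.find((c) => c.text.includes(marker));
  if (!item) {
    throw new Error(`No content item containing ${marker}`);
  }
  return JSON.parse(item.text) as T;
}

// Usage, equivalent to Step 1 of the end-to-end workflow test:
// const analysis = parseToolJson<{ id: string }>(analysisResult, '"id"');
// const analysisId = analysis.id;
```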

--------------------------------------------------------------------------------
/src/prompts/technical-writer-prompts.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { promises as fs } from "fs";
  2 | import { join } from "path";
  3 | 
  4 | export interface ProjectContext {
  5 |   projectType: string;
  6 |   languages: string[];
  7 |   frameworks: string[];
  8 |   hasTests: boolean;
  9 |   hasCI: boolean;
 10 |   readmeExists: boolean;
 11 |   packageManager?: string;
 12 |   documentationGaps: string[];
 13 | }
 14 | 
 15 | export interface PromptMessage {
 16 |   role: "user" | "assistant" | "system";
 17 |   content: {
 18 |     type: "text";
 19 |     text: string;
 20 |   };
 21 | }
 22 | 
 23 | export async function analyzeProjectContext(
 24 |   projectPath: string,
 25 | ): Promise<ProjectContext> {
 26 |   const context: ProjectContext = {
 27 |     projectType: "unknown",
 28 |     languages: [],
 29 |     frameworks: [],
 30 |     hasTests: false,
 31 |     hasCI: false,
 32 |     readmeExists: false,
 33 |     documentationGaps: [],
 34 |   };
 35 | 
 36 |   // Check for README
 37 |   context.readmeExists = await fileExists(join(projectPath, "README.md"));
 38 | 
 39 |   // Analyze package.json for Node.js projects
 40 |   const packageJsonPath = join(projectPath, "package.json");
 41 |   if (await fileExists(packageJsonPath)) {
 42 |     try {
 43 |       const packageJson = JSON.parse(
 44 |         await fs.readFile(packageJsonPath, "utf-8"),
 45 |       );
 46 |       const deps = {
 47 |         ...packageJson.dependencies,
 48 |         ...packageJson.devDependencies,
 49 |       };
 50 | 
 51 |       context.projectType = "node_application";
 52 |       context.languages.push("JavaScript");
 53 | 
 54 |       // Detect frameworks
 55 |       if (deps["react"]) context.frameworks.push("React");
 56 |       if (deps["vue"]) context.frameworks.push("Vue");
 57 |       if (deps["angular"]) context.frameworks.push("Angular");
 58 |       if (deps["express"]) context.frameworks.push("Express");
 59 |       if (deps["next"]) context.frameworks.push("Next.js");
 60 |       if (deps["nuxt"]) context.frameworks.push("Nuxt.js");
 61 |       if (deps["svelte"]) context.frameworks.push("Svelte");
 62 |       if (deps["typescript"]) context.languages.push("TypeScript");
 63 | 
 64 |       // Detect package manager
 65 |       if (await fileExists(join(projectPath, "yarn.lock"))) {
 66 |         context.packageManager = "yarn";
 67 |       } else if (await fileExists(join(projectPath, "pnpm-lock.yaml"))) {
 68 |         context.packageManager = "pnpm";
 69 |       } else {
 70 |         context.packageManager = "npm";
 71 |       }
 72 |     } catch (error) {
 73 |       // If package.json exists but can't be parsed, continue with other detections
 74 |       console.warn("Failed to parse package.json:", error);
 75 |     }
 76 |   }
 77 | 
 78 |   // Check for Python projects
 79 |   if (
 80 |     (await fileExists(join(projectPath, "requirements.txt"))) ||
 81 |     (await fileExists(join(projectPath, "pyproject.toml"))) ||
 82 |     (await fileExists(join(projectPath, "setup.py")))
 83 |   ) {
 84 |     context.projectType = "python_application";
 85 |     context.languages.push("Python");
 86 |   }
 87 | 
 88 |   // Check for Go projects
 89 |   if (await fileExists(join(projectPath, "go.mod"))) {
 90 |     context.projectType = "go_application";
 91 |     context.languages.push("Go");
 92 |   }
 93 | 
 94 |   // Check for Rust projects
 95 |   if (await fileExists(join(projectPath, "Cargo.toml"))) {
 96 |     context.projectType = "rust_application";
 97 |     context.languages.push("Rust");
 98 |   }
 99 | 
100 |   // Check for tests
101 |   context.hasTests = await hasTestFiles(projectPath);
102 | 
103 |   // Check for CI/CD
104 |   context.hasCI = await hasCIConfig(projectPath);
105 | 
106 |   // Identify documentation gaps
107 |   context.documentationGaps = await identifyDocumentationGaps(
108 |     projectPath,
109 |     context,
110 |   );
111 | 
112 |   return context;
113 | }
114 | 
115 | export async function generateTechnicalWriterPrompts(
116 |   promptType: string,
117 |   projectPath: string,
118 |   args: Record<string, any> = {},
119 | ): Promise<PromptMessage[]> {
120 |   const context = await analyzeProjectContext(projectPath);
121 | 
122 |   switch (promptType) {
123 |     case "tutorial-writer":
124 |       return generateTutorialWriterPrompt(context, args);
125 |     case "howto-guide-writer":
126 |       return generateHowToGuideWriterPrompt(context, args);
127 |     case "reference-writer":
128 |       return generateReferenceWriterPrompt(context, args);
129 |     case "explanation-writer":
130 |       return generateExplanationWriterPrompt(context, args);
131 |     case "diataxis-organizer":
132 |       return generateDiataxisOrganizerPrompt(context, args);
133 |     case "readme-optimizer":
134 |       return generateReadmeOptimizerPrompt(context, args);
135 |     case "analyze-and-recommend":
136 |       return generateAnalyzeAndRecommendPrompt(context, args);
137 |     case "setup-documentation":
138 |       return generateSetupDocumentationPrompt(context, args);
139 |     case "troubleshoot-deployment":
140 |       return generateTroubleshootDeploymentPrompt(context, args);
141 |     case "maintain-documentation-freshness":
142 |       return generateMaintainDocumentationFreshnessPrompt(context, args);
143 |     default:
144 |       throw new Error(`Unknown prompt type: ${promptType}`);
145 |   }
146 | }
147 | 
148 | function generateTutorialWriterPrompt(
149 |   context: ProjectContext,
150 |   args: Record<string, any>,
151 | ): PromptMessage[] {
152 |   const targetAudience = args.target_audience || "beginners";
153 |   const learningGoal = args.learning_goal || "get started with the project";
154 | 
155 |   return [
156 |     {
157 |       role: "user",
158 |       content: {
159 |         type: "text",
160 |         text: `Create a comprehensive tutorial for a ${
161 |           context.projectType
162 |         } project following Diataxis framework principles.
163 | 
164 | **Project Context:**
165 | - Type: ${context.projectType}
166 | - Languages: ${context.languages.join(", ")}
167 | - Frameworks: ${context.frameworks.join(", ")}
168 | - Package Manager: ${context.packageManager || "N/A"}
169 | - Target Audience: ${targetAudience}
170 | - Learning Goal: ${learningGoal}
171 | 
172 | **Diataxis Tutorial Requirements:**
173 | 1. Learning-oriented: Focus on helping users learn by doing
174 | 2. Step-by-step progression from simple to complex
175 | 3. Practical exercises with clear outcomes
176 | 4. Safe-to-fail environment for experimentation
177 | 5. Minimal explanation - focus on action
178 | 
179 | **Tutorial Structure:**
180 | 1. Prerequisites and setup
181 | 2. Step-by-step guided exercises
182 | 3. What you'll build/learn
183 | 4. Hands-on activities with immediate feedback
184 | 5. Next steps for continued learning
185 | 
186 | **Integration Hints:**
187 | - Use analyze_repository for project structure insights
188 | - Reference setup_development_environment for environment setup
189 | - Consider validate_tutorial_steps for step verification
190 | 
191 | Please create a tutorial that teaches through guided practice:`,
192 |       },
193 |     },
194 |   ];
195 | }
196 | 
197 | function generateHowToGuideWriterPrompt(
198 |   context: ProjectContext,
199 |   args: Record<string, any>,
200 | ): PromptMessage[] {
201 |   const problemToSolve = args.problem || "common development task";
202 |   const userExperience = args.user_experience || "intermediate";
203 | 
204 |   return [
205 |     {
206 |       role: "user",
207 |       content: {
208 |         type: "text",
209 |         text: `Create a practical how-to guide for a ${
210 |           context.projectType
211 |         } project following Diataxis framework principles.
212 | 
213 | **Project Context:**
214 | - Type: ${context.projectType}
215 | - Languages: ${context.languages.join(", ")}
216 | - Frameworks: ${context.frameworks.join(", ")}
217 | - Problem to Solve: ${problemToSolve}
218 | - User Experience Level: ${userExperience}
219 | 
220 | **Diataxis How-to Guide Requirements:**
221 | 1. Problem-oriented: Address specific real-world problems
222 | 2. Goal-focused: Clear objective and success criteria
223 | 3. Action-oriented: Direct, actionable steps
224 | 4. Assume prior knowledge appropriate to user level
225 | 5. Practical and immediately applicable
226 | 
227 | **How-to Guide Structure:**
228 | 1. Problem statement and context
229 | 2. Prerequisites and assumptions
230 | 3. Step-by-step solution
231 | 4. Verification and testing
232 | 5. Troubleshooting common issues
233 | 6. Related tasks and variations
234 | 
235 | **Integration Hints:**
236 | - Use analyze_codebase for understanding current implementation
237 | - Reference best_practices for recommended approaches
238 | - Consider validate_solution for testing guidance
239 | 
240 | Please create a how-to guide that solves real problems:`,
241 |       },
242 |     },
243 |   ];
244 | }
245 | 
246 | function generateReferenceWriterPrompt(
247 |   context: ProjectContext,
248 |   args: Record<string, any>,
249 | ): PromptMessage[] {
250 |   const referenceType = args.reference_type || "API";
251 |   const completeness = args.completeness || "comprehensive";
252 | 
253 |   return [
254 |     {
255 |       role: "user",
256 |       content: {
257 |         type: "text",
258 |         text: `Create comprehensive reference documentation for a ${
259 |           context.projectType
260 |         } project following Diataxis framework principles.
261 | 
262 | **Project Context:**
263 | - Type: ${context.projectType}
264 | - Languages: ${context.languages.join(", ")}
265 | - Frameworks: ${context.frameworks.join(", ")}
266 | - Reference Type: ${referenceType}
267 | - Completeness Level: ${completeness}
268 | 
269 | **Diataxis Reference Requirements:**
270 | 1. Information-oriented: Provide complete, accurate information
271 | 2. Structured and consistent organization
272 | 3. Comprehensive coverage of all features/APIs
273 | 4. Neutral tone - describe what is, not how to use
274 | 5. Easy to scan and search
275 | 
276 | **Reference Structure:**
277 | 1. Overview and organization
278 | 2. Complete feature/API listings
279 | 3. Parameters, return values, examples
280 | 4. Technical specifications
281 | 5. Cross-references and relationships
282 | 6. Version compatibility information
283 | 
284 | **Integration Hints:**
285 | - Use analyze_api_endpoints for API documentation
286 | - Reference code_analysis for implementation details
287 | - Consider validate_completeness for coverage verification
288 | 
289 | Please create reference documentation that serves as the authoritative source:`,
290 |       },
291 |     },
292 |   ];
293 | }
294 | 
295 | function generateExplanationWriterPrompt(
296 |   context: ProjectContext,
297 |   args: Record<string, any>,
298 | ): PromptMessage[] {
299 |   const conceptToExplain = args.concept || "system architecture";
300 |   const depth = args.depth || "detailed";
301 | 
302 |   return [
303 |     {
304 |       role: "user",
305 |       content: {
306 |         type: "text",
307 |         text: `Create in-depth explanation documentation for a ${
308 |           context.projectType
309 |         } project following Diataxis framework principles.
310 | 
311 | **Project Context:**
312 | - Type: ${context.projectType}
313 | - Languages: ${context.languages.join(", ")}
314 | - Frameworks: ${context.frameworks.join(", ")}
315 | - Concept to Explain: ${conceptToExplain}
316 | - Depth Level: ${depth}
317 | 
318 | **Diataxis Explanation Requirements:**
319 | 1. Understanding-oriented: Help users understand concepts
320 | 2. Theoretical and conceptual focus
321 | 3. Provide context and background
322 | 4. Explain why things work the way they do
323 | 5. Connect ideas and show relationships
324 | 
325 | **Explanation Structure:**
326 | 1. Introduction and context
327 | 2. Core concepts and principles
328 | 3. How components relate and interact
329 | 4. Design decisions and trade-offs
330 | 5. Historical context and evolution
331 | 6. Implications and consequences
332 | 
333 | **Integration Hints:**
334 | - Use analyze_architecture for system understanding
335 | - Reference design_patterns for architectural insights
336 | - Consider validate_understanding for comprehension checks
337 | 
338 | Please create explanatory content that builds deep understanding:`,
339 |       },
340 |     },
341 |   ];
342 | }
343 | 
344 | function generateDiataxisOrganizerPrompt(
345 |   context: ProjectContext,
346 |   args: Record<string, any>,
347 | ): PromptMessage[] {
348 |   const currentDocs = args.current_docs || "mixed documentation";
349 |   const priority = args.priority || "user needs";
350 | 
351 |   return [
352 |     {
353 |       role: "user",
354 |       content: {
355 |         type: "text",
356 |         text: `Organize existing documentation for a ${
357 |           context.projectType
358 |         } project using Diataxis framework principles.
359 | 
360 | **Project Context:**
361 | - Type: ${context.projectType}
362 | - Languages: ${context.languages.join(", ")}
363 | - Current Documentation: ${currentDocs}
364 | - Organization Priority: ${priority}
365 | 
366 | **Diataxis Organization Requirements:**
367 | 1. Categorize content into four types: Tutorials, How-to guides, Reference, Explanation
368 | 2. Ensure each piece serves its intended purpose
369 | 3. Create clear navigation between content types
370 | 4. Identify gaps and overlaps
371 | 5. Establish content relationships and cross-references
372 | 
373 | **Organization Structure:**
374 | 1. Content audit and classification
375 | 2. Diataxis quadrant mapping
376 | 3. Navigation and information architecture
377 | 4. Content gap analysis
378 | 5. Cross-reference strategy
379 | 6. Migration and improvement plan
380 | 
381 | **Integration Hints:**
382 | - Use analyze_existing_docs for current state assessment
383 | - Reference content_classification for categorization
384 | - Consider validate_organization for structure verification
385 | 
386 | Please organize documentation according to Diataxis principles:`,
387 |       },
388 |     },
389 |   ];
390 | }
391 | 
392 | function generateReadmeOptimizerPrompt(
393 |   context: ProjectContext,
394 |   args: Record<string, any>,
395 | ): PromptMessage[] {
396 |   const optimizationFocus = args.optimization_focus || "general";
397 | 
398 |   return [
399 |     {
400 |       role: "user",
401 |       content: {
402 |         type: "text",
403 |         text: `Optimize existing README content for a ${
404 |           context.projectType
405 |         } project using Diataxis-aware principles.
406 | 
407 | **Project Context:**
408 | - Type: ${context.projectType}
409 | - Languages: ${context.languages.join(", ")}
410 | - README Exists: ${context.readmeExists}
411 | - Documentation Gaps: ${
412 |           context.documentationGaps.join(", ") || "None identified"
413 |         }
414 | - Optimization Focus: ${optimizationFocus}
415 | 
416 | **Diataxis-Aware README Requirements:**
417 | 1. Clear content type identification (tutorial, how-to, reference, explanation)
418 | 2. Appropriate depth for each content type
419 | 3. Logical flow from learning to doing to understanding
420 | 4. Clear navigation to detailed documentation
421 | 5. Audience-appropriate entry points
422 | 
423 | **README Structure (Diataxis-organized):**
424 | 1. Quick start (tutorial-style for beginners)
425 | 2. Common tasks (how-to style for users)
426 | 3. API/feature overview (reference-style for developers)
427 | 4. Architecture overview (explanation-style for understanding)
428 | 5. Links to detailed Diataxis-organized documentation
429 | 
430 | **Integration Hints:**
431 | - Use analyze_readme for current content analysis
432 | - Reference diataxis_principles for content organization
433 | - Consider validate_readme_structure for optimization verification
434 | 
435 | Please optimize the README with Diataxis awareness:`,
436 |       },
437 |     },
438 |   ];
439 | }
440 | 
441 | // Helper functions
442 | async function fileExists(path: string): Promise<boolean> {
443 |   try {
444 |     await fs.access(path);
445 |     return true;
446 |   } catch {
447 |     return false;
448 |   }
449 | }
450 | 
451 | function generateMaintainDocumentationFreshnessPrompt(
452 |   context: ProjectContext,
453 |   args: Record<string, any>,
454 | ): PromptMessage[] {
455 |   const docsPath = args.docs_path || "docs";
456 |   const freshnessPreset = args.freshness_preset || "monthly";
457 |   const action = args.action || "track";
458 | 
459 |   const actionDescriptions = {
460 |     validate:
461 |       "initialize freshness metadata for documentation files that don't have it yet",
462 |     track:
463 |       "scan all documentation for staleness and generate a freshness report",
464 |     insights: "analyze freshness trends over time and get recommendations",
465 |   };
466 | 
467 |   return [
468 |     {
469 |       role: "user",
470 |       content: {
471 |         type: "text",
472 |         text: `Maintain documentation freshness for a ${
473 |           context.projectType
474 |         } project with automated staleness tracking.
475 | 
476 | **Project Context:**
477 | - Type: ${context.projectType}
478 | - Languages: ${context.languages.join(", ")}
479 | - Documentation Path: ${docsPath}
480 | - Freshness Preset: ${freshnessPreset}
481 | - Action: ${action} (${
482 |           actionDescriptions[action as keyof typeof actionDescriptions] ||
483 |           "track staleness"
484 |         })
485 | 
486 | **Documentation Freshness Tracking:**
487 | Documentation freshness tracking helps maintain high-quality, up-to-date documentation by:
488 | 1. Adding temporal metadata to markdown frontmatter (last_updated, last_validated)
489 | 2. Scanning documentation for staleness based on configurable thresholds
490 | 3. Providing insights and trends over time using the knowledge graph
491 | 4. Generating recommendations for which files need attention
492 | 
493 | **Available Actions:**
494 | 
495 | 1. **Validate** (${action === "validate" ? "SELECTED" : "available"}):
496 |    - Initialize freshness metadata for files without it
497 |    - Set last_updated and last_validated timestamps
498 |    - Link validation to git commits for traceability
499 |    - Recommended as first step for new documentation sets
500 | 
501 | 2. **Track** (${action === "track" ? "SELECTED" : "available"}):
502 |    - Scan all documentation files for staleness
503 |    - Categorize as: fresh, warning, stale, or critical
504 |    - Generate comprehensive freshness report
505 |    - Store results in knowledge graph for historical tracking
506 | 
507 | 3. **Insights** (${action === "insights" ? "SELECTED" : "available"}):
508 |    - Analyze freshness trends over time
509 |    - Compare current vs. historical freshness scores
510 |    - Identify chronically stale files
511 |    - Get actionable recommendations
512 | 
513 | **Freshness Presets:**
514 | - realtime: Minutes (for API docs, status pages)
515 | - active: Hours (for development docs, release notes)
516 | - recent: Days (for tutorials, getting started)
517 | - weekly: 7 days (for how-to guides, examples)
518 | - monthly: 30 days (for reference, architecture) - DEFAULT
519 | - quarterly: 90 days (for explanations, background)
520 | 
521 | **Integration Tools:**
522 | - validate_documentation_freshness: Initialize and update metadata
523 | - track_documentation_freshness: Scan and report staleness
524 | - update_existing_documentation: Sync docs with code changes
525 | - sync_code_to_docs: Detect drift between code and docs
526 | 
527 | **Workflow Example:**
528 | 1. First time: Run validate_documentation_freshness to initialize metadata
529 | 2. Regular checks: Run track_documentation_freshness to monitor staleness
530 | 3. Deep analysis: Query knowledge graph for trends and insights
531 | 4. Maintenance: Update stale files and re-validate
532 | 
533 | Please ${
534 |           actionDescriptions[action as keyof typeof actionDescriptions] ||
535 |           "track documentation freshness"
536 |         } and provide guidance on maintaining documentation quality:`,
537 |       },
538 |     },
539 |   ];
540 | }
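
// Illustrative example of the frontmatter this workflow maintains. The
// field names (last_updated, last_validated) come from the prompt above;
// the concrete values are made up:
//
//   ---
//   title: Getting Started
//   last_updated: "2025-01-15T10:30:00Z"
//   last_validated: "2025-01-15T10:30:00Z"
//   ---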
541 | 
542 | async function hasTestFiles(projectPath: string): Promise<boolean> {
543 |   try {
544 |     const files = await fs.readdir(projectPath, { recursive: true });
545 |     return files.some(
546 |       (file) =>
547 |         typeof file === "string" &&
548 |         (file.includes("test") ||
549 |           file.includes("spec") ||
550 |           file.endsWith(".test.js") ||
551 |           file.endsWith(".test.ts") ||
552 |           file.endsWith(".spec.js") ||
553 |           file.endsWith(".spec.ts")),
554 |     );
555 |   } catch {
556 |     return false;
557 |   }
558 | }
559 | 
560 | async function hasCIConfig(projectPath: string): Promise<boolean> {
561 |   const ciFiles = [
562 |     ".github/workflows",
563 |     ".gitlab-ci.yml",
564 |     "circle.yml",
565 |     ".circleci/config.yml",
566 |     "travis.yml",
567 |     ".travis.yml",
568 |   ];
569 | 
570 |   for (const file of ciFiles) {
571 |     if (await fileExists(join(projectPath, file))) {
572 |       return true;
573 |     }
574 |   }
575 |   return false;
576 | }
577 | 
578 | async function identifyDocumentationGaps(
579 |   projectPath: string,
580 |   context: ProjectContext,
581 | ): Promise<string[]> {
582 |   const gaps: string[] = [];
583 | 
584 |   if (!context.readmeExists) {
585 |     gaps.push("readme");
586 |   }
587 | 
588 |   // Check for common documentation files
589 |   const docFiles = [
590 |     "CONTRIBUTING.md",
591 |     "CHANGELOG.md",
592 |     "LICENSE",
593 |     "docs/api.md",
594 |     "docs/tutorial.md",
595 |     "docs/installation.md",
596 |   ];
597 | 
598 |   for (const docFile of docFiles) {
599 |     if (!(await fileExists(join(projectPath, docFile)))) {
600 |       gaps.push(docFile.toLowerCase().replace(".md", "").replace("docs/", ""));
601 |     }
602 |   }
603 | 
604 |   return gaps;
605 | }
606 | 
607 | // Guided workflow prompt generators (ADR-007)
608 | 
609 | function generateAnalyzeAndRecommendPrompt(
610 |   context: ProjectContext,
611 |   args: Record<string, any>,
612 | ): PromptMessage[] {
613 |   const analysisDepth = args.analysis_depth || "standard";
614 |   const preferences =
615 |     args.preferences || "balanced approach with good community support";
616 | 
617 |   return [
618 |     {
619 |       role: "user",
620 |       content: {
621 |         type: "text",
622 |         text: `Execute a complete repository analysis and SSG recommendation workflow for this project.
623 | 
624 | **Project Context:**
625 | - Type: ${context.projectType}
626 | - Languages: ${context.languages.join(", ")}
627 | - Frameworks: ${context.frameworks.join(", ")}
628 | - Package Manager: ${context.packageManager || "N/A"}
629 | - Has Tests: ${context.hasTests}
630 | - Has CI: ${context.hasCI}
631 | - Documentation Gaps: ${context.documentationGaps.join(", ")}
632 | 
633 | **Workflow Parameters:**
634 | - Analysis Depth: ${analysisDepth}
635 | - Preferences: ${preferences}
636 | 
637 | **Expected Workflow:**
638 | 1. **Repository Analysis**: Analyze project structure, dependencies, and complexity
639 | 2. **SSG Recommendation**: Recommend the best static site generator based on project characteristics
640 | 3. **Implementation Guidance**: Provide step-by-step setup instructions
641 | 4. **Best Practices**: Include security, performance, and maintenance recommendations
642 | 
643 | **Required Output Format:**
644 | - Executive summary with key findings
645 | - Detailed analysis results with metrics
646 | - SSG recommendation with justification
647 | - Implementation roadmap with priorities
648 | - Resource requirements and timeline estimates
649 | 
650 | Please execute this workflow systematically and provide actionable recommendations.`,
651 |       },
652 |     },
653 |   ];
654 | }
655 | 
656 | function generateSetupDocumentationPrompt(
657 |   context: ProjectContext,
658 |   args: Record<string, any>,
659 | ): PromptMessage[] {
660 |   const ssgType = args.ssg_type || "recommended based on project analysis";
661 |   const includeExamples = args.include_examples !== false;
662 | 
663 |   return [
664 |     {
665 |       role: "user",
666 |       content: {
667 |         type: "text",
668 |         text: `Create a comprehensive documentation structure with best practices for this project.
669 | 
670 | **Project Context:**
671 | - Type: ${context.projectType}
672 | - Languages: ${context.languages.join(", ")}
673 | - Frameworks: ${context.frameworks.join(", ")}
674 | - Current Documentation Gaps: ${context.documentationGaps.join(", ")}
675 | 
676 | **Setup Parameters:**
677 | - SSG Type: ${ssgType}
678 | - Include Examples: ${includeExamples}
679 | 
680 | **Documentation Structure Requirements:**
681 | 1. **Diataxis Framework Implementation**:
682 |    - Tutorials: Learning-oriented content
683 |    - How-to Guides: Problem-solving content
684 |    - Reference: Information-oriented content
685 |    - Explanations: Understanding-oriented content
686 | 
687 | 2. **Configuration Setup**:
688 |    - SSG configuration files
689 |    - GitHub Pages deployment
690 |    - Automated workflows
691 |    - Security best practices
692 | 
693 | 3. **Content Guidelines**:
694 |    - Writing style guide
695 |    - Contribution guidelines
696 |    - Review processes
697 |    - Maintenance procedures
698 | 
699 | 4. **Development Integration**:
700 |    - Build pipeline integration
701 |    - Automated testing for docs
702 |    - Performance monitoring
703 |    - Analytics setup
704 | 
705 | **Required Deliverables:**
706 | - Complete directory structure
707 | - Configuration files with comments
708 | - Sample content ${includeExamples ? "with examples" : "templates"}
709 | - Deployment automation
710 | - Maintenance runbook
711 | 
712 | Please create a production-ready documentation system that scales with the project.`,
713 |       },
714 |     },
715 |   ];
716 | }
717 | 
718 | function generateTroubleshootDeploymentPrompt(
719 |   context: ProjectContext,
720 |   args: Record<string, any>,
721 | ): PromptMessage[] {
722 |   const repository = args.repository || "current repository";
723 |   const deploymentUrl = args.deployment_url || "GitHub Pages URL";
724 |   const issueDescription =
725 |     args.issue_description || "deployment not working as expected";
726 | 
727 |   return [
728 |     {
729 |       role: "user",
730 |       content: {
731 |         type: "text",
732 |         text: `Diagnose and fix GitHub Pages deployment issues for this documentation project.
733 | 
734 | **Repository Information:**
735 | - Repository: ${repository}
736 | - Expected URL: ${deploymentUrl}
737 | - Issue Description: ${issueDescription}
738 | 
739 | **Project Context:**
740 | - Type: ${context.projectType}
741 | - Languages: ${context.languages.join(", ")}
742 | - Has CI: ${context.hasCI}
743 | 
744 | **Troubleshooting Checklist:**
745 | 
746 | 1. **Repository Settings**:
747 |    - GitHub Pages source configuration
748 |    - Branch and folder settings
749 |    - Custom domain setup (if applicable)
750 |    - Repository visibility and permissions
751 | 
752 | 2. **Build Configuration**:
753 |    - GitHub Actions workflow validation
754 |    - Build dependencies and versions
755 |    - Output directory configuration
756 |    - Asset and link path issues
757 | 
758 | 3. **Content Issues**:
759 |    - Markdown syntax validation
760 |    - Link and image path verification
761 |    - YAML frontmatter validation
762 |    - Special character handling
763 | 
764 | 4. **Deployment Workflow**:
765 |    - Action permissions and secrets
766 |    - Deployment job configuration
767 |    - Artifact handling
768 |    - Cache and dependency issues
769 | 
770 | 5. **Performance and Security**:
771 |    - Build time optimization
772 |    - Security policy compliance
773 |    - CDN and caching configuration
774 |    - SSL certificate validation
775 | 
776 | **Diagnostic Approach:**
777 | 1. **Immediate Assessment**: Check current status and error messages
778 | 2. **Systematic Testing**: Validate each component step-by-step
779 | 3. **Fix Implementation**: Apply targeted solutions with validation
780 | 4. **Prevention Setup**: Implement monitoring and automated checks
781 | 
782 | **Required Output:**
783 | - Root cause analysis
784 | - Step-by-step fix instructions
785 | - Validation procedures
786 | - Prevention recommendations
787 | - Monitoring setup guide
788 | 
789 | Please provide a comprehensive troubleshooting guide with specific, actionable solutions.`,
790 |       },
791 |     },
792 |   ];
793 | }
794 | 
```
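
All of the generators above share the same `(context, args) => PromptMessage[]` shape, so a thin registry can dispatch prompt requests by name. The sketch below is illustrative only: the registry keys and the `getPrompt` entry point are assumptions, not part of this file, though the generator functions and the `ProjectContext`/`PromptMessage` types are the ones defined above.

```typescript
// Hypothetical dispatch layer over the generators defined above.
// The prompt names used as keys are assumptions for illustration.
type PromptGenerator = (
  context: ProjectContext,
  args: Record<string, any>,
) => PromptMessage[];

const PROMPT_GENERATORS: Record<string, PromptGenerator> = {
  "diataxis-organizer": generateDiataxisOrganizerPrompt,
  "readme-optimizer": generateReadmeOptimizerPrompt,
  "maintain-documentation-freshness": generateMaintainDocumentationFreshnessPrompt,
  "analyze-and-recommend": generateAnalyzeAndRecommendPrompt,
  "setup-documentation": generateSetupDocumentationPrompt,
  "troubleshoot-deployment": generateTroubleshootDeploymentPrompt,
};

function getPrompt(
  name: string,
  context: ProjectContext,
  args: Record<string, any> = {},
): PromptMessage[] {
  const generate = PROMPT_GENERATORS[name];
  if (!generate) {
    throw new Error(`Unknown prompt: ${name}`);
  }
  return generate(context, args);
}
```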

--------------------------------------------------------------------------------
/src/utils/language-parsers-simple.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { CodeElement, APIEndpoint } from "./code-scanner.js";
  2 | import { spawn } from "child_process";
  3 | 
  4 | export interface LanguageParser {
  5 |   extensions: string[];
  6 |   name: string;
  7 |   parseFile(content: string, filePath: string): Promise<LanguageParseResult>;
  8 |   supportsApiEndpoints?: boolean;
  9 |   supportsFrameworkDetection?: boolean;
 10 | }
 11 | 
 12 | export interface LanguageParseResult {
 13 |   functions: CodeElement[];
 14 |   classes: CodeElement[];
 15 |   interfaces: CodeElement[];
 16 |   types: CodeElement[];
 17 |   enums: CodeElement[];
 18 |   exports: CodeElement[];
 19 |   imports: CodeElement[];
 20 |   apiEndpoints: APIEndpoint[];
 21 |   constants: CodeElement[];
 22 |   variables: CodeElement[];
 23 | }
 24 | 
 25 | export class MultiLanguageCodeScanner {
 26 |   private parsers = new Map<string, LanguageParser>();
 27 | 
 28 |   constructor() {
 29 |     this.initializeParsers();
 30 |   }
 31 | 
 32 |   private initializeParsers() {
 33 |     // Register parsers based on your tech stack
 34 |     this.registerParser(new PythonParser());
 35 |     this.registerParser(new GoParser());
 36 |     this.registerParser(new YamlParser());
 37 |     this.registerParser(new BashParser());
 38 |   }
 39 | 
 40 |   private registerParser(parser: LanguageParser) {
 41 |     for (const extension of parser.extensions) {
 42 |       this.parsers.set(extension, parser);
 43 |     }
 44 |   }
 45 | 
 46 |   async parseFile(
 47 |     content: string,
 48 |     filePath: string,
 49 |   ): Promise<LanguageParseResult> {
 50 |     const extension = this.getFileExtension(filePath);
 51 |     const parser = this.parsers.get(extension);
 52 | 
 53 |     if (parser) {
 54 |       return await parser.parseFile(content, filePath);
 55 |     }
 56 | 
 57 |     // Return empty result for unsupported files
 58 |     return this.getEmptyResult();
 59 |   }
 60 | 
 61 |   private getFileExtension(filePath: string): string {
 62 |     return filePath.split(".").pop()?.toLowerCase() || "";
 63 |   }
 64 | 
 65 |   private getEmptyResult(): LanguageParseResult {
 66 |     return {
 67 |       functions: [],
 68 |       classes: [],
 69 |       interfaces: [],
 70 |       types: [],
 71 |       enums: [],
 72 |       exports: [],
 73 |       imports: [],
 74 |       apiEndpoints: [],
 75 |       constants: [],
 76 |       variables: [],
 77 |     };
 78 |   }
 79 | 
 80 |   getSupportedExtensions(): string[] {
 81 |     return Array.from(this.parsers.keys());
 82 |   }
 83 | 
 84 |   getParserInfo(): { extension: string; parser: string }[] {
 85 |     return Array.from(this.parsers.entries()).map(([ext, parser]) => ({
 86 |       extension: ext,
 87 |       parser: parser.name,
 88 |     }));
 89 |   }
 90 | }
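
// Illustrative: supporting another language only requires implementing
// LanguageParser and registering it in initializeParsers(), e.g.
//   this.registerParser(new RubyParser()); // RubyParser is hypothetical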
 91 | 
 92 | // Python Parser Implementation using subprocess + regex fallback
 93 | export class PythonParser implements LanguageParser {
 94 |   extensions = ["py", "pyi", "pyx", "pxd"];
 95 |   name = "Python";
 96 |   supportsApiEndpoints = true;
 97 |   supportsFrameworkDetection = true;
 98 | 
 99 |   async parseFile(
100 |     content: string,
101 |     filePath: string,
102 |   ): Promise<LanguageParseResult> {
103 |     const result: LanguageParseResult = {
104 |       functions: [],
105 |       classes: [],
106 |       interfaces: [],
107 |       types: [],
108 |       enums: [],
109 |       exports: [],
110 |       imports: [],
111 |       apiEndpoints: [],
112 |       constants: [],
113 |       variables: [],
114 |     };
115 | 
116 |     try {
117 |       // Try subprocess-based AST parsing first
118 |       const astResult = await this.parseWithPythonAST(content, filePath);
119 |       if (astResult) {
120 |         this.mergePythonASTResults(astResult, result, filePath);
121 |       } else {
122 |         // Fall back to regex-based parsing
123 |         this.parseWithRegex(content, result, filePath);
124 |       }
125 | 
126 |       // Look for Flask/FastAPI/Django endpoints
127 |       this.findPythonApiEndpoints(content, result, filePath);
128 |     } catch (error) {
129 |       console.warn(`Failed to parse Python file ${filePath}:`, error);
130 |       // Fall back to regex-based parsing
131 |       this.parseWithRegex(content, result, filePath);
132 |     }
133 | 
134 |     return result;
135 |   }
136 | 
137 |   private async parseWithPythonAST(
138 |     content: string,
139 |     _filePath: string,
140 |   ): Promise<any> {
141 |     return new Promise((resolve) => {
142 |       // Create a Python script to parse the AST
143 |       const pythonScript = `
144 | import ast
145 | import sys
146 | import json
147 | import tempfile
148 | import os
149 | 
150 | try:
151 |     # Read content from stdin
152 |     content = sys.stdin.read()
153 | 
154 |     tree = ast.parse(content)
155 | 
156 |     result = {
157 |         'functions': [],
158 |         'classes': [],
159 |         'imports': [],
160 |         'constants': [],
161 |         'variables': []
162 |     }
163 | 
164 |     for node in ast.walk(tree):
165 |         if isinstance(node, ast.FunctionDef):
166 |             result['functions'].append({
167 |                 'name': node.name,
168 |                 'line': node.lineno,
169 |                 'has_docstring': ast.get_docstring(node) is not None,
170 |                 'docstring': ast.get_docstring(node),
171 |                 'is_async': False,
172 |                 'exported': not node.name.startswith('_')
173 |             })
174 |         elif isinstance(node, ast.AsyncFunctionDef):
175 |             result['functions'].append({
176 |                 'name': node.name,
177 |                 'line': node.lineno,
178 |                 'has_docstring': ast.get_docstring(node) is not None,
179 |                 'docstring': ast.get_docstring(node),
180 |                 'is_async': True,
181 |                 'exported': not node.name.startswith('_')
182 |             })
183 |         elif isinstance(node, ast.ClassDef):
184 |             result['classes'].append({
185 |                 'name': node.name,
186 |                 'line': node.lineno,
187 |                 'has_docstring': ast.get_docstring(node) is not None,
188 |                 'docstring': ast.get_docstring(node),
189 |                 'exported': not node.name.startswith('_')
190 |             })
191 |         elif isinstance(node, (ast.Import, ast.ImportFrom)):
192 |             if isinstance(node, ast.Import):
193 |                 for alias in node.names:
194 |                     result['imports'].append({
195 |                         'name': alias.name,
196 |                         'line': node.lineno
197 |                     })
198 |             else:  # ImportFrom
199 |                 result['imports'].append({
200 |                     'name': node.module or 'relative',
201 |                     'line': node.lineno
202 |                 })
203 |         elif isinstance(node, ast.Assign):
204 |             for target in node.targets:
205 |                 if isinstance(target, ast.Name):
206 |                     is_constant = target.id.isupper()
207 |                     result['constants' if is_constant else 'variables'].append({
208 |                         'name': target.id,
209 |                         'line': node.lineno,
210 |                         'exported': not target.id.startswith('_')
211 |                     })
212 | 
213 |     print(json.dumps(result))
214 | except Exception as e:
215 |     print(json.dumps({'error': str(e)}), file=sys.stderr)
216 | `;
217 | 
218 |       // Try to execute Python AST parsing
219 |       const process = spawn("python3", ["-c", pythonScript], {
220 |         stdio: ["pipe", "pipe", "pipe"],
221 |       });
222 | 
223 |       // Send content via stdin
224 |       process.stdin.write(content);
225 |       process.stdin.end();
226 | 
227 |       let output = "";
228 |       let errorOutput = "";
229 | 
230 |       process.stdout.on("data", (data) => {
231 |         output += data.toString();
232 |       });
233 | 
234 |       process.stderr.on("data", (data) => {
235 |         errorOutput += data.toString();
236 |       });
237 | 
238 |       process.on("close", (code) => {
239 |         if (code === 0 && output.trim()) {
240 |           try {
241 |             const result = JSON.parse(output.trim());
242 |             if (!result.error) {
243 |               resolve(result);
244 |               return;
245 |             }
246 |           } catch (e) {
247 |             // JSON parsing failed
248 |             console.warn("Failed to parse Python AST output:", e);
249 |           }
250 |         }
251 |         if (errorOutput) {
252 |           console.warn("Python AST parsing errors:", errorOutput);
253 |         }
254 |         resolve(null); // Fall back to regex parsing
255 |       });
256 | 
257 |       process.on("error", () => {
258 |         resolve(null); // Python not available or failed
259 |       });
260 | 
261 |       // Timeout after 5 seconds
262 |       setTimeout(() => {
263 |         process.kill();
264 |         resolve(null);
265 |       }, 5000);
266 |     });
267 |   }
268 | 
269 |   private mergePythonASTResults(
270 |     astResult: any,
271 |     result: LanguageParseResult,
272 |     filePath: string,
273 |   ): void {
274 |     astResult.functions?.forEach((func: any) => {
275 |       result.functions.push({
276 |         name: func.name,
277 |         type: "function",
278 |         filePath,
279 |         line: func.line,
280 |         column: 0,
281 |         exported: func.exported,
282 |         isAsync: func.is_async,
283 |         hasJSDoc: func.has_docstring,
284 |         jsDocDescription: func.docstring || undefined,
285 |       });
286 |     });
287 | 
288 |     astResult.classes?.forEach((cls: any) => {
289 |       result.classes.push({
290 |         name: cls.name,
291 |         type: "class",
292 |         filePath,
293 |         line: cls.line,
294 |         column: 0,
295 |         exported: cls.exported,
296 |         hasJSDoc: cls.has_docstring,
297 |         jsDocDescription: cls.docstring || undefined,
298 |       });
299 |     });
300 | 
301 |     astResult.imports?.forEach((imp: any) => {
302 |       result.imports.push({
303 |         name: imp.name,
304 |         type: "import",
305 |         filePath,
306 |         line: imp.line,
307 |         column: 0,
308 |         exported: false,
309 |       });
310 |     });
311 | 
312 |     astResult.constants?.forEach((constant: any) => {
313 |       result.constants.push({
314 |         name: constant.name,
315 |         type: "variable",
316 |         filePath,
317 |         line: constant.line,
318 |         column: 0,
319 |         exported: constant.exported,
320 |         hasJSDoc: false,
321 |       });
322 |     });
323 | 
324 |     astResult.variables?.forEach((variable: any) => {
325 |       result.variables.push({
326 |         name: variable.name,
327 |         type: "variable",
328 |         filePath,
329 |         line: variable.line,
330 |         column: 0,
331 |         exported: variable.exported,
332 |         hasJSDoc: false,
333 |       });
334 |     });
335 |   }
336 | 
337 |   private parseWithRegex(
338 |     content: string,
339 |     result: LanguageParseResult,
340 |     filePath: string,
341 |   ): void {
342 |     const lines = content.split("\n");
343 | 
344 |     lines.forEach((line, index) => {
345 |       const lineNum = index + 1;
346 | 
347 |       // Function definitions
348 |       const funcMatch = line.match(
349 |         /^\s*(async\s+)?def\s+([a-zA-Z_][a-zA-Z0-9_]*)\s*\(/,
350 |       );
351 |       if (funcMatch) {
352 |         const isAsync = !!funcMatch[1];
353 |         const funcName = funcMatch[2];
354 |         const hasDocstring = this.hasDocstringAfterLine(lines, index);
355 | 
356 |         result.functions.push({
357 |           name: funcName,
358 |           type: "function",
359 |           filePath,
360 |           line: lineNum,
361 |           column: 0,
362 |           exported: !funcName.startsWith("_"),
363 |           isAsync,
364 |           hasJSDoc: hasDocstring,
365 |         });
366 |       }
367 | 
368 |       // Class definitions
369 |       const classMatch = line.match(/^\s*class\s+([a-zA-Z_][a-zA-Z0-9_]*)/);
370 |       if (classMatch) {
371 |         const className = classMatch[1];
372 |         const hasDocstring = this.hasDocstringAfterLine(lines, index);
373 | 
374 |         result.classes.push({
375 |           name: className,
376 |           type: "class",
377 |           filePath,
378 |           line: lineNum,
379 |           column: 0,
380 |           exported: !className.startsWith("_"),
381 |           hasJSDoc: hasDocstring,
382 |         });
383 |       }
384 | 
385 |       // Import statements
386 |       const importMatch = line.match(
387 |         /^\s*(?:from\s+([^\s]+)\s+)?import\s+(.+)/,
388 |       );
389 |       if (importMatch) {
390 |         const module = importMatch[1] || importMatch[2].split(",")[0].trim();
391 |         result.imports.push({
392 |           name: module,
393 |           type: "import",
394 |           filePath,
395 |           line: lineNum,
396 |           column: 0,
397 |           exported: false,
398 |         });
399 |       }
400 | 
401 |       // Module-level ALL_CAPS constants (the regex fallback skips other variables)
402 |       const assignMatch = line.match(/^\s*([A-Z_][A-Z0-9_]*)\s*=/);
403 |       if (assignMatch) {
404 |         result.constants.push({
405 |           name: assignMatch[1],
406 |           type: "variable",
407 |           filePath,
408 |           line: lineNum,
409 |           column: 0,
410 |           exported: true,
411 |           hasJSDoc: false,
412 |         });
413 |       }
414 |     });
415 |   }
416 | 
417 |   private hasDocstringAfterLine(lines: string[], lineIndex: number): boolean {
418 |     // Check if next few lines contain a docstring
419 |     for (
420 |       let i = lineIndex + 1;
421 |       i < Math.min(lineIndex + 3, lines.length);
422 |       i++
423 |     ) {
424 |       const line = lines[i].trim();
425 |       if (line.startsWith('"""') || line.startsWith("'''")) {
426 |         return true;
427 |       }
428 |     }
429 |     return false;
430 |   }
431 | 
432 |   private findPythonApiEndpoints(
433 |     content: string,
434 |     result: LanguageParseResult,
435 |     filePath: string,
436 |   ) {
437 |     // Flask patterns
438 |     const flaskPatterns = [
439 |       /@app\.(route|get|post|put|delete|patch)\s*\(\s*['"]([^'"]+)['"]/g,
440 |       /@bp\.(route|get|post|put|delete|patch)\s*\(\s*['"]([^'"]+)['"]/g,
441 |     ];
442 | 
443 |     // FastAPI patterns (@app.<method> is already matched by the Flask
444 |     // patterns above, so only router-level routes are added here)
445 |     const fastApiPatterns = [
446 |       /router\.(get|post|put|delete|patch)\s*\(\s*['"]([^'"]+)['"]/g,
447 |     ];
448 | 
449 |     // Django patterns
450 |     const djangoPatterns = [
451 |       /path\s*\(\s*['"]([^'"]+)['"]/g,
452 |       /url\s*\(\s*r?['"]([^'"]+)['"]/g,
453 |     ];
454 | 
455 |     const allPatterns = [
456 |       ...flaskPatterns,
457 |       ...fastApiPatterns,
458 |       ...djangoPatterns,
459 |     ];
460 | 
461 |     allPatterns.forEach((pattern) => {
462 |       let match;
463 |       while ((match = pattern.exec(content)) !== null) {
464 |         const method =
465 |           !match[2] || match[1] === "route" // Django patterns capture only the path
466 |             ? "ALL"
467 |             : (match[1].toUpperCase() as APIEndpoint["method"]);
468 |         const path = match[2] || match[1]; // Handle different capture groups
469 | 
470 |         // Find line number
471 |         const beforeMatch = content.substring(0, match.index!);
472 |         const line = beforeMatch.split("\n").length;
473 | 
474 |         result.apiEndpoints.push({
475 |           method,
476 |           path,
477 |           filePath,
478 |           line,
479 |           hasDocumentation: this.hasEndpointDocumentation(
480 |             content,
481 |             match.index!,
482 |           ),
483 |         });
484 |       }
485 |     });
486 |   }
487 | 
488 |   private hasEndpointDocumentation(
489 |     content: string,
490 |     matchIndex: number,
491 |   ): boolean {
492 |     const beforeMatch = content.substring(0, matchIndex);
493 |     const lines = beforeMatch.split("\n");
494 | 
495 |     // Check last few lines for docstrings or comments
496 |     for (let i = Math.max(0, lines.length - 5); i < lines.length; i++) {
497 |       const line = lines[i].trim();
498 |       if (
499 |         line.startsWith('"""') ||
500 |         line.startsWith("'''") ||
501 |         line.startsWith("#")
502 |       ) {
503 |         return true;
504 |       }
505 |     }
506 |     return false;
507 |   }
508 | }
509 | 
510 | // Go Parser Implementation (regex-based)
511 | export class GoParser implements LanguageParser {
512 |   extensions = ["go"];
513 |   name = "Go";
514 |   supportsApiEndpoints = true;
515 | 
516 |   async parseFile(
517 |     content: string,
518 |     filePath: string,
519 |   ): Promise<LanguageParseResult> {
520 |     const result: LanguageParseResult = {
521 |       functions: [],
522 |       classes: [],
523 |       interfaces: [],
524 |       types: [],
525 |       enums: [],
526 |       exports: [],
527 |       imports: [],
528 |       apiEndpoints: [],
529 |       constants: [],
530 |       variables: [],
531 |     };
532 | 
533 |     const lines = content.split("\n");
534 | 
535 |     lines.forEach((line, index) => {
536 |       const lineNum = index + 1;
537 | 
538 |       // Function declarations
539 |       const funcMatch = line.match(
540 |         /^\s*func\s+(?:\([^)]*\)\s+)?([a-zA-Z_][a-zA-Z0-9_]*)\s*\(/,
541 |       );
542 |       if (funcMatch) {
543 |         const funcName = funcMatch[1];
544 |         result.functions.push({
545 |           name: funcName,
546 |           type: "function",
547 |           filePath,
548 |           line: lineNum,
549 |           column: 0,
550 |           exported: this.isGoExported(funcName),
551 |           hasJSDoc: this.hasGoDocComment(lines, index),
552 |         });
553 |       }
554 | 
555 |       // Type declarations (struct, interface, etc.)
556 |       const typeMatch = line.match(
557 |         /^\s*type\s+([a-zA-Z_][a-zA-Z0-9_]*)\s+(struct|interface)/,
558 |       );
559 |       if (typeMatch) {
560 |         const typeName = typeMatch[1];
561 |         const typeKind = typeMatch[2];
562 | 
563 |         if (typeKind === "struct") {
564 |           result.classes.push({
565 |             name: typeName,
566 |             type: "class",
567 |             filePath,
568 |             line: lineNum,
569 |             column: 0,
570 |             exported: this.isGoExported(typeName),
571 |             hasJSDoc: this.hasGoDocComment(lines, index),
572 |           });
573 |         } else if (typeKind === "interface") {
574 |           result.interfaces.push({
575 |             name: typeName,
576 |             type: "interface",
577 |             filePath,
578 |             line: lineNum,
579 |             column: 0,
580 |             exported: this.isGoExported(typeName),
581 |             hasJSDoc: this.hasGoDocComment(lines, index),
582 |           });
583 |         }
584 |       }
585 | 
586 |       // Import declarations
587 |       const importMatch = line.match(/^\s*(?:import\s*)?"([^"]+)"/);
588 |       if (importMatch) {
589 |         result.imports.push({
590 |           name: importMatch[1],
591 |           type: "import",
592 |           filePath,
593 |           line: lineNum,
594 |           column: 0,
595 |           exported: false,
596 |         });
597 |       }
598 | 
599 |       // Constants and variables
600 |       const constMatch = line.match(
601 |         /^\s*(const|var)\s+([a-zA-Z_][a-zA-Z0-9_]*)/,
602 |       );
603 |       if (constMatch) {
604 |         const declType = constMatch[1];
605 |         const varName = constMatch[2];
606 | 
607 |         const element: CodeElement = {
608 |           name: varName,
609 |           type: "variable",
610 |           filePath,
611 |           line: lineNum,
612 |           column: 0,
613 |           exported: this.isGoExported(varName),
614 |           hasJSDoc: this.hasGoDocComment(lines, index),
615 |         };
616 | 
617 |         if (declType === "const") {
618 |           result.constants.push(element);
619 |         } else {
620 |           result.variables.push(element);
621 |         }
622 |       }
623 |     });
624 | 
625 |     // Find Go API endpoints
626 |     this.findGoApiEndpoints(content, result, filePath);
627 | 
628 |     return result;
629 |   }
630 | 
631 |   private isGoExported(name: string): boolean {
632 |     // In Go, exported names start with uppercase letter
633 |     return name.length > 0 && name[0] === name[0].toUpperCase();
634 |   }
635 | 
636 |   private hasGoDocComment(lines: string[], lineIndex: number): boolean {
637 |     // Check if previous line has a doc comment
638 |     if (lineIndex > 0) {
639 |       const prevLine = lines[lineIndex - 1].trim();
640 |       return prevLine.startsWith("//");
641 |     }
642 |     return false;
643 |   }
644 | 
645 |   private findGoApiEndpoints(
646 |     content: string,
647 |     result: LanguageParseResult,
648 |     filePath: string,
649 |   ) {
650 |     // Common Go web framework patterns
651 |     const patterns = [
652 |       // Gin framework
653 |       /\.(GET|POST|PUT|DELETE|PATCH)\s*\(\s*"([^"]+)"/g,
654 |       // Echo framework
655 |       /\.(Get|Post|Put|Delete|Patch)\s*\(\s*"([^"]+)"/g,
656 |       // Gorilla mux
657 |       /\.HandleFunc\s*\(\s*"([^"]+)"/g,
658 |       // Standard library
659 |       /http\.HandleFunc\s*\(\s*"([^"]+)"/g,
660 |     ];
661 | 
662 |     patterns.forEach((pattern) => {
663 |       let match;
664 |       while ((match = pattern.exec(content)) !== null) {
665 |         let method: APIEndpoint["method"] = "ALL";
666 |         let path: string;
667 | 
668 |         if (match[1] && match[2]) {
669 |           method = match[1].toUpperCase() as APIEndpoint["method"];
670 |           path = match[2];
671 |         } else {
672 |           path = match[1] || match[2];
673 |         }
674 | 
675 |         const beforeMatch = content.substring(0, match.index!);
676 |         const line = beforeMatch.split("\n").length;
677 | 
678 |         result.apiEndpoints.push({
679 |           method,
680 |           path,
681 |           filePath,
682 |           line,
683 |           hasDocumentation: this.hasEndpointDocumentation(
684 |             content,
685 |             match.index!,
686 |           ),
687 |         });
688 |       }
689 |     });
690 |   }
691 | 
692 |   private hasEndpointDocumentation(
693 |     content: string,
694 |     matchIndex: number,
695 |   ): boolean {
696 |     const beforeMatch = content.substring(0, matchIndex);
697 |     const lines = beforeMatch.split("\n");
698 | 
699 |     for (let i = Math.max(0, lines.length - 5); i < lines.length; i++) {
700 |       const line = lines[i].trim();
701 |       if (line.startsWith("//") || line.startsWith("/*")) {
702 |         return true;
703 |       }
704 |     }
705 |     return false;
706 |   }
707 | }
708 | 
709 | // YAML Parser for Kubernetes manifests, Docker Compose, and GitHub Actions
710 | export class YamlParser implements LanguageParser {
711 |   extensions = ["yml", "yaml"];
712 |   name = "YAML";
713 |   supportsFrameworkDetection = true;
714 | 
715 |   async parseFile(
716 |     content: string,
717 |     filePath: string,
718 |   ): Promise<LanguageParseResult> {
719 |     const result: LanguageParseResult = {
720 |       functions: [],
721 |       classes: [],
722 |       interfaces: [],
723 |       types: [],
724 |       enums: [],
725 |       exports: [],
726 |       imports: [],
727 |       apiEndpoints: [],
728 |       constants: [],
729 |       variables: [],
730 |     };
731 | 
732 |     // YAML parsing focuses on identifying Kubernetes resources, Docker Compose services, and GitHub Actions jobs
733 |     this.identifyKubernetesResources(content, result, filePath);
734 |     this.identifyDockerComposeServices(content, result, filePath);
735 |     this.identifyGitHubActions(content, result, filePath);
736 | 
737 |     return result;
738 |   }
739 | 
740 |   private identifyKubernetesResources(
741 |     content: string,
742 |     result: LanguageParseResult,
743 |     filePath: string,
744 |   ) {
745 |     const lines = content.split("\n");
746 |     let apiVersion = "";
747 |     let kind = "";
748 | 
749 |     lines.forEach((line, index) => {
750 |       const lineNum = index + 1;
751 | 
752 |       const apiMatch = line.match(/^\s*apiVersion:\s*(.+)/);
753 |       if (apiMatch) {
754 |         apiVersion = apiMatch[1].trim();
755 |       }
756 | 
757 |       const kindMatch = line.match(/^\s*kind:\s*(.+)/);
758 |       if (kindMatch) {
759 |         kind = kindMatch[1].trim();
760 | 
761 |         result.types.push({
762 |           name: `${kind} (${apiVersion})`,
763 |           type: "type",
764 |           filePath,
765 |           line: lineNum,
766 |           column: 0,
767 |           exported: true,
768 |           hasJSDoc: false,
769 |         });
770 |       }
771 |     });
772 |   }
773 | 
774 |   private identifyDockerComposeServices(
775 |     content: string,
776 |     result: LanguageParseResult,
777 |     filePath: string,
778 |   ) {
779 |     let inServicesSection = false;
780 | 
781 |     const lines = content.split("\n");
782 | 
783 |     lines.forEach((line, index) => {
784 |       if (line.trim() === "services:") {
785 |         inServicesSection = true;
786 |         return;
787 |       }
788 | 
789 |       if (inServicesSection && line.match(/^[a-zA-Z]/)) {
790 |         inServicesSection = false; // Left services section
791 |       }
792 | 
793 |       if (inServicesSection) {
794 |         const serviceMatch = line.match(/^ {2}([a-zA-Z0-9_-]+):\s*$/); // service keys only (assumes 2-space indent)
795 |         if (serviceMatch) {
796 |           result.types.push({
797 |             name: `service: ${serviceMatch[1]}`,
798 |             type: "type",
799 |             filePath,
800 |             line: index + 1,
801 |             column: 0,
802 |             exported: true,
803 |             hasJSDoc: false,
804 |           });
805 |         }
806 |       }
807 |     });
808 |   }
809 | 
810 |   private identifyGitHubActions(
811 |     content: string,
812 |     result: LanguageParseResult,
813 |     filePath: string,
814 |   ) {
815 |     if (!filePath.includes(".github/workflows/")) return;
816 | 
817 |     const lines = content.split("\n");
818 |     let inJobsSection = false;
819 | 
820 |     lines.forEach((line, index) => {
821 |       if (line.trim() === "jobs:") {
822 |         inJobsSection = true;
823 |         return;
824 |       }
825 | 
826 |       if (inJobsSection && line.match(/^[a-zA-Z]/)) {
827 |         inJobsSection = false;
828 |       }
829 | 
830 |       if (inJobsSection) {
831 |         const jobMatch = line.match(/^ {2}([a-zA-Z0-9_-]+):\s*$/); // job keys only (assumes 2-space indent)
832 |         if (jobMatch) {
833 |           result.functions.push({
834 |             name: `job: ${jobMatch[1]}`,
835 |             type: "function",
836 |             filePath,
837 |             line: index + 1,
838 |             column: 0,
839 |             exported: true,
840 |             hasJSDoc: false,
841 |           });
842 |         }
843 |       }
844 |     });
845 |   }
846 | }
847 | 
848 | // Bash Parser for DevOps scripts
849 | export class BashParser implements LanguageParser {
850 |   extensions = ["sh", "bash", "zsh"];
851 |   name = "Bash";
852 | 
853 |   async parseFile(
854 |     content: string,
855 |     filePath: string,
856 |   ): Promise<LanguageParseResult> {
857 |     const result: LanguageParseResult = {
858 |       functions: [],
859 |       classes: [],
860 |       interfaces: [],
861 |       types: [],
862 |       enums: [],
863 |       exports: [],
864 |       imports: [],
865 |       apiEndpoints: [],
866 |       constants: [],
867 |       variables: [],
868 |     };
869 | 
870 |     const lines = content.split("\n");
871 | 
872 |     lines.forEach((line, index) => {
873 |       const lineNum = index + 1;
874 | 
875 |       // Function definitions
876 |       const funcMatch = line.match(
877 |         /^\s*(?:function\s+)?([a-zA-Z_][a-zA-Z0-9_]*)\s*\(\)/,
878 |       );
879 |       if (funcMatch) {
880 |         const functionName = funcMatch[1];
881 | 
882 |         result.functions.push({
883 |           name: functionName,
884 |           type: "function",
885 |           filePath,
886 |           line: lineNum,
887 |           column: 0,
888 |           exported: true, // Bash functions are generally available in scope
889 |           hasJSDoc: this.hasBashDocComment(lines, index),
890 |         });
891 |       }
892 | 
893 |       // Variable assignments
894 |       const varMatch = line.match(/^\s*([a-zA-Z_][a-zA-Z0-9_]*)=/); // lowercase names count as variables, ALL_CAPS as constants
895 |       if (varMatch) {
896 |         const varName = varMatch[1];
897 |         const isConstant = varName === varName.toUpperCase();
898 | 
899 |         const element: CodeElement = {
900 |           name: varName,
901 |           type: "variable",
902 |           filePath,
903 |           line: lineNum,
904 |           column: 0,
905 |           exported: true,
906 |           hasJSDoc: this.hasBashDocComment(lines, index),
907 |         };
908 | 
909 |         if (isConstant) {
910 |           result.constants.push(element);
911 |         } else {
912 |           result.variables.push(element);
913 |         }
914 |       }
915 |     });
916 | 
917 |     return result;
918 |   }
919 | 
920 |   private hasBashDocComment(lines: string[], lineIndex: number): boolean {
921 |     // Check if previous line has a comment
922 |     if (lineIndex > 0) {
923 |       const prevLine = lines[lineIndex - 1].trim();
924 |       return prevLine.startsWith("#");
925 |     }
926 |     return false;
927 |   }
928 | }
929 | 
```
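
A minimal usage sketch for `MultiLanguageCodeScanner`, assuming only the API shown above; the file path in the invocation is hypothetical.

```typescript
import { promises as fs } from "fs";
import { MultiLanguageCodeScanner } from "./language-parsers-simple.js";

async function summarize(filePath: string): Promise<void> {
  const scanner = new MultiLanguageCodeScanner();
  // Extension-based dispatch: unsupported extensions return an empty result.
  const content = await fs.readFile(filePath, "utf-8");
  const parsed = await scanner.parseFile(content, filePath);

  console.log(`Supported extensions: ${scanner.getSupportedExtensions().join(", ")}`);
  console.log(
    `${filePath}: ${parsed.functions.length} functions, ` +
      `${parsed.classes.length} classes, ${parsed.apiEndpoints.length} API endpoints`,
  );
}

// Hypothetical invocation over a shell script handled by BashParser.
summarize("./scripts/deploy.sh").catch(console.error);
```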

--------------------------------------------------------------------------------
/src/memory/pruning.ts:
--------------------------------------------------------------------------------

```typescript
  1 | /**
  2 |  * Memory Pruning & Optimization System for DocuMCP
  3 |  * Intelligent memory cleanup, storage optimization, and performance tuning
  4 |  */
  5 | 
  6 | import { EventEmitter } from "events";
  7 | import { MemoryEntry, JSONLStorage } from "./storage.js";
  8 | import { MemoryManager } from "./manager.js";
  9 | import { IncrementalLearningSystem } from "./learning.js";
 10 | import { KnowledgeGraph } from "./knowledge-graph.js";
 11 | 
 12 | export interface PruningPolicy {
 13 |   maxAge: number; // Maximum age in days
 14 |   maxSize: number; // Maximum storage size in MB
 15 |   maxEntries: number; // Maximum number of entries
 16 |   preservePatterns: string[]; // Pattern types to preserve
 17 |   compressionThreshold: number; // Compress entries older than X days
 18 |   redundancyThreshold: number; // Remove similar entries with similarity > X
 19 | }
 20 | 
 21 | export interface OptimizationMetrics {
 22 |   totalEntries: number;
 23 |   storageSize: number;
 24 |   indexSize: number;
 25 |   compressionRatio: number;
 26 |   duplicatesRemoved: number;
 27 |   entriesPruned: number;
 28 |   performanceGain: number;
 29 |   lastOptimization: Date;
 30 | }
 31 | 
 32 | export interface PruningResult {
 33 |   entriesRemoved: number;
 34 |   spaceSaved: number;
 35 |   patternsPreserved: number;
 36 |   compressionApplied: number;
 37 |   optimizationApplied: boolean;
 38 |   metrics: OptimizationMetrics;
 39 | }
 40 | 
 41 | export interface CompressionStrategy {
 42 |   type: "gzip" | "lz4" | "semantic";
 43 |   threshold: number;
 44 |   ratio: number;
 45 | }
 46 | 
 47 | export interface RedundancyPattern {
 48 |   similarity: number;
 49 |   count: number;
 50 |   representative: string;
 51 |   duplicates: string[];
 52 |   canMerge: boolean;
 53 | }
 54 | 
 55 | export class MemoryPruningSystem extends EventEmitter {
 56 |   private storage: JSONLStorage;
 57 |   private manager: MemoryManager;
 58 |   private learningSystem: IncrementalLearningSystem;
 59 |   private knowledgeGraph: KnowledgeGraph;
 60 |   private defaultPolicy: PruningPolicy;
 61 |   private compressionCache: Map<string, any>;
 62 |   private similarityCache: Map<string, Map<string, number>>;
 63 | 
 64 |   constructor(
 65 |     storage: JSONLStorage,
 66 |     manager: MemoryManager,
 67 |     learningSystem: IncrementalLearningSystem,
 68 |     knowledgeGraph: KnowledgeGraph,
 69 |   ) {
 70 |     super();
 71 |     this.storage = storage;
 72 |     this.manager = manager;
 73 |     this.learningSystem = learningSystem;
 74 |     this.knowledgeGraph = knowledgeGraph;
 75 |     this.compressionCache = new Map();
 76 |     this.similarityCache = new Map();
 77 | 
 78 |     this.defaultPolicy = {
 79 |       maxAge: 180, // 6 months
 80 |       maxSize: 500, // 500MB
 81 |       maxEntries: 50000,
 82 |       preservePatterns: [
 83 |         "successful_deployment",
 84 |         "user_preference",
 85 |         "critical_error",
 86 |       ],
 87 |       compressionThreshold: 30, // Compress after 30 days
 88 |       redundancyThreshold: 0.85, // 85% similarity threshold
 89 |     };
 90 | 
 91 |     this.setupPeriodicCleanup();
 92 |   }
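
  // Illustrative: prune() takes a Partial<PruningPolicy> and merges it over
  // these defaults, so a caller can tighten a single knob, e.g.
  //   await pruningSystem.prune({ maxAge: 90, maxSize: 250 });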
 93 | 
 94 |   /**
 95 |    * Execute comprehensive memory pruning
 96 |    */
 97 |   async prune(policy?: Partial<PruningPolicy>): Promise<PruningResult> {
 98 |     const activePolicy = { ...this.defaultPolicy, ...policy };
 99 |     const startTime = Date.now();
100 | 
101 |     this.emit("pruning_started", { policy: activePolicy });
102 | 
103 |     try {
104 |       // Get current metrics
105 |       const initialMetrics = await this.getOptimizationMetrics();
106 | 
107 |       // Phase 1: Remove aged entries
108 |       const agedResult = await this.removeAgedEntries(activePolicy);
109 | 
110 |       // Phase 2: Apply size-based pruning
111 |       const sizeResult = await this.applySizePruning(activePolicy);
112 | 
113 |       // Phase 3: Remove redundant entries
114 |       const redundancyResult = await this.removeRedundantEntries(activePolicy);
115 | 
116 |       // Phase 4: Apply compression
117 |       const compressionResult = await this.applyCompression(activePolicy);
118 | 
119 |       // Phase 5: Optimize storage structure
120 |       const optimizationResult = await this.optimizeStorage();
121 | 
122 |       // Get final metrics
123 |       const finalMetrics = await this.getOptimizationMetrics();
124 | 
125 |       const result: PruningResult = {
126 |         entriesRemoved:
127 |           agedResult.removed + sizeResult.removed + redundancyResult.removed,
128 |         spaceSaved: initialMetrics.storageSize - finalMetrics.storageSize,
129 |         patternsPreserved: agedResult.preserved + sizeResult.preserved,
130 |         compressionApplied: compressionResult.compressed,
131 |         optimizationApplied: optimizationResult.applied,
132 |         metrics: finalMetrics,
133 |       };
134 | 
135 |       // Update learning system with pruning results
136 |       await this.updateLearningFromPruning(result);
137 | 
138 |       this.emit("pruning_completed", {
139 |         result,
140 |         duration: Date.now() - startTime,
141 |       });
142 | 
143 |       return result;
144 |     } catch (error) {
145 |       this.emit("pruning_error", {
146 |         error: error instanceof Error ? error.message : String(error),
147 |       });
148 |       throw error;
149 |     }
150 |   }
151 | 
152 |   /**
153 |    * Remove entries older than policy threshold
154 |    */
155 |   private async removeAgedEntries(
156 |     policy: PruningPolicy,
157 |   ): Promise<{ removed: number; preserved: number }> {
158 |     const cutoffDate = new Date(
159 |       Date.now() - policy.maxAge * 24 * 60 * 60 * 1000,
160 |     );
161 |     const allEntries = await this.storage.getAll();
162 | 
163 |     let removed = 0;
164 |     let preserved = 0;
165 | 
166 |     for (const entry of allEntries) {
167 |       const entryDate = new Date(entry.timestamp);
168 | 
169 |       if (entryDate < cutoffDate) {
170 |         // Check if entry should be preserved
171 |         if (this.shouldPreserveEntry(entry, policy)) {
172 |           preserved++;
173 |           continue;
174 |         }
175 | 
176 |         // Remove from storage
177 |         await this.storage.delete(entry.id);
178 | 
179 |         // Remove from knowledge graph
180 |         await this.knowledgeGraph.removeNode(entry.id);
181 | 
182 |         removed++;
183 |       }
184 |     }
185 | 
186 |     return { removed, preserved };
187 |   }
188 | 
189 |   /**
190 |    * Apply size-based pruning to stay within limits
191 |    */
192 |   private async applySizePruning(
193 |     policy: PruningPolicy,
194 |   ): Promise<{ removed: number; preserved: number }> {
195 |     const metrics = await this.getOptimizationMetrics();
196 | 
197 |     if (
198 |       metrics.storageSize <= policy.maxSize &&
199 |       metrics.totalEntries <= policy.maxEntries
200 |     ) {
201 |       return { removed: 0, preserved: 0 };
202 |     }
203 | 
204 |     // Get entries sorted by importance score
205 |     const allEntries = await this.storage.getAll();
206 |     const scoredEntries = await Promise.all(
207 |       allEntries.map(async (entry) => ({
208 |         entry,
209 |         score: await this.calculateImportanceScore(entry),
210 |       })),
211 |     );
212 | 
213 |     // Sort by score (ascending - remove least important first)
214 |     scoredEntries.sort((a, b) => a.score - b.score);
215 | 
216 |     let removed = 0;
217 |     let preserved = 0;
218 |     let currentSize = metrics.storageSize;
219 |     let currentEntries = metrics.totalEntries;
220 | 
221 |     for (const { entry, score } of scoredEntries) {
222 |       if (
223 |         currentSize <= policy.maxSize &&
224 |         currentEntries <= policy.maxEntries
225 |       ) {
226 |         break;
227 |       }
228 | 
229 |       if (this.shouldPreserveEntry(entry, policy) || score > 0.8) {
230 |         preserved++;
231 |         continue;
232 |       }
233 | 
234 |       // Remove entry
235 |       await this.storage.delete(entry.id);
236 |       await this.knowledgeGraph.removeNode(entry.id);
237 | 
238 |       // Estimate size reduction (rough approximation)
239 |       const entrySize = JSON.stringify(entry).length / (1024 * 1024);
240 |       currentSize -= entrySize;
241 |       currentEntries--;
242 |       removed++;
243 |     }
244 | 
245 |     return { removed, preserved };
246 |   }
247 | 
248 |   /**
249 |    * Remove redundant and duplicate entries
250 |    */
251 |   private async removeRedundantEntries(
252 |     policy: PruningPolicy,
253 |   ): Promise<{ removed: number; merged: number }> {
254 |     const redundantPatterns = await this.findRedundantPatterns(
255 |       policy.redundancyThreshold,
256 |     );
257 | 
258 |     let removed = 0;
259 |     let merged = 0;
260 | 
261 |     for (const pattern of redundantPatterns) {
262 |       if (pattern.canMerge && pattern.duplicates.length > 1) {
263 |         // Keep the representative, remove duplicates
264 |         for (let i = 1; i < pattern.duplicates.length; i++) {
265 |           await this.storage.delete(pattern.duplicates[i]);
266 |           removed++;
267 |         }
268 | 
269 |         // Optionally merge information into representative
270 |         if (pattern.count > 2) {
271 |           await this.mergeRedundantEntries(
272 |             pattern.representative,
273 |             pattern.duplicates.slice(1),
274 |           );
275 |           merged++;
276 |         }
277 |       }
278 |     }
279 | 
280 |     return { removed, merged };
281 |   }
282 | 
283 |   /**
284 |    * Apply compression to old entries
285 |    */
286 |   private async applyCompression(
287 |     policy: PruningPolicy,
288 |   ): Promise<{ compressed: number; spaceSaved: number }> {
289 |     const cutoffDate = new Date(
290 |       Date.now() - policy.compressionThreshold * 24 * 60 * 60 * 1000,
291 |     );
292 |     const allEntries = await this.storage.getAll();
293 | 
294 |     let compressed = 0;
295 |     let spaceSaved = 0;
296 | 
297 |     for (const entry of allEntries) {
298 |       const entryDate = new Date(entry.timestamp);
299 | 
300 |       if (entryDate < cutoffDate && !this.isCompressed(entry)) {
301 |         const originalSize = JSON.stringify(entry).length;
302 |         const compressedEntry = await this.compressEntry(entry);
303 |         const compressedSize = JSON.stringify(compressedEntry).length;
304 | 
305 |         await this.storage.update(entry.id, compressedEntry);
306 | 
307 |         compressed++;
308 |         spaceSaved += originalSize - compressedSize;
309 |       }
310 |     }
311 | 
312 |     return { compressed, spaceSaved };
313 |   }
314 | 
315 |   /**
316 |    * Optimize storage structure and indices
317 |    */
318 |   private async optimizeStorage(): Promise<{
319 |     applied: boolean;
320 |     improvements: string[];
321 |   }> {
322 |     const improvements: string[] = [];
323 | 
324 |     try {
325 |       // Rebuild indices
326 |       await this.storage.rebuildIndex();
327 |       improvements.push("rebuilt_indices");
328 | 
329 |       // Defragment storage files
330 |       await this.defragmentStorage();
331 |       improvements.push("defragmented_storage");
332 | 
333 |       // Optimize cache sizes
334 |       this.optimizeCaches();
335 |       improvements.push("optimized_caches");
336 | 
337 |       return { applied: true, improvements };
338 |     } catch {
339 |       return { applied: false, improvements };
340 |     }
341 |   }
342 | 
343 |   /**
344 |    * Calculate importance score for an entry
345 |    */
346 |   private async calculateImportanceScore(entry: MemoryEntry): Promise<number> {
347 |     let score = 0;
348 | 
349 |     // Recency score (0-0.3)
350 |     const age = Date.now() - new Date(entry.timestamp).getTime();
351 |     const maxAge = 180 * 24 * 60 * 60 * 1000; // 180 days
352 |     score += Math.max(0, 1 - age / maxAge) * 0.3;
353 | 
354 |     // Type importance (0-0.2)
355 |     const typeScores: Record<string, number> = {
356 |       successful_deployment: 0.2,
357 |       user_preference: 0.18,
358 |       configuration: 0.15,
359 |       analysis: 0.12,
360 |       recommendation: 0.12,
361 |       interaction: 0.08,
362 |       error: 0.05,
363 |     };
364 |     score += typeScores[entry.type] || 0.05;
365 | 
366 |     // Learning value (0-0.2)
367 |     const patterns = await this.learningSystem.getPatterns();
368 |     const relevantPatterns = patterns.filter(
369 |       (p) =>
370 |         p.metadata.technologies?.includes(entry.data.language) ||
371 |         p.metadata.technologies?.includes(entry.data.framework),
372 |     );
373 |     score += Math.min(0.2, relevantPatterns.length * 0.05);
374 | 
375 |     // Knowledge graph centrality (0-0.15)
376 |     try {
377 |       const connections = await this.knowledgeGraph.getConnections(entry.id);
378 |       score += Math.min(0.15, connections.length * 0.02);
379 |     } catch {
380 |       // Node might not exist in graph
381 |     }
382 | 
383 |     // Success indicator (0-0.15)
384 |     if (entry.data.outcome === "success" || entry.data.success === true) {
385 |       score += 0.15;
386 |     }
387 | 
388 |     return Math.min(1, score);
389 |   }
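
  // Worked example (illustrative): a 30-day-old "successful_deployment" entry
  // with 2 relevant learned patterns and 4 knowledge-graph connections scores
  //   recency    (1 - 30/180) * 0.3       = 0.25
  //   type       successful_deployment    = 0.20
  //   learning   min(0.2, 2 * 0.05)       = 0.10
  //   centrality min(0.15, 4 * 0.02)      = 0.08
  //   success    outcome === "success"    = 0.15
  //   total                               = 0.78
  // which falls below the 0.8 auto-keep cutoff in applySizePruning(), though
  // shouldPreserveEntry() would still protect it via its success outcome.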
390 | 
391 |   /**
392 |    * Check if entry should be preserved based on policy
393 |    */
394 |   private shouldPreserveEntry(
395 |     entry: MemoryEntry,
396 |     policy: PruningPolicy,
397 |   ): boolean {
398 |     // Check preserve patterns
399 |     for (const pattern of policy.preservePatterns) {
400 |       if (
401 |         entry.type.includes(pattern) ||
402 |         JSON.stringify(entry.data).includes(pattern)
403 |       ) {
404 |         return true;
405 |       }
406 |     }
407 | 
408 |     // Preserve high-value entries
409 |     if (
410 |       entry.data.outcome === "success" ||
411 |       entry.data.success === true ||
412 |       entry.data.critical === true
413 |     ) {
414 |       return true;
415 |     }
416 | 
417 |     return false;
418 |   }
419 | 
420 |   /**
421 |    * Find patterns of redundant entries
422 |    */
423 |   private async findRedundantPatterns(
424 |     threshold: number,
425 |   ): Promise<RedundancyPattern[]> {
426 |     const allEntries = await this.storage.getAll();
427 |     const patterns: RedundancyPattern[] = [];
428 |     const processed = new Set<string>();
429 | 
430 |     for (const entry of allEntries) {
431 |       if (processed.has(entry.id)) continue;
432 | 
433 |       const similar = await this.findSimilarEntries(
434 |         entry,
435 |         allEntries,
436 |         threshold,
437 |       );
438 | 
439 |       if (similar.length > 1) {
440 |         patterns.push({
441 |           similarity: threshold,
442 |           count: similar.length,
443 |           representative: similar[0].id,
444 |           duplicates: similar.map((e) => e.id),
445 |           canMerge: this.canMergeEntries(similar),
446 |         });
447 | 
448 |         similar.forEach((s) => processed.add(s.id));
449 |       }
450 |     }
451 | 
452 |     return patterns;
453 |   }
454 | 
455 |   /**
456 |    * Find entries similar to given entry
457 |    */
458 |   private async findSimilarEntries(
459 |     target: MemoryEntry,
460 |     entries: MemoryEntry[],
461 |     threshold: number,
462 |   ): Promise<MemoryEntry[]> {
463 |     const similar: MemoryEntry[] = [target];
464 | 
465 |     for (const entry of entries) {
466 |       if (entry.id === target.id) continue;
467 | 
468 |       const similarity = await this.calculateSimilarity(target, entry);
469 |       if (similarity >= threshold) {
470 |         similar.push(entry);
471 |       }
472 |     }
473 | 
474 |     return similar;
475 |   }
476 | 
477 |   /**
478 |    * Calculate similarity between two entries
479 |    */
480 |   private async calculateSimilarity(
481 |     entry1: MemoryEntry,
482 |     entry2: MemoryEntry,
483 |   ): Promise<number> {
484 |     // Check cache first
485 |     if (
486 |       this.similarityCache.has(entry1.id) &&
487 |       this.similarityCache.get(entry1.id)?.has(entry2.id)
488 |     ) {
489 |       return this.similarityCache.get(entry1.id)!.get(entry2.id)!;
490 |     }
491 | 
492 |     let similarity = 0;
493 | 
494 |     // Type similarity (0-0.3)
495 |     if (entry1.type === entry2.type) {
496 |       similarity += 0.3;
497 |     }
498 | 
499 |     // Temporal similarity (0-0.2)
500 |     const timeDiff = Math.abs(
501 |       new Date(entry1.timestamp).getTime() -
502 |         new Date(entry2.timestamp).getTime(),
503 |     );
504 |     const maxTimeDiff = 7 * 24 * 60 * 60 * 1000; // 7 days
505 |     similarity += Math.max(0, 1 - timeDiff / maxTimeDiff) * 0.2;
506 | 
507 |     // Data similarity (0-0.5)
508 |     const dataSimilarity = this.calculateDataSimilarity(
509 |       entry1.data,
510 |       entry2.data,
511 |     );
512 |     similarity += dataSimilarity * 0.5;
513 | 
514 |     // Cache result
515 |     if (!this.similarityCache.has(entry1.id)) {
516 |       this.similarityCache.set(entry1.id, new Map());
517 |     }
518 |     this.similarityCache.get(entry1.id)!.set(entry2.id, similarity);
519 | 
520 |     return similarity;
521 |   }
522 | 
523 |   /**
524 |    * Calculate similarity between data objects
525 |    */
526 |   private calculateDataSimilarity(data1: any, data2: any): number {
527 |     const keys1 = new Set(Object.keys(data1));
528 |     const keys2 = new Set(Object.keys(data2));
529 |     const allKeys = new Set([...keys1, ...keys2]);
530 | 
531 |     let matches = 0;
532 |     let total = 0;
533 | 
534 |     for (const key of allKeys) {
535 |       total++;
536 |       if (keys1.has(key) && keys2.has(key)) {
537 |         if (data1[key] === data2[key]) {
538 |           matches++;
539 |         } else if (
540 |           typeof data1[key] === "string" &&
541 |           typeof data2[key] === "string"
542 |         ) {
543 |           // String similarity for text fields
544 |           const stringSim = this.calculateStringSimilarity(
545 |             data1[key],
546 |             data2[key],
547 |           );
548 |           matches += stringSim;
549 |         }
550 |       }
551 |     }
552 | 
553 |     return total > 0 ? matches / total : 0;
554 |   }
555 | 
556 |   /**
557 |    * Calculate string similarity (simple Jaccard similarity)
558 |    */
559 |   private calculateStringSimilarity(str1: string, str2: string): number {
560 |     const words1 = new Set(str1.toLowerCase().split(/\s+/));
561 |     const words2 = new Set(str2.toLowerCase().split(/\s+/));
562 | 
563 |     const intersection = new Set([...words1].filter((w) => words2.has(w)));
564 |     const union = new Set([...words1, ...words2]);
565 | 
566 |     return union.size > 0 ? intersection.size / union.size : 0;
567 |   }
568 | 
569 |   /**
570 |    * Check if entries can be safely merged
571 |    */
572 |   private canMergeEntries(entries: MemoryEntry[]): boolean {
573 |     if (entries.length < 2) return false;
574 | 
575 |     // All entries must have the same type
576 |     const firstType = entries[0].type;
577 |     if (!entries.every((e) => e.type === firstType)) {
578 |       return false;
579 |     }
580 | 
581 |     // Check for conflicting data
582 |     const firstData = entries[0].data;
583 |     for (const entry of entries.slice(1)) {
584 |       if (this.hasConflictingData(firstData, entry.data)) {
585 |         return false;
586 |       }
587 |     }
588 | 
589 |     return true;
590 |   }
591 | 
592 |   /**
593 |    * Check for conflicting data between entries
594 |    */
595 |   private hasConflictingData(data1: any, data2: any): boolean {
596 |     for (const key of Object.keys(data1)) {
597 |       if (key in data2 && data1[key] !== data2[key]) {
598 |         // Special handling for arrays and objects
599 |         if (Array.isArray(data1[key]) && Array.isArray(data2[key])) {
600 |           continue; // Arrays can be merged
601 |         }
602 |         if (typeof data1[key] === "object" && typeof data2[key] === "object") {
603 |           continue; // Objects can be merged
604 |         }
605 |         return true; // Conflicting primitive values
606 |       }
607 |     }
608 |     return false;
609 |   }
610 | 
611 |   /**
612 |    * Merge redundant entries into representative
613 |    */
614 |   private async mergeRedundantEntries(
615 |     representativeId: string,
616 |     duplicateIds: string[],
617 |   ): Promise<void> {
618 |     const representative = await this.storage.get(representativeId);
619 |     if (!representative) return;
620 | 
621 |     const duplicates = await Promise.all(
622 |       duplicateIds.map((id) => this.storage.get(id)),
623 |     );
624 | 
625 |     // Merge data from duplicates
626 |     const mergedData = { ...representative.data };
627 | 
628 |     for (const duplicate of duplicates) {
629 |       if (!duplicate) continue;
630 | 
631 |       // Merge arrays
632 |       for (const [key, value] of Object.entries(duplicate.data)) {
633 |         if (Array.isArray(value) && Array.isArray(mergedData[key])) {
634 |           mergedData[key] = [...new Set([...mergedData[key], ...value])];
635 |         } else if (
636 |           typeof value === "object" &&
637 |           typeof mergedData[key] === "object"
638 |         ) {
639 |           mergedData[key] = { ...mergedData[key], ...value };
640 |         } else if (!(key in mergedData)) {
641 |           mergedData[key] = value;
642 |         }
643 |       }
644 |     }
645 | 
646 |     // Update representative with merged data
647 |     await this.storage.update(representativeId, {
648 |       ...representative,
649 |       data: mergedData,
650 |       metadata: {
651 |         ...representative.metadata,
652 |         merged: true,
653 |         mergedCount: duplicateIds.length,
654 |         mergedAt: new Date().toISOString(),
655 |       },
656 |     });
657 |   }
658 | 
659 |   /**
660 |    * Check if entry is already compressed
661 |    */
662 |   private isCompressed(entry: MemoryEntry): boolean {
663 |     return Boolean(entry.metadata?.compressed);
664 |   }
665 | 
666 |   /**
667 |    * Compress entry data
668 |    */
669 |   private async compressEntry(entry: MemoryEntry): Promise<MemoryEntry> {
670 |     // Simple compression - in production, use actual compression library
671 |     const compressedData = this.simpleCompress(entry.data);
672 | 
673 |     return {
674 |       ...entry,
675 |       data: compressedData,
676 |       metadata: {
677 |         ...entry.metadata,
678 |         compressed: true,
679 |         compressionType: "simple",
680 |         compressedAt: new Date().toISOString(),
681 |         originalSize: JSON.stringify(entry.data).length,
682 |       },
683 |     };
684 |   }
685 | 
686 |   /**
687 |    * Simple compression simulation
688 |    */
689 |   private simpleCompress(data: any): any {
690 |     // This is a placeholder - in production, use proper compression
691 |     const stringified = JSON.stringify(data);
692 |     const compressed = stringified.replace(/\s+/g, " ").trim();
693 | 
694 |     return {
695 |       _compressed: true,
696 |       _data: compressed,
697 |       _type: "simple",
698 |     };
699 |   }
700 | 
701 |   /**
702 |    * Defragment storage files
703 |    */
704 |   private async defragmentStorage(): Promise<void> {
705 |     // Rebuild storage with optimal layout
706 |     const allEntries = await this.storage.getAll();
707 | 
708 |     // Sort entries for optimal access patterns
709 |     allEntries.sort(
710 |       (a, b) =>
711 |         new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime(),
712 |     );
713 | 
714 |     // This would typically rewrite storage files
715 |     // For now, just trigger a rebuild
716 |     await this.storage.rebuildIndex();
717 |   }
718 | 
719 |   /**
720 |    * Trim in-memory caches to keep their sizes bounded
721 |    */
722 |   private optimizeCaches(): void {
723 |     // Trim the similarity cache. Cached scores carry no timestamps,
724 |     // so random eviction stands in for true age-based expiry.
725 |     for (const [key1, innerMap] of this.similarityCache.entries()) {
726 |       for (const [key2] of innerMap.entries()) {
727 |         // Evict roughly 10% of cached pairs on each pass; repeated
728 |         // passes keep the cache bounded without tracking entry ages
729 |         if (Math.random() < 0.1) {
730 |           innerMap.delete(key2);
731 |         }
732 |       }
733 |       if (innerMap.size === 0) {
734 |         this.similarityCache.delete(key1);
735 |       }
736 |     }
737 | 
738 |     // Limit cache sizes
739 |     if (this.compressionCache.size > 10000) {
740 |       const entries = Array.from(this.compressionCache.entries());
741 |       this.compressionCache.clear();
742 |       // Keep only the most recent 5000 entries
743 |       entries.slice(-5000).forEach(([key, value]) => {
744 |         this.compressionCache.set(key, value);
745 |       });
746 |     }
747 |   }
748 | 
749 |   /**
750 |    * Get comprehensive optimization metrics
751 |    */
752 |   async getOptimizationMetrics(): Promise<OptimizationMetrics> {
753 |     const allEntries = await this.storage.getAll();
754 |     const totalEntries = allEntries.length;
755 | 
756 |     // Calculate storage size (approximate)
757 |     const storageSize =
758 |       allEntries.reduce((total, entry) => {
759 |         return total + JSON.stringify(entry).length;
760 |       }, 0) /
761 |       (1024 * 1024); // Convert to MB
762 | 
763 |     // Calculate index size (approximate)
764 |     const indexSize = (totalEntries * 100) / (1024 * 1024); // Rough estimate: ~100 bytes of index data per entry, in MB
765 | 
766 |     // Calculate compression ratio
767 |     const compressedEntries = allEntries.filter((e) => this.isCompressed(e));
768 |     const compressionRatio = totalEntries > 0 ? compressedEntries.length / totalEntries : 0;
769 | 
770 |     return {
771 |       totalEntries,
772 |       storageSize,
773 |       indexSize,
774 |       compressionRatio,
775 |       duplicatesRemoved: 0, // Would be tracked during runtime
776 |       entriesPruned: 0, // Would be tracked during runtime
777 |       performanceGain: 0, // Would be calculated based on before/after metrics
778 |       lastOptimization: new Date(), // Placeholder - would be tracked across runs
779 |     };
780 |   }
781 | 
782 |   /**
783 |    * Update learning system based on pruning results
784 |    */
785 |   private async updateLearningFromPruning(
786 |     result: PruningResult,
787 |   ): Promise<void> {
788 |     // Create a learning entry about pruning effectiveness
789 |     const pruningLearning = {
790 |       action: "memory_pruning",
791 |       outcome: result.spaceSaved > 0 ? "success" : "neutral",
792 |       metrics: {
793 |         entriesRemoved: result.entriesRemoved,
794 |         spaceSaved: result.spaceSaved,
795 |         patternsPreserved: result.patternsPreserved,
796 |       },
797 |       timestamp: new Date().toISOString(),
798 |     };
799 | 
800 |     // This would integrate with the learning system
801 |     // For now, just emit an event
802 |     this.emit("learning_update", pruningLearning);
803 |   }
804 | 
805 |   /**
806 |    * Setup periodic cleanup
807 |    */
808 |   private setupPeriodicCleanup(): void {
809 |     // Run optimization every 24 hours
810 |     setInterval(
811 |       async () => {
812 |         try {
813 |           await this.prune();
814 |           this.emit("periodic_cleanup_completed");
815 |         } catch (error) {
816 |           this.emit("periodic_cleanup_error", {
817 |             error: error instanceof Error ? error.message : String(error),
818 |           });
819 |         }
820 |       },
821 |       24 * 60 * 60 * 1000,
822 |     );
823 |   }
824 | 
825 |   /**
826 |    * Get pruning recommendations
827 |    */
828 |   async getPruningRecommendations(): Promise<{
829 |     shouldPrune: boolean;
830 |     reasons: string[];
831 |     estimatedSavings: number;
832 |     recommendedPolicy: Partial<PruningPolicy>;
833 |   }> {
834 |     const metrics = await this.getOptimizationMetrics();
835 |     const reasons: string[] = [];
836 |     let shouldPrune = false;
837 |     let estimatedSavings = 0;
838 | 
839 |     // Check storage size
840 |     if (metrics.storageSize > this.defaultPolicy.maxSize * 0.8) {
841 |       shouldPrune = true;
842 |       reasons.push(
843 |         `Storage size (${metrics.storageSize.toFixed(2)}MB) approaching limit`,
844 |       );
845 |       estimatedSavings += metrics.storageSize * 0.2;
846 |     }
847 | 
848 |     // Check entry count
849 |     if (metrics.totalEntries > this.defaultPolicy.maxEntries * 0.8) {
850 |       shouldPrune = true;
851 |       reasons.push(`Entry count (${metrics.totalEntries}) approaching limit`);
852 |     }
853 | 
854 |     // Check compression ratio
855 |     if (metrics.compressionRatio < 0.3) {
856 |       reasons.push("Low compression ratio indicates optimization opportunity");
857 |       estimatedSavings += metrics.storageSize * 0.15;
858 |     }
859 | 
860 |     // Time-based recommendation (requires real lastOptimization tracking; currently a placeholder)
861 |     const daysSinceLastOptimization =
862 |       (Date.now() - metrics.lastOptimization.getTime()) / (24 * 60 * 60 * 1000);
863 |     if (daysSinceLastOptimization > 7) {
864 |       shouldPrune = true;
865 |       reasons.push("Regular maintenance window (weekly optimization)");
866 |     }
867 | 
868 |     return {
869 |       shouldPrune,
870 |       reasons,
871 |       estimatedSavings,
872 |       recommendedPolicy: {
873 |         maxAge: Math.max(30, this.defaultPolicy.maxAge - 30), // More aggressive if needed
874 |         compressionThreshold: Math.max(
875 |           7,
876 |           this.defaultPolicy.compressionThreshold - 7,
877 |         ),
878 |       },
879 |     };
880 |   }
881 | }
882 | 
```
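
The optimizer's public surface in this file is small: `prune()`, `getOptimizationMetrics()`, and `getPruningRecommendations()`. Note that the importance weights are budgeted to sum to at most 1.0 (0.3 recency + 0.2 type + 0.2 learning value + 0.15 graph centrality + 0.15 success). Below is a minimal driver sketch; the structural `OptimizerLike` type is an assumption standing in for the class (whose name and constructor are not shown on this page), while the three method signatures are taken from the code above.

```typescript
// Hedged sketch: a recommendation-gated pruning loop. "OptimizerLike"
// is an assumption; prune(), getOptimizationMetrics(), and
// getPruningRecommendations() mirror the methods in the file above.
interface OptimizerLike {
  prune(): Promise<{
    entriesRemoved: number;
    spaceSaved: number;
    patternsPreserved: number;
  }>;
  getOptimizationMetrics(): Promise<{
    totalEntries: number;
    storageSize: number;
  }>;
  getPruningRecommendations(): Promise<{
    shouldPrune: boolean;
    reasons: string[];
    estimatedSavings: number;
  }>;
}

async function maybeOptimize(optimizer: OptimizerLike): Promise<void> {
  const rec = await optimizer.getPruningRecommendations();
  if (!rec.shouldPrune) return; // below the 80% size/count thresholds

  console.log(`Pruning: ${rec.reasons.join("; ")}`);
  const result = await optimizer.prune();
  const metrics = await optimizer.getOptimizationMetrics();
  console.log(
    `Removed ${result.entriesRemoved} entries (~${result.spaceSaved} bytes); ` +
      `${metrics.totalEntries} remain at ${metrics.storageSize.toFixed(2)}MB`,
  );
}
```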

--------------------------------------------------------------------------------
/src/tools/deploy-pages.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { promises as fs } from "fs";
  2 | import path from "path";
  3 | import { z } from "zod";
  4 | import { MCPToolResponse, formatMCPResponse } from "../types/api.js";
  5 | import {
  6 |   createOrUpdateProject,
  7 |   trackDeployment,
  8 |   getDeploymentRecommendations,
  9 |   getKnowledgeGraph,
 10 | } from "../memory/kg-integration.js";
 11 | import { getUserPreferenceManager } from "../memory/user-preferences.js";
 12 | 
 13 | const inputSchema = z.object({
 14 |   repository: z.string(),
 15 |   ssg: z
 16 |     .enum(["jekyll", "hugo", "docusaurus", "mkdocs", "eleventy"])
 17 |     .optional()
 18 |     .describe(
 19 |       "Static site generator to use. If not provided, will be retrieved from knowledge graph using analysisId",
 20 |     ),
 21 |   branch: z.string().optional().default("gh-pages"),
 22 |   customDomain: z.string().optional(),
 23 |   projectPath: z
 24 |     .string()
 25 |     .optional()
 26 |     .describe("Local path to the project for tracking"),
 27 |   projectName: z.string().optional().describe("Project name for tracking"),
 28 |   analysisId: z
 29 |     .string()
 30 |     .optional()
 31 |     .describe("ID from repository analysis for linking and SSG retrieval"),
 32 |   userId: z
 33 |     .string()
 34 |     .optional()
 35 |     .default("default")
 36 |     .describe("User ID for preference tracking"),
 37 | });
 38 | 
 39 | interface BuildConfig {
 40 |   workingDirectory: string | null;
 41 |   buildCommand: string;
 42 |   outputPath: string;
 43 |   nodeVersion?: string;
 44 |   packageManager?: "npm" | "yarn" | "pnpm";
 45 | }
 46 | 
 47 | /**
 48 |  * Retrieve SSG from knowledge graph using analysisId
 49 |  */
 50 | async function getSSGFromKnowledgeGraph(
 51 |   analysisId: string,
 52 | ): Promise<string | null> {
 53 |   try {
 54 |     const kg = await getKnowledgeGraph();
 55 | 
 56 |     // Find project node by analysis ID
 57 |     const projectNode = await kg.findNode({
 58 |       type: "project",
 59 |       properties: { id: analysisId },
 60 |     });
 61 | 
 62 |     if (!projectNode) {
 63 |       return null;
 64 |     }
 65 | 
 66 |     // Get deployment recommendations for this project
 67 |     const recommendations = await getDeploymentRecommendations(analysisId);
 68 | 
 69 |     if (recommendations.length > 0) {
 70 |       // Return the highest confidence SSG
 71 |       const topRecommendation = recommendations.sort(
 72 |         (a, b) => b.confidence - a.confidence,
 73 |       )[0];
 74 |       return topRecommendation.ssg;
 75 |     }
 76 | 
 77 |     // Fallback: check if there are any previous successful deployments
 78 |     const edges = await kg.findEdges({
 79 |       source: projectNode.id,
 80 |     });
 81 | 
 82 |     const deploymentEdges = edges.filter((e) =>
 83 |       e.type.startsWith("project_deployed_with"),
 84 |     );
 85 | 
 86 |     if (deploymentEdges.length > 0) {
 87 |       // Get the most recent successful deployment
 88 |       const successfulDeployments = deploymentEdges.filter(
 89 |         (e) => e.properties?.success === true,
 90 |       );
 91 | 
 92 |       if (successfulDeployments.length > 0) {
 93 |         const mostRecent = successfulDeployments.sort(
 94 |           (a, b) =>
 95 |             new Date(b.properties?.timestamp || 0).getTime() -
 96 |             new Date(a.properties?.timestamp || 0).getTime(),
 97 |         )[0];
 98 | 
 99 |         const configNode = (await kg.getAllNodes()).find(
100 |           (n) => n.id === mostRecent.target,
101 |         );
102 | 
103 |         return configNode?.properties?.ssg || null;
104 |       }
105 |     }
106 | 
107 |     return null;
108 |   } catch (error) {
109 |     console.warn("Failed to retrieve SSG from knowledge graph:", error);
110 |     return null;
111 |   }
112 | }
113 | 
114 | /**
115 |  * Detect documentation folder in repository
116 |  */
117 | async function detectDocsFolder(repoPath: string): Promise<string | null> {
118 |   const commonFolders = [
119 |     "docs",
120 |     "documentation",
121 |     "website",
122 |     "doc",
123 |     "site",
124 |     "pages",
125 |   ];
126 | 
127 |   for (const folder of commonFolders) {
128 |     const folderPath = path.join(repoPath, folder);
129 |     try {
130 |       const stat = await fs.stat(folderPath);
131 |       if (stat.isDirectory()) {
132 |         // Check if it has package.json or other SSG-specific files
133 |         const hasPackageJson = await fs
134 |           .access(path.join(folderPath, "package.json"))
135 |           .then(() => true)
136 |           .catch(() => false);
137 |         const hasMkDocsYml = await fs
138 |           .access(path.join(folderPath, "mkdocs.yml"))
139 |           .then(() => true)
140 |           .catch(() => false);
141 |         const hasConfigToml = await fs
142 |           .access(path.join(folderPath, "config.toml"))
143 |           .then(() => true)
144 |           .catch(() => false);
145 | 
146 |         if (hasPackageJson || hasMkDocsYml || hasConfigToml) {
147 |           return folder;
148 |         }
149 |       }
150 |     } catch {
151 |       continue;
152 |     }
153 |   }
154 | 
155 |   return null;
156 | }
157 | 
158 | /**
159 |  * Detect build configuration from package.json
160 |  */
161 | async function detectBuildConfig(
162 |   repoPath: string,
163 |   ssg: string,
164 |   docsFolder: string | null,
165 | ): Promise<BuildConfig> {
166 |   const workingDir = docsFolder || ".";
167 |   const packageJsonPath = path.join(repoPath, workingDir, "package.json");
168 | 
169 |   const defaults: Record<string, BuildConfig> = {
170 |     docusaurus: {
171 |       workingDirectory: docsFolder,
172 |       buildCommand: "npm run build",
173 |       outputPath: "./build",
174 |     },
175 |     eleventy: {
176 |       workingDirectory: docsFolder,
177 |       buildCommand: "npm run build",
178 |       outputPath: "./_site",
179 |     },
180 |     hugo: {
181 |       workingDirectory: docsFolder,
182 |       buildCommand: "hugo --minify",
183 |       outputPath: "./public",
184 |     },
185 |     jekyll: {
186 |       workingDirectory: docsFolder,
187 |       buildCommand: "bundle exec jekyll build",
188 |       outputPath: "./_site",
189 |     },
190 |     mkdocs: {
191 |       workingDirectory: docsFolder,
192 |       buildCommand: "mkdocs build",
193 |       outputPath: "./site",
194 |     },
195 |   };
196 | 
197 |   const config = defaults[ssg] || defaults.docusaurus;
198 | 
199 |   try {
200 |     const packageJson = JSON.parse(await fs.readFile(packageJsonPath, "utf-8"));
201 | 
202 |     // Detect build command from scripts
203 |     const scripts = packageJson.scripts || {};
204 |     if (scripts.build) {
205 |       config.buildCommand = "npm run build";
206 |     } else if (scripts["docs:build"]) {
207 |       config.buildCommand = "npm run docs:build";
208 |     } else if (scripts.start && scripts.start.includes("docusaurus")) {
209 |       config.buildCommand = "npm run build";
210 |     }
211 | 
212 |     // Detect package manager
213 |     const hasYarnLock = await fs
214 |       .access(path.join(repoPath, workingDir, "yarn.lock"))
215 |       .then(() => true)
216 |       .catch(() => false);
217 |     const hasPnpmLock = await fs
218 |       .access(path.join(repoPath, workingDir, "pnpm-lock.yaml"))
219 |       .then(() => true)
220 |       .catch(() => false);
221 | 
222 |     if (hasYarnLock) {
223 |       config.packageManager = "yarn";
224 |       config.buildCommand = config.buildCommand.replace("npm", "yarn");
225 |     } else if (hasPnpmLock) {
226 |       config.packageManager = "pnpm";
227 |       config.buildCommand = config.buildCommand.replace("npm", "pnpm");
228 |     } else {
229 |       config.packageManager = "npm";
230 |     }
231 | 
232 |     // Detect Node version from engines field
233 |     if (packageJson.engines?.node) {
234 |       config.nodeVersion = packageJson.engines.node;
235 |     }
236 |   } catch (error) {
237 |     // If package.json doesn't exist or can't be read, use defaults
238 |     console.warn("Using default build configuration:", error);
239 |   }
240 | 
241 |   return config;
242 | }
243 | 
244 | export async function deployPages(
245 |   args: unknown,
246 |   context?: any,
247 | ): Promise<{ content: any[] }> {
248 |   const startTime = Date.now();
249 |   const {
250 |     repository,
251 |     ssg: providedSSG,
252 |     branch,
253 |     customDomain,
254 |     projectPath,
255 |     projectName,
256 |     analysisId,
257 |     userId,
258 |   } = inputSchema.parse(args);
259 | 
260 |   // Declare ssg outside try block so it's accessible in catch
261 |   let ssg:
262 |     | "jekyll"
263 |     | "hugo"
264 |     | "docusaurus"
265 |     | "mkdocs"
266 |     | "eleventy"
267 |     | undefined = providedSSG;
268 | 
269 |   // Report initial progress
270 |   if (context?.meta?.progressToken) {
271 |     await context.meta.reportProgress?.({
272 |       progress: 0,
273 |       total: 100,
274 |     });
275 |   }
276 | 
277 |   await context?.info?.("🚀 Starting GitHub Pages deployment configuration...");
278 | 
279 |   try {
280 |     // Determine repository path (local or remote)
281 |     const repoPath = repository.startsWith("http") ? "." : repository;
282 |     await context?.info?.(`📂 Target repository: ${repository}`);
283 | 
284 |     if (context?.meta?.progressToken) {
285 |       await context.meta.reportProgress?.({
286 |         progress: 10,
287 |         total: 100,
288 |       });
289 |     }
290 | 
291 |     // Retrieve SSG from knowledge graph if not provided
292 |     ssg = providedSSG;
293 |     if (!ssg && analysisId) {
294 |       await context?.info?.(
295 |         `🔍 Retrieving SSG recommendation from analysis ${analysisId}...`,
296 |       );
297 |       const retrievedSSG = await getSSGFromKnowledgeGraph(analysisId);
298 |       if (retrievedSSG) {
299 |         ssg = retrievedSSG as
300 |           | "jekyll"
301 |           | "hugo"
302 |           | "docusaurus"
303 |           | "mkdocs"
304 |           | "eleventy";
305 |         await context?.info?.(`✅ Found recommended SSG: ${ssg}`);
306 |       }
307 |     } else if (ssg) {
308 |       await context?.info?.(`ℹ️ Using specified SSG: ${ssg}`);
309 |     }
310 | 
311 |     if (!ssg) {
312 |       const errorResponse: MCPToolResponse = {
313 |         success: false,
314 |         error: {
315 |           code: "SSG_NOT_SPECIFIED",
316 |           message:
317 |             "SSG parameter is required. Either provide it directly or ensure analysisId points to a project with SSG recommendations.",
318 |           resolution:
319 |             "Run analyze_repository and recommend_ssg first, or specify the SSG parameter explicitly.",
320 |         },
321 |         metadata: {
322 |           toolVersion: "2.0.0",
323 |           executionTime: Date.now() - startTime,
324 |           timestamp: new Date().toISOString(),
325 |         },
326 |       };
327 |       return formatMCPResponse(errorResponse);
328 |     }
329 | 
330 |     if (context?.meta?.progressToken) {
331 |       await context.meta.reportProgress?.({
332 |         progress: 25,
333 |         total: 100,
334 |       });
335 |     }
336 | 
337 |     // Detect documentation folder
338 |     await context?.info?.("📑 Detecting documentation folder...");
339 |     const docsFolder = await detectDocsFolder(repoPath);
340 |     await context?.info?.(
341 |       `📁 Documentation folder: ${docsFolder || "root directory"}`,
342 |     );
343 | 
344 |     if (context?.meta?.progressToken) {
345 |       await context.meta.reportProgress?.({
346 |         progress: 40,
347 |         total: 100,
348 |       });
349 |     }
350 | 
351 |     // Detect build configuration
352 |     await context?.info?.(`⚙️ Detecting build configuration for ${ssg}...`);
353 |     const buildConfig = await detectBuildConfig(repoPath, ssg, docsFolder);
354 | 
355 |     if (context?.meta?.progressToken) {
356 |       await context.meta.reportProgress?.({
357 |         progress: 55,
358 |         total: 100,
359 |       });
360 |     }
361 | 
362 |     // Create .github/workflows directory
363 |     await context?.info?.("📂 Creating GitHub Actions workflow directory...");
364 |     const workflowsDir = path.join(repoPath, ".github", "workflows");
365 |     await fs.mkdir(workflowsDir, { recursive: true });
366 | 
367 |     if (context?.meta?.progressToken) {
368 |       await context.meta.reportProgress?.({
369 |         progress: 70,
370 |         total: 100,
371 |       });
372 |     }
373 | 
374 |     // Generate workflow based on SSG and build config
375 |     await context?.info?.(`✍️ Generating ${ssg} deployment workflow...`);
376 |     const workflow = generateWorkflow(ssg, branch, customDomain, buildConfig);
377 |     const workflowPath = path.join(workflowsDir, "deploy-docs.yml");
378 |     await fs.writeFile(workflowPath, workflow);
379 |     await context?.info?.(
380 |       `✅ Workflow created: .github/workflows/deploy-docs.yml`,
381 |     );
382 | 
383 |     if (context?.meta?.progressToken) {
384 |       await context.meta.reportProgress?.({
385 |         progress: 85,
386 |         total: 100,
387 |       });
388 |     }
389 | 
390 |     // Create CNAME file if custom domain is specified
391 |     let cnameCreated = false;
392 |     if (customDomain) {
393 |       await context?.info?.(
394 |         `🌐 Creating CNAME file for custom domain: ${customDomain}...`,
395 |       );
396 |       const cnamePath = path.join(repoPath, "CNAME");
397 |       await fs.writeFile(cnamePath, customDomain);
398 |       cnameCreated = true;
399 |       await context?.info?.("✅ CNAME file created");
400 |     }
401 | 
402 |     const deploymentResult = {
403 |       repository,
404 |       ssg,
405 |       branch,
406 |       customDomain,
407 |       workflowPath: "deploy-docs.yml",
408 |       cnameCreated,
409 |       repoPath,
410 |       detectedConfig: {
411 |         docsFolder: docsFolder || "root",
412 |         buildCommand: buildConfig.buildCommand,
413 |         outputPath: buildConfig.outputPath,
414 |         packageManager: buildConfig.packageManager || "npm",
415 |         workingDirectory: buildConfig.workingDirectory,
416 |       },
417 |     };
418 | 
419 |     // Phase 2.3: Track deployment setup in knowledge graph
420 |     await context?.info?.("💾 Tracking deployment in Knowledge Graph...");
421 |     try {
422 |       // Create or update project in knowledge graph
423 |       if (projectPath || projectName) {
424 |         const timestamp = new Date().toISOString();
425 |         const project = await createOrUpdateProject({
426 |           id:
427 |             analysisId ||
428 |             `deploy_${repository.replace(/[^a-zA-Z0-9]/g, "_")}_${Date.now()}`,
429 |           timestamp,
430 |           path: projectPath || repository,
431 |           projectName: projectName || repository,
432 |           structure: {
433 |             totalFiles: 0, // Unknown at this point
434 |             languages: {},
435 |             hasTests: false,
436 |             hasCI: true, // We just added CI
437 |             hasDocs: true, // Setting up docs deployment
438 |           },
439 |         });
440 | 
441 |         // Track successful deployment setup
442 |         await trackDeployment(project.id, ssg, true, {
443 |           buildTime: Date.now() - startTime,
444 |         });
445 | 
446 |         // Update user preferences with SSG usage
447 |         const userPreferenceManager = await getUserPreferenceManager(userId);
448 |         await userPreferenceManager.trackSSGUsage({
449 |           ssg,
450 |           success: true, // Setup successful
451 |           timestamp,
452 |           projectType: projectPath || repository,
453 |         });
454 |       }
455 |     } catch (trackingError) {
456 |       // Don't fail the whole deployment if tracking fails
457 |       console.warn(
458 |         "Failed to track deployment in knowledge graph:",
459 |         trackingError,
460 |       );
461 |     }
462 | 
463 |     if (context?.meta?.progressToken) {
464 |       await context.meta.reportProgress?.({
465 |         progress: 100,
466 |         total: 100,
467 |       });
468 |     }
469 | 
470 |     const executionTime = Date.now() - startTime;
471 |     await context?.info?.(
472 |       `✅ Deployment configuration complete! ${ssg} workflow created in ${Math.round(
473 |         executionTime / 1000,
474 |       )}s`,
475 |     );
476 | 
477 |     const response: MCPToolResponse<typeof deploymentResult> = {
478 |       success: true,
479 |       data: deploymentResult,
480 |       metadata: {
481 |         toolVersion: "2.0.0",
482 |         executionTime,
483 |         timestamp: new Date().toISOString(),
484 |       },
485 |       recommendations: [
486 |         {
487 |           type: "info",
488 |           title: "Deployment Workflow Created",
489 |           description: `GitHub Actions workflow configured for ${ssg} deployment to ${branch} branch`,
490 |         },
491 |         ...(!providedSSG && analysisId
492 |           ? [
493 |               {
494 |                 type: "info" as const,
495 |                 title: "SSG Auto-Detected",
496 |                 description: `Retrieved ${ssg} from knowledge graph using analysisId`,
497 |               },
498 |             ]
499 |           : []),
500 |         ...(docsFolder
501 |           ? [
502 |               {
503 |                 type: "info" as const,
504 |                 title: "Documentation Folder Detected",
505 |                 description: `Found documentation in '${docsFolder}/' folder. Workflow configured with working-directory.`,
506 |               },
507 |             ]
508 |           : []),
509 |         ...(buildConfig.packageManager !== "npm"
510 |           ? [
511 |               {
512 |                 type: "info" as const,
513 |                 title: "Package Manager Detected",
514 |                 description: `Using ${buildConfig.packageManager} based on lockfile detection`,
515 |               },
516 |             ]
517 |           : []),
518 |         ...(customDomain
519 |           ? [
520 |               {
521 |                 type: "info" as const,
522 |                 title: "Custom Domain Configured",
523 |                 description: `CNAME file created for ${customDomain}`,
524 |               },
525 |             ]
526 |           : []),
527 |       ],
528 |       nextSteps: [
529 |         {
530 |           action: "Verify Deployment Setup",
531 |           toolRequired: "verify_deployment",
532 |           description: "Check that all deployment requirements are met",
533 |           priority: "high",
534 |         },
535 |         {
536 |           action: "Commit and Push",
537 |           toolRequired: "git",
538 |           description: "Commit workflow files and push to trigger deployment",
539 |           priority: "high",
540 |         },
541 |       ],
542 |     };
543 | 
544 |     return formatMCPResponse(response);
545 |   } catch (error) {
546 |     // Phase 2.3: Track failed deployment setup
547 |     try {
548 |       if ((projectPath || projectName) && ssg) {
549 |         const timestamp = new Date().toISOString();
550 |         const project = await createOrUpdateProject({
551 |           id:
552 |             analysisId ||
553 |             `deploy_${repository.replace(/[^a-zA-Z0-9]/g, "_")}_${Date.now()}`,
554 |           timestamp,
555 |           path: projectPath || repository,
556 |           projectName: projectName || repository,
557 |           structure: {
558 |             totalFiles: 0,
559 |             languages: {},
560 |             hasTests: false,
561 |             hasCI: false,
562 |             hasDocs: false,
563 |           },
564 |         });
565 | 
566 |         // Track failed deployment (only if ssg is known)
567 |         await trackDeployment(project.id, ssg, false, {
568 |           errorMessage: String(error),
569 |         });
570 | 
571 |         // Update user preferences with failed SSG usage
572 |         const userPreferenceManager = await getUserPreferenceManager(userId);
573 |         await userPreferenceManager.trackSSGUsage({
574 |           ssg,
575 |           success: false,
576 |           timestamp,
577 |           projectType: projectPath || repository,
578 |         });
579 |       }
580 |     } catch (trackingError) {
581 |       console.warn("Failed to track deployment failure:", trackingError);
582 |     }
583 | 
584 |     const errorResponse: MCPToolResponse = {
585 |       success: false,
586 |       error: {
587 |         code: "DEPLOYMENT_SETUP_FAILED",
588 |         message: `Failed to set up deployment: ${error}`,
589 |         resolution:
590 |           "Ensure repository path is accessible and GitHub Actions are enabled",
591 |       },
592 |       metadata: {
593 |         toolVersion: "2.0.0",
594 |         executionTime: Date.now() - startTime,
595 |         timestamp: new Date().toISOString(),
596 |       },
597 |     };
598 |     return formatMCPResponse(errorResponse);
599 |   }
600 | }
601 | 
602 | function generateWorkflow(
603 |   ssg: string,
604 |   branch: string,
605 |   _customDomain: string | undefined,
606 |   buildConfig: BuildConfig,
607 | ): string {
608 |   const workingDirPrefix = buildConfig.workingDirectory
609 |     ? `      working-directory: ${buildConfig.workingDirectory}\n`
610 |     : "";
611 | 
612 |   const nodeVersion = buildConfig.nodeVersion || "20";
613 |   const packageManager = buildConfig.packageManager || "npm";
614 | 
615 |   // Helper to get install command
616 |   const getInstallCmd = () => {
617 |     if (packageManager === "yarn") return "yarn install --frozen-lockfile";
618 |     if (packageManager === "pnpm") return "pnpm install --frozen-lockfile";
619 |     return "npm ci";
620 |   };
621 | 
622 |   // Helper to add working directory to steps
623 |   // const _addWorkingDir = (step: string) => {
624 |   //   if (!buildConfig.workingDirectory) return step;
625 |   //   return step.replace(
626 |   //     /^(\s+)run:/gm,
627 |   //     `$1working-directory: ${buildConfig.workingDirectory}\n$1run:`,
628 |   //   );
629 |   // };
630 | 
631 |   const workflows: Record<string, string> = {
632 |     docusaurus: `name: Deploy Docusaurus to GitHub Pages
633 | 
634 | on:
635 |   push:
636 |     branches: [main]
637 |   workflow_dispatch:
638 | 
639 | permissions:
640 |   contents: read
641 |   pages: write
642 |   id-token: write
643 | 
644 | concurrency:
645 |   group: "pages"
646 |   cancel-in-progress: false
647 | 
648 | jobs:
649 |   build:
650 |     runs-on: ubuntu-latest
651 |     steps:
652 |       - name: Checkout
653 |         uses: actions/checkout@v4
654 | 
655 |       - name: Setup Node.js
656 |         uses: actions/setup-node@v4
657 |         with:
658 |           node-version: '${nodeVersion}'
659 |           cache: '${packageManager}'${
660 |             buildConfig.workingDirectory
661 |               ? `\n          cache-dependency-path: ${buildConfig.workingDirectory}/package-lock.json`
662 |               : ""
663 |           }
664 | 
665 |       - name: Install dependencies
666 | ${workingDirPrefix}        run: ${getInstallCmd()}
667 | 
668 |       - name: Build website
669 | ${workingDirPrefix}        run: ${buildConfig.buildCommand}
670 | 
671 |       - name: Upload artifact
672 |         uses: actions/upload-pages-artifact@v2
673 |         with:
674 |           path: ${
675 |             buildConfig.workingDirectory
676 |               ? `${buildConfig.workingDirectory}/${buildConfig.outputPath}`
677 |               : buildConfig.outputPath
678 |           }
679 | 
680 |   deploy:
681 |     environment:
682 |       name: github-pages
683 |       url: \${{ steps.deployment.outputs.page_url }}
684 |     runs-on: ubuntu-latest
685 |     needs: build
686 |     steps:
687 |       - name: Deploy to GitHub Pages
688 |         id: deployment
689 |         uses: actions/deploy-pages@v3`,
690 | 
691 |     mkdocs: `name: Deploy MkDocs to GitHub Pages
692 | 
693 | on:
694 |   push:
695 |     branches: [main]
696 |   workflow_dispatch:
697 | 
698 | permissions:
699 |   contents: write
700 | 
701 | jobs:
702 |   deploy:
703 |     runs-on: ubuntu-latest${
704 |       buildConfig.workingDirectory
705 |         ? `\n    defaults:\n      run:\n        working-directory: ${buildConfig.workingDirectory}`
706 |         : ""
707 |     }
708 |     steps:
709 |       - uses: actions/checkout@v4
710 | 
711 |       - name: Setup Python
712 |         uses: actions/setup-python@v4
713 |         with:
714 |           python-version: '3.x'
715 | 
716 |       - name: Install dependencies
717 |         run: |
718 |           pip install -r requirements.txt
719 | 
720 |       - name: Build and Deploy
721 |         run: mkdocs gh-deploy --force --branch ${branch}`,
722 | 
723 |     hugo: `name: Deploy Hugo to GitHub Pages
724 | 
725 | on:
726 |   push:
727 |     branches: [main]
728 |   workflow_dispatch:
729 | 
730 | permissions:
731 |   contents: read
732 |   pages: write
733 |   id-token: write
734 | 
735 | concurrency:
736 |   group: "pages"
737 |   cancel-in-progress: false
738 | 
739 | jobs:
740 |   build:
741 |     runs-on: ubuntu-latest${
742 |       buildConfig.workingDirectory
743 |         ? `\n    defaults:\n      run:\n        working-directory: ${buildConfig.workingDirectory}`
744 |         : ""
745 |     }
746 |     steps:
747 |       - name: Checkout
748 |         uses: actions/checkout@v4
749 |         with:
750 |           submodules: recursive
751 | 
752 |       - name: Setup Hugo
753 |         uses: peaceiris/actions-hugo@v2
754 |         with:
755 |           hugo-version: 'latest'
756 |           extended: true
757 | 
758 |       - name: Build
759 |         run: ${buildConfig.buildCommand}
760 | 
761 |       - name: Upload artifact
762 |         uses: actions/upload-pages-artifact@v2
763 |         with:
764 |           path: ${
765 |             buildConfig.workingDirectory
766 |               ? `${buildConfig.workingDirectory}/${buildConfig.outputPath}`
767 |               : buildConfig.outputPath
768 |           }
769 | 
770 |   deploy:
771 |     environment:
772 |       name: github-pages
773 |       url: \${{ steps.deployment.outputs.page_url }}
774 |     runs-on: ubuntu-latest
775 |     needs: build
776 |     steps:
777 |       - name: Deploy to GitHub Pages
778 |         id: deployment
779 |         uses: actions/deploy-pages@v3`,
780 | 
781 |     jekyll: `name: Deploy Jekyll to GitHub Pages
782 | 
783 | on:
784 |   push:
785 |     branches: [main]
786 |   workflow_dispatch:
787 | 
788 | permissions:
789 |   contents: read
790 |   pages: write
791 |   id-token: write
792 | 
793 | concurrency:
794 |   group: "pages"
795 |   cancel-in-progress: false
796 | 
797 | jobs:
798 |   build:
799 |     runs-on: ubuntu-latest${
800 |       buildConfig.workingDirectory
801 |         ? `\n    defaults:\n      run:\n        working-directory: ${buildConfig.workingDirectory}`
802 |         : ""
803 |     }
804 |     steps:
805 |       - name: Checkout
806 |         uses: actions/checkout@v4
807 | 
808 |       - name: Setup Ruby
809 |         uses: ruby/setup-ruby@v1
810 |         with:
811 |           ruby-version: '3.1'
812 |           bundler-cache: true${
813 |             buildConfig.workingDirectory
814 |               ? `\n          working-directory: ${buildConfig.workingDirectory}`
815 |               : ""
816 |           }
817 | 
818 |       - name: Build with Jekyll
819 |         run: ${buildConfig.buildCommand}
820 |         env:
821 |           JEKYLL_ENV: production
822 | 
823 |       - name: Upload artifact
824 |         uses: actions/upload-pages-artifact@v2${
825 |           buildConfig.workingDirectory
826 |             ? `\n        with:\n          path: ${buildConfig.workingDirectory}/${buildConfig.outputPath}`
827 |             : ""
828 |         }
829 | 
830 |   deploy:
831 |     environment:
832 |       name: github-pages
833 |       url: \${{ steps.deployment.outputs.page_url }}
834 |     runs-on: ubuntu-latest
835 |     needs: build
836 |     steps:
837 |       - name: Deploy to GitHub Pages
838 |         id: deployment
839 |         uses: actions/deploy-pages@v3`,
840 | 
841 |     eleventy: `name: Deploy Eleventy to GitHub Pages
842 | 
843 | on:
844 |   push:
845 |     branches: [main]
846 |   workflow_dispatch:
847 | 
848 | permissions:
849 |   contents: read
850 |   pages: write
851 |   id-token: write
852 | 
853 | concurrency:
854 |   group: "pages"
855 |   cancel-in-progress: false
856 | 
857 | jobs:
858 |   build:
859 |     runs-on: ubuntu-latest
860 |     steps:
861 |       - name: Checkout
862 |         uses: actions/checkout@v4
863 | 
864 |       - name: Setup Node.js
865 |         uses: actions/setup-node@v4
866 |         with:
867 |           node-version: '${nodeVersion}'
868 |           cache: '${packageManager}'${
869 |             buildConfig.workingDirectory
870 |               ? `\n          cache-dependency-path: ${buildConfig.workingDirectory}/package-lock.json`
871 |               : ""
872 |           }
873 | 
874 |       - name: Install dependencies
875 | ${workingDirPrefix}        run: ${getInstallCmd()}
876 | 
877 |       - name: Build site
878 | ${workingDirPrefix}        run: ${buildConfig.buildCommand}
879 | 
880 |       - name: Upload artifact
881 |         uses: actions/upload-pages-artifact@v2
882 |         with:
883 |           path: ${
884 |             buildConfig.workingDirectory
885 |               ? `${buildConfig.workingDirectory}/${buildConfig.outputPath}`
886 |               : buildConfig.outputPath
887 |           }
888 | 
889 |   deploy:
890 |     environment:
891 |       name: github-pages
892 |       url: \${{ steps.deployment.outputs.page_url }}
893 |     runs-on: ubuntu-latest
894 |     needs: build
895 |     steps:
896 |       - name: Deploy to GitHub Pages
897 |         id: deployment
898 |         uses: actions/deploy-pages@v3`,
899 |   };
900 | 
901 |   return workflows[ssg] || workflows.jekyll;
902 | }
903 | 
```
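
For reference, `deployPages` can be driven directly. Everything the tool reads from `context` is optional (`context?.info`, `context?.meta?.reportProgress`), so a bare-bones stub suffices. The sketch below is minimal: the relative import path and the local repository path are assumptions, while the argument shape comes straight from `inputSchema` above.

```typescript
// Hedged sketch: direct invocation with a minimal progress/logging context.
// The import path is an assumption; the args mirror inputSchema above.
import { deployPages } from "./src/tools/deploy-pages.js";

async function main(): Promise<void> {
  const context = {
    info: async (message: string) => console.log(message),
    meta: {
      progressToken: "local-run",
      reportProgress: async (p: { progress: number; total: number }) =>
        console.log(`progress ${p.progress}/${p.total}`),
    },
  };

  const result = await deployPages(
    {
      repository: "./my-docs-repo", // non-http values are treated as local paths
      ssg: "docusaurus", // omit and pass analysisId to resolve from the knowledge graph
      branch: "gh-pages",
      projectName: "my-docs",
    },
    context,
  );

  console.log(JSON.stringify(result.content, null, 2));
}

main().catch(console.error);
```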

--------------------------------------------------------------------------------
/tests/tools/validate-readme-checklist.test.ts:
--------------------------------------------------------------------------------

```typescript
  1 | import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
  2 | import { promises as fs } from "fs";
  3 | import * as path from "path";
  4 | import * as tmp from "tmp";
  5 | import {
  6 |   validateReadmeChecklist,
  7 |   ReadmeChecklistValidator,
  8 |   ValidateReadmeChecklistSchema,
  9 | } from "../../src/tools/validate-readme-checklist";
 10 | 
 11 | describe("README Checklist Validator", () => {
 12 |   let tempDir: string;
 13 |   let validator: ReadmeChecklistValidator;
 14 | 
 15 |   beforeEach(() => {
 16 |     tempDir = tmp.dirSync({ unsafeCleanup: true }).name;
 17 |     validator = new ReadmeChecklistValidator();
 18 |   });
 19 | 
 20 |   afterEach(async () => {
 21 |     try {
 22 |       await fs.rm(tempDir, { recursive: true, force: true });
 23 |     } catch {
 24 |       // Ignore cleanup errors
 25 |     }
 26 |   });
 27 | 
 28 |   async function createTestReadme(
 29 |     content: string,
 30 |     filename = "README.md",
 31 |   ): Promise<string> {
 32 |     const readmePath = path.join(tempDir, filename);
 33 |     await fs.writeFile(readmePath, content, "utf-8");
 34 |     return readmePath;
 35 |   }
 36 | 
 37 |   async function createProjectFile(
 38 |     filename: string,
 39 |     content = "",
 40 |   ): Promise<void> {
 41 |     await fs.writeFile(path.join(tempDir, filename), content, "utf-8");
 42 |   }
 43 | 
 44 |   describe("Input Validation", () => {
 45 |     it("should validate required fields", () => {
 46 |       expect(() => ValidateReadmeChecklistSchema.parse({})).toThrow();
 47 |       expect(() =>
 48 |         ValidateReadmeChecklistSchema.parse({
 49 |           readmePath: "",
 50 |         }),
 51 |       ).toThrow();
 52 |     });
 53 | 
 54 |     it("should accept valid input with defaults", () => {
 55 |       const input = ValidateReadmeChecklistSchema.parse({
 56 |         readmePath: "/path/to/README.md",
 57 |       });
 58 | 
 59 |       expect(input.strict).toBe(false);
 60 |       expect(input.outputFormat).toBe("console");
 61 |     });
 62 | 
 63 |     it("should validate output format options", () => {
 64 |       const validFormats = ["json", "markdown", "console"];
 65 | 
 66 |       for (const format of validFormats) {
 67 |         expect(() =>
 68 |           ValidateReadmeChecklistSchema.parse({
 69 |             readmePath: "/test/README.md",
 70 |             outputFormat: format,
 71 |           }),
 72 |         ).not.toThrow();
 73 |       }
 74 | 
 75 |       expect(() =>
 76 |         ValidateReadmeChecklistSchema.parse({
 77 |           readmePath: "/test/README.md",
 78 |           outputFormat: "invalid",
 79 |         }),
 80 |       ).toThrow();
 81 |     });
 82 |   });
 83 | 
 84 |   describe("Essential Sections Validation", () => {
 85 |     it("should detect project title", async () => {
 86 |       const goodReadme = await createTestReadme(
 87 |         "# My Project\n\nDescription here",
 88 |         "good-README.md",
 89 |       );
 90 |       const badReadme = await createTestReadme(
 91 |         "## Not a main title\n\nNo main heading",
 92 |         "bad-README.md",
 93 |       );
 94 | 
 95 |       const goodInput = ValidateReadmeChecklistSchema.parse({
 96 |         readmePath: goodReadme,
 97 |       });
 98 |       const badInput = ValidateReadmeChecklistSchema.parse({
 99 |         readmePath: badReadme,
100 |       });
101 |       const result = await validateReadmeChecklist(goodInput);
102 |       const result2 = await validateReadmeChecklist(badInput);
103 | 
104 |       const titleCheck = result.categories["Essential Sections"].results.find(
105 |         (r) => r.item.id === "title",
106 |       );
107 |       const badTitleCheck = result2.categories[
108 |         "Essential Sections"
109 |       ].results.find((r) => r.item.id === "title");
110 | 
111 |       expect(titleCheck?.passed).toBe(true);
112 |       expect(badTitleCheck?.passed).toBe(false);
113 |     });
114 | 
115 |     it("should detect project description", async () => {
116 |       const withSubtitle = await createTestReadme(
117 |         "# Project\n\n> A great project description",
118 |         "subtitle-README.md",
119 |       );
120 |       const withParagraph = await createTestReadme(
121 |         "# Project\n\nThis is a description paragraph",
122 |         "paragraph-README.md",
123 |       );
124 |       const withoutDesc = await createTestReadme(
125 |         "# Project\n\n## Installation",
126 |         "no-desc-README.md",
127 |       );
128 | 
129 |       const subtitleResult = await validateReadmeChecklist(
130 |         ValidateReadmeChecklistSchema.parse({ readmePath: withSubtitle }),
131 |       );
132 |       const paragraphResult = await validateReadmeChecklist(
133 |         ValidateReadmeChecklistSchema.parse({ readmePath: withParagraph }),
134 |       );
135 |       const noDescResult = await validateReadmeChecklist(
136 |         ValidateReadmeChecklistSchema.parse({ readmePath: withoutDesc }),
137 |       );
138 | 
139 |       const getDescCheck = (result: any) =>
140 |         result.categories["Essential Sections"].results.find(
141 |           (r: any) => r.item.id === "description",
142 |         );
143 | 
144 |       expect(getDescCheck(subtitleResult)?.passed).toBe(true);
145 |       expect(getDescCheck(paragraphResult)?.passed).toBe(true);
146 |       expect(getDescCheck(noDescResult)?.passed).toBe(false);
147 |     });
148 | 
149 |     it("should detect TL;DR section", async () => {
150 |       const withTldr = await createTestReadme(
151 |         "# Project\n\n## TL;DR\n\nQuick summary",
152 |         "tldr-README.md",
153 |       );
154 |       const withQuickStart = await createTestReadme(
155 |         "# Project\n\n## Quick Start\n\nQuick summary",
156 |         "quickstart-README.md",
157 |       );
158 |       const withoutTldr = await createTestReadme(
159 |         "# Project\n\n## Installation",
160 |         "no-tldr-README.md",
161 |       );
162 | 
163 |       const tldrInput = ValidateReadmeChecklistSchema.parse({
164 |         readmePath: withTldr,
165 |       });
166 |       const quickStartInput = ValidateReadmeChecklistSchema.parse({
167 |         readmePath: withQuickStart,
168 |       });
169 |       const noTldrInput = ValidateReadmeChecklistSchema.parse({
170 |         readmePath: withoutTldr,
171 |       });
172 |       const result = await validateReadmeChecklist(tldrInput);
173 |       const result2 = await validateReadmeChecklist(quickStartInput);
174 |       const result3 = await validateReadmeChecklist(noTldrInput);
175 | 
176 |       const getTldrCheck = (result: any) =>
177 |         result.categories["Essential Sections"].results.find(
178 |           (r: any) => r.item.id === "tldr",
179 |         );
180 | 
181 |       expect(getTldrCheck(result)?.passed).toBe(true);
182 |       expect(getTldrCheck(result2)?.passed).toBe(true);
183 |       expect(getTldrCheck(result3)?.passed).toBe(false);
184 |     });
185 | 
186 |     it("should detect installation instructions with code blocks", async () => {
187 |       const goodInstall = await createTestReadme(
188 |         `
189 | # Project
190 | ## Installation
191 | \`\`\`bash
192 | npm install project
193 | \`\`\`
194 |       `,
195 |         "good-install-README.md",
196 |       );
197 | 
198 |       const noCodeBlocks = await createTestReadme(
199 |         `
200 | # Project
201 | ## Installation
202 | Just install it somehow
203 |       `,
204 |         "no-code-README.md",
205 |       );
206 | 
207 |       const noInstallSection = await createTestReadme(
208 |         "# Project\n\nSome content",
209 |         "no-install-README.md",
210 |       );
211 | 
212 |       const goodResult = await validateReadmeChecklist(
213 |         ValidateReadmeChecklistSchema.parse({ readmePath: goodInstall }),
214 |       );
215 |       const noCodeResult = await validateReadmeChecklist(
216 |         ValidateReadmeChecklistSchema.parse({ readmePath: noCodeBlocks }),
217 |       );
218 |       const noSectionResult = await validateReadmeChecklist(
219 |         ValidateReadmeChecklistSchema.parse({ readmePath: noInstallSection }),
220 |       );
221 | 
222 |       const getInstallCheck = (result: any) =>
223 |         result.categories["Essential Sections"].results.find(
224 |           (r: any) => r.item.id === "installation",
225 |         );
226 | 
227 |       expect(getInstallCheck(goodResult)?.passed).toBe(true);
228 |       expect(getInstallCheck(noCodeResult)?.passed).toBe(true); // This should pass because it has Installation section
229 |       expect(getInstallCheck(noSectionResult)?.passed).toBe(false);
230 |     });
231 | 
232 |     it("should detect usage examples", async () => {
233 |       const goodUsage = await createTestReadme(
234 |         `
235 | # Project
236 | ## Usage
237 | \`\`\`javascript
238 | const lib = require('lib');
239 | lib.doSomething();
240 | \`\`\`
241 |       `,
242 |         "good-usage-README.md",
243 |       );
244 | 
245 |       const noUsage = await createTestReadme(
246 |         "# Project\n\nNo usage section",
247 |         "no-usage-README.md",
248 |       );
249 | 
250 |       const goodResult = await validateReadmeChecklist(
251 |         ValidateReadmeChecklistSchema.parse({ readmePath: goodUsage }),
252 |       );
253 |       const noUsageResult = await validateReadmeChecklist(
254 |         ValidateReadmeChecklistSchema.parse({ readmePath: noUsage }),
255 |       );
256 | 
257 |       const getUsageCheck = (result: any) =>
258 |         result.categories["Essential Sections"].results.find(
259 |           (r: any) => r.item.id === "usage",
260 |         );
261 | 
262 |       expect(getUsageCheck(goodResult)?.passed).toBe(true);
263 |       expect(getUsageCheck(noUsageResult)?.passed).toBe(false);
264 |     });
265 | 
266 |     it("should detect license information", async () => {
267 |       const readmeWithLicense = await createTestReadme(
268 |         "# Project\n\n## License\n\nMIT",
269 |         "license-README.md",
270 |       );
271 |       const readmeWithoutLicense = await createTestReadme(
272 |         "# Project\n\nNo license info",
273 |         "no-license-README.md",
274 |       );
275 | 
276 |       // Test without LICENSE file first
277 |       const withLicenseResult = await validateReadmeChecklist(
278 |         ValidateReadmeChecklistSchema.parse({
279 |           readmePath: readmeWithLicense,
280 |           projectPath: tempDir,
281 |         }),
282 |       );
283 |       const withoutLicenseResult = await validateReadmeChecklist(
284 |         ValidateReadmeChecklistSchema.parse({
285 |           readmePath: readmeWithoutLicense,
286 |           projectPath: tempDir,
287 |         }),
288 |       );
289 | 
290 |       // Test with LICENSE file
291 |       await createProjectFile("LICENSE", "MIT License...");
292 |       const readmeWithLicenseFile = await createTestReadme(
293 |         "# Project\n\nSome content",
294 |         "license-file-README.md",
295 |       );
296 |       const withLicenseFileResult = await validateReadmeChecklist(
297 |         ValidateReadmeChecklistSchema.parse({
298 |           readmePath: readmeWithLicenseFile,
299 |           projectPath: tempDir,
300 |         }),
301 |       );
302 | 
303 |       const getLicenseCheck = (result: any) =>
304 |         result.categories["Essential Sections"].results.find(
305 |           (r: any) => r.item.id === "license",
306 |         );
307 | 
308 |       expect(getLicenseCheck(withLicenseResult)?.passed).toBe(true);
309 |       expect(getLicenseCheck(withoutLicenseResult)?.passed).toBe(false);
310 |       expect(getLicenseCheck(withLicenseFileResult)?.passed).toBe(true);
311 |     });
312 |   });
313 | 
314 |   describe("Community Health Validation", () => {
315 |     it("should detect contributing guidelines", async () => {
316 |       const readmeWithContributing = await createTestReadme(
317 |         "# Project\n\n## Contributing\n\nSee CONTRIBUTING.md",
318 |       );
319 |       await createProjectFile("CONTRIBUTING.md", "Contributing guidelines...");
320 | 
321 |       const result = await validateReadmeChecklist(
322 |         ValidateReadmeChecklistSchema.parse({
323 |           readmePath: readmeWithContributing,
324 |           projectPath: tempDir,
325 |         }),
326 |       );
327 | 
328 |       const contributingCheck = result.categories[
329 |         "Community Health"
330 |       ].results.find((r) => r.item.id === "contributing");
331 |       expect(contributingCheck?.passed).toBe(true);
332 |     });
333 | 
334 |     it("should detect code of conduct", async () => {
335 |       await createProjectFile("CODE_OF_CONDUCT.md", "Code of conduct...");
336 |       const readme = await createTestReadme("# Project\n\nSome content");
337 | 
338 |       const result = await validateReadmeChecklist(
339 |         ValidateReadmeChecklistSchema.parse({
340 |           readmePath: readme,
341 |           projectPath: tempDir,
342 |         }),
343 |       );
344 | 
345 |       const cocCheck = result.categories["Community Health"].results.find(
346 |         (r) => r.item.id === "code-of-conduct",
347 |       );
348 |       expect(cocCheck?.passed).toBe(true);
349 |     });
350 | 
351 |     it("should detect security policy", async () => {
352 |       await createProjectFile("SECURITY.md", "Security policy...");
353 |       const readme = await createTestReadme("# Project\n\nSome content");
354 | 
355 |       const result = await validateReadmeChecklist(
356 |         ValidateReadmeChecklistSchema.parse({
357 |           readmePath: readme,
358 |           projectPath: tempDir,
359 |         }),
360 |       );
361 | 
362 |       const securityCheck = result.categories["Community Health"].results.find(
363 |         (r) => r.item.id === "security",
364 |       );
365 |       expect(securityCheck?.passed).toBe(true);
366 |     });
367 |   });
368 | 
369 |   describe("Visual Elements Validation", () => {
370 |     it("should detect status badges", async () => {
371 |       const withBadges = await createTestReadme(
372 |         `
373 | # Project
374 | [![Build Status](https://travis-ci.org/user/repo.svg?branch=main)](https://travis-ci.org/user/repo)
375 | [![npm version](https://badge.fury.io/js/package.svg)](https://badge.fury.io/js/package)
376 |       `,
377 |         "with-badges-README.md",
378 |       );
379 | 
380 |       const withoutBadges = await createTestReadme(
381 |         "# Project\n\nNo badges here",
382 |         "no-badges-README.md",
383 |       );
384 | 
385 |       const withBadgesResult = await validateReadmeChecklist(
386 |         ValidateReadmeChecklistSchema.parse({ readmePath: withBadges }),
387 |       );
388 |       const withoutBadgesResult = await validateReadmeChecklist(
389 |         ValidateReadmeChecklistSchema.parse({ readmePath: withoutBadges }),
390 |       );
391 | 
392 |       const getBadgeCheck = (result: any) =>
393 |         result.categories["Visual Elements"].results.find(
394 |           (r: any) => r.item.id === "badges",
395 |         );
396 | 
397 |       expect(getBadgeCheck(withBadgesResult)?.passed).toBe(true);
398 |       expect(getBadgeCheck(withoutBadgesResult)?.passed).toBe(false);
399 |     });
400 | 
401 |     it("should detect screenshots and images", async () => {
402 |       const withScreenshots = await createTestReadme(
403 |         `
404 | # Project
405 | ![Screenshot](screenshot.png)
406 | ![Demo](demo.gif)
407 |       `,
408 |         "with-screenshots-README.md",
409 |       );
410 | 
411 |       const withoutScreenshots = await createTestReadme(
412 |         "# Project\n\nNo images",
413 |         "no-screenshots-README.md",
414 |       );
415 | 
416 |       const withScreenshotsResult = await validateReadmeChecklist(
417 |         ValidateReadmeChecklistSchema.parse({ readmePath: withScreenshots }),
418 |       );
419 |       const withoutScreenshotsResult = await validateReadmeChecklist(
420 |         ValidateReadmeChecklistSchema.parse({ readmePath: withoutScreenshots }),
421 |       );
422 | 
423 |       const getScreenshotCheck = (result: any) =>
424 |         result.categories["Visual Elements"].results.find(
425 |           (r: any) => r.item.id === "screenshots",
426 |         );
427 | 
428 |       expect(getScreenshotCheck(withScreenshotsResult)?.passed).toBe(true);
429 |       expect(getScreenshotCheck(withoutScreenshotsResult)?.passed).toBe(false);
430 |     });
431 | 
432 |     it("should validate markdown formatting", async () => {
433 |       const goodFormatting = await createTestReadme(
434 |         `
435 | # Main Title
436 | ## Section 1
437 | ### Subsection
438 | ## Section 2
439 |       `,
440 |         "good-formatting-README.md",
441 |       );
442 | 
443 |       const poorFormatting = await createTestReadme(
444 |         `
445 | # Title
446 | #Another Title
447 | ##Poor Spacing
448 |       `,
449 |         "poor-formatting-README.md",
450 |       );
451 | 
452 |       const goodResult = await validateReadmeChecklist(
453 |         ValidateReadmeChecklistSchema.parse({ readmePath: goodFormatting }),
454 |       );
455 |       const poorResult = await validateReadmeChecklist(
456 |         ValidateReadmeChecklistSchema.parse({ readmePath: poorFormatting }),
457 |       );
458 | 
459 |       const getFormattingCheck = (result: any) =>
460 |         result.categories["Visual Elements"].results.find(
461 |           (r: any) => r.item.id === "formatting",
462 |         );
463 | 
464 |       expect(getFormattingCheck(goodResult)?.passed).toBe(true);
465 |       expect(getFormattingCheck(poorResult)?.passed).toBe(false);
466 |     });
467 |   });
468 | 
469 |   describe("Content Quality Validation", () => {
470 |     it("should detect working code examples", async () => {
471 |       const withCodeExamples = await createTestReadme(
472 |         `
473 | # Project
474 | \`\`\`javascript
475 | const lib = require('lib');
476 | lib.doSomething();
477 | \`\`\`
478 | 
479 | \`\`\`bash
480 | npm install lib
481 | \`\`\`
482 |       `,
483 |         "with-code-README.md",
484 |       );
485 | 
486 |       const withoutCodeExamples = await createTestReadme(
487 |         "# Project\n\nNo code examples",
488 |         "no-code-examples-README.md",
489 |       );
490 | 
491 |       const withCodeResult = await validateReadmeChecklist(
492 |         ValidateReadmeChecklistSchema.parse({ readmePath: withCodeExamples }),
493 |       );
494 |       const withoutCodeResult = await validateReadmeChecklist(
495 |         ValidateReadmeChecklistSchema.parse({
496 |           readmePath: withoutCodeExamples,
497 |         }),
498 |       );
499 | 
500 |       const getCodeCheck = (result: any) =>
501 |         result.categories["Content Quality"].results.find(
502 |           (r: any) => r.item.id === "working-examples",
503 |         );
504 | 
505 |       expect(getCodeCheck(withCodeResult)?.passed).toBe(true);
506 |       expect(getCodeCheck(withoutCodeResult)?.passed).toBe(false);
507 |     });
508 | 
509 |     it("should validate appropriate length", async () => {
510 |       const shortReadme = await createTestReadme(
511 |         "# Project\n\nShort content",
512 |         "short-README.md",
513 |       );
514 |       const longContent =
515 |         "# Project\n\n" + "Long line of content.\n".repeat(350);
516 |       const longReadme = await createTestReadme(longContent, "long-README.md");
517 | 
518 |       const shortResult = await validateReadmeChecklist(
519 |         ValidateReadmeChecklistSchema.parse({ readmePath: shortReadme }),
520 |       );
521 |       const longResult = await validateReadmeChecklist(
522 |         ValidateReadmeChecklistSchema.parse({ readmePath: longReadme }),
523 |       );
524 | 
525 |       const getLengthCheck = (result: any) =>
526 |         result.categories["Content Quality"].results.find(
527 |           (r: any) => r.item.id === "appropriate-length",
528 |         );
529 | 
530 |       expect(getLengthCheck(shortResult)?.passed).toBe(true);
531 |       expect(getLengthCheck(longResult)?.passed).toBe(false);
532 |     });
533 | 
534 |     it("should validate scannable structure", async () => {
535 |       const goodStructure = await createTestReadme(
536 |         `
537 | # Main Title
538 | ## Section 1
539 | ### Subsection 1.1
540 | - Item 1
541 | - Item 2
542 | ### Subsection 1.2
543 | ## Section 2
544 | ### Subsection 2.1
545 | - Another item
546 | - Yet another item
547 |       `,
548 |         "good-structure-README.md",
549 |       );
550 | 
551 |       const poorStructure = await createTestReadme(
552 |         `
553 | # Title
554 | #### Skipped levels
555 | ## Back to level 2
556 |       `,
557 |         "poor-structure-README.md",
558 |       );
559 | 
560 |       const goodResult = await validateReadmeChecklist(
561 |         ValidateReadmeChecklistSchema.parse({ readmePath: goodStructure }),
562 |       );
563 |       const poorResult = await validateReadmeChecklist(
564 |         ValidateReadmeChecklistSchema.parse({ readmePath: poorStructure }),
565 |       );
566 | 
567 |       const getStructureCheck = (result: any) =>
568 |         result.categories["Content Quality"].results.find(
569 |           (r: any) => r.item.id === "scannable-structure",
570 |         );
571 | 
572 |       expect(getStructureCheck(goodResult)?.passed).toBe(true);
573 |       expect(getStructureCheck(poorResult)?.passed).toBe(false);
574 |     });
575 |   });
576 | 
577 |   describe("Report Generation", () => {
578 |     it("should generate comprehensive report with all categories", async () => {
579 |       const readme = await createTestReadme(`
580 | # Test Project
581 | > A test project description
582 | 
583 | ## TL;DR
584 | Quick summary of the project.
585 | 
586 | ## Quick Start
587 | \`\`\`bash
588 | npm install test-project
589 | \`\`\`
590 | 
591 | ## Usage
592 | \`\`\`javascript
593 | const test = require('test-project');
594 | test.run();
595 | \`\`\`
596 | 
597 | ## License
598 | MIT
599 |       `);
600 | 
601 |       const result = await validateReadmeChecklist(
602 |         ValidateReadmeChecklistSchema.parse({ readmePath: readme }),
603 |       );
604 | 
605 |       expect(result.overallScore).toBeGreaterThan(0);
606 |       expect(result.totalItems).toBeGreaterThan(0);
607 |       expect(result.passedItems).toBeGreaterThan(0);
608 |       expect(result.categories).toHaveProperty("Essential Sections");
609 |       expect(result.categories).toHaveProperty("Community Health");
610 |       expect(result.categories).toHaveProperty("Visual Elements");
611 |       expect(result.categories).toHaveProperty("Content Quality");
612 |       expect(result.wordCount).toBeGreaterThan(0);
613 |       expect(result.estimatedReadTime).toBeGreaterThan(0);
614 |     });
615 | 
616 |     it("should calculate scores correctly", async () => {
617 |       const perfectReadme = await createTestReadme(`
618 | # Perfect Project
619 | > An amazing project that does everything right
620 | 
621 | [![Build Status](https://travis-ci.org/user/repo.svg)](https://travis-ci.org/user/repo)
622 | 
623 | ## TL;DR
624 | This project is perfect and demonstrates all best practices.
625 | 
626 | ## Quick Start
627 | \`\`\`bash
628 | npm install perfect-project
629 | \`\`\`
630 | 
631 | ## Usage
632 | \`\`\`javascript
633 | const perfect = require('perfect-project');
634 | perfect.doSomething();
635 | \`\`\`
636 | 
637 | ## Contributing
638 | See CONTRIBUTING.md for guidelines.
639 | 
640 | ## License
641 | MIT © Author
642 |       `);
643 | 
644 |       await createProjectFile("CONTRIBUTING.md", "Guidelines...");
645 |       await createProjectFile("LICENSE", "MIT License...");
646 | 
647 |       const result = await validateReadmeChecklist(
648 |         ValidateReadmeChecklistSchema.parse({
649 |           readmePath: perfectReadme,
650 |           projectPath: tempDir,
651 |         }),
652 |       );
653 | 
654 |       expect(result.overallScore).toBeGreaterThan(70);
655 |       expect(result.categories["Essential Sections"].score).toBeGreaterThan(80);
656 |     });
657 | 
658 |     it("should provide helpful recommendations", async () => {
659 |       const poorReadme = await createTestReadme(
660 |         "# Poor Project\n\nMinimal content",
661 |       );
662 | 
663 |       const result = await validateReadmeChecklist(
664 |         ValidateReadmeChecklistSchema.parse({ readmePath: poorReadme }),
665 |       );
666 | 
667 |       expect(result.recommendations.length).toBeGreaterThan(0);
668 |       expect(result.overallScore).toBeLessThan(50);
669 |     });
670 |   });
671 | 
672 |   describe("Output Formatting", () => {
673 |     it("should format console output correctly", async () => {
674 |       const readme = await createTestReadme("# Test\n\nContent");
675 |       const result = await validateReadmeChecklist(
676 |         ValidateReadmeChecklistSchema.parse({
677 |           readmePath: readme,
678 |           outputFormat: "console",
679 |         }),
680 |       );
681 | 
682 |       const formatted = validator.formatReport(result, "console");
683 | 
684 |       expect(formatted).toContain("📋 README Checklist Report");
685 |       expect(formatted).toContain("Overall Score:");
686 |       expect(formatted).toContain("Essential Sections");
687 |       expect(formatted).toContain("✅");
688 |       expect(formatted).toContain("❌");
689 |     });
690 | 
691 |     it("should format markdown output correctly", async () => {
692 |       const readme = await createTestReadme("# Test\n\nContent");
693 |       const result = await validateReadmeChecklist(
694 |         ValidateReadmeChecklistSchema.parse({
695 |           readmePath: readme,
696 |           outputFormat: "markdown",
697 |         }),
698 |       );
699 | 
700 |       const formatted = validator.formatReport(result, "markdown");
701 | 
702 |       expect(formatted).toContain("# README Checklist Report");
703 |       expect(formatted).toContain("## Overall Score:");
704 |       expect(formatted).toContain("### Essential Sections");
705 |       expect(formatted).toContain("- ✅");
706 |       expect(formatted).toContain("- ❌");
707 |     });
708 | 
709 |     it("should format JSON output correctly", async () => {
710 |       const readme = await createTestReadme("# Test\n\nContent");
711 |       const result = await validateReadmeChecklist(
712 |         ValidateReadmeChecklistSchema.parse({
713 |           readmePath: readme,
714 |           outputFormat: "json",
715 |         }),
716 |       );
717 | 
718 |       const formatted = validator.formatReport(result, "json");
719 |       const parsed = JSON.parse(formatted);
720 | 
721 |       expect(parsed).toHaveProperty("overallScore");
722 |       expect(parsed).toHaveProperty("categories");
723 |       expect(parsed).toHaveProperty("recommendations");
724 |     });
725 |   });
726 | 
727 |   describe("Error Handling", () => {
728 |     it("should handle non-existent README file", async () => {
729 |       const nonExistentPath = path.join(tempDir, "nonexistent.md");
730 | 
731 |       await expect(
732 |         validateReadmeChecklist(
733 |           ValidateReadmeChecklistSchema.parse({ readmePath: nonExistentPath }),
734 |         ),
735 |       ).rejects.toThrow();
736 |     });
737 | 
738 |     it("should handle invalid project path gracefully", async () => {
739 |       const readme = await createTestReadme("# Test\n\nContent");
740 | 
741 |       const result = await validateReadmeChecklist(
742 |         ValidateReadmeChecklistSchema.parse({
743 |           readmePath: readme,
744 |           projectPath: "/invalid/path",
745 |         }),
746 |       );
747 | 
748 |       // Should still work, just without project file context
749 |       expect(result.overallScore).toBeGreaterThan(0);
750 |     });
751 | 
752 |     it("should handle empty README file", async () => {
753 |       const emptyReadme = await createTestReadme("", "empty-README.md");
754 | 
755 |       const result = await validateReadmeChecklist(
756 |         ValidateReadmeChecklistSchema.parse({ readmePath: emptyReadme }),
757 |       );
758 | 
759 |       // Empty README should pass length test (0 words <= 300) and external links test (no links to fail)
760 |       // but fail most other tests, resulting in a low overall score
761 |       expect(result.overallScore).toBeLessThan(20); // Very low score due to missing content
762 |       expect(result.passedItems).toBe(2); // Only length and external-links should pass
763 |       expect(result.failedItems).toBe(15); // Most checks should fail
764 |     });
765 |   });
766 | 
767 |   describe("Suggestions Generation", () => {
768 |     it("should provide specific suggestions for failed checks", async () => {
769 |       const incompleteReadme = await createTestReadme(
770 |         "# Project\n\nMinimal content",
771 |       );
772 | 
773 |       const result = await validateReadmeChecklist(
774 |         ValidateReadmeChecklistSchema.parse({ readmePath: incompleteReadme }),
775 |       );
776 | 
777 |       const failedChecks = Object.values(result.categories)
778 |         .flatMap((cat) => cat.results)
779 |         .filter((r) => !r.passed && r.suggestions);
780 | 
781 |       expect(failedChecks.length).toBeGreaterThan(0);
782 | 
783 |       for (const check of failedChecks) {
784 |         expect(check.suggestions).toBeDefined();
785 |         expect(check.suggestions!.length).toBeGreaterThan(0);
786 |       }
787 |     });
788 |   });
789 | });
790 | 
```
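
A note on the lookup pattern above: each test re-declares a local `get…Check` helper. A single shared, typed lookup could replace the repetition; a minimal sketch, assuming only the result shape the tests already exercise (`categories[<name>].results[].item.id` / `.passed`) — the `findCheck` name and `ChecklistCheck` type are illustrative, not module exports:

```typescript
// Minimal sketch of a shared lookup helper; assumes only the result shape
// the tests above exercise. Names here are illustrative, not module exports.
interface ChecklistCheck {
  item: { id: string };
  passed: boolean;
  suggestions?: string[];
}

function findCheck(
  result: { categories: Record<string, { results: ChecklistCheck[] }> },
  category: string,
  id: string,
): ChecklistCheck | undefined {
  return result.categories[category]?.results.find((r) => r.item.id === id);
}

// e.g. expect(findCheck(result, "Essential Sections", "tldr")?.passed).toBe(true);
```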
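For readers mining these tests for the call pattern rather than the assertions, the core flow reduces to parse-then-validate. A sketch using the same exports the tests use; the import path is hypothetical:

```typescript
// Usage sketch distilled from the tests above. The import path is
// hypothetical; the exports and report fields match what the tests use.
import {
  validateReadmeChecklist,
  ValidateReadmeChecklistSchema,
} from "../../src/tools/validate-readme-checklist";

async function checkReadme(readmePath: string, projectPath?: string) {
  const input = ValidateReadmeChecklistSchema.parse({
    readmePath,
    projectPath, // optional: enables LICENSE/CONTRIBUTING.md file detection
  });
  const report = await validateReadmeChecklist(input);
  console.log(
    `Score: ${report.overallScore} (${report.passedItems}/${report.totalItems} passed)`,
  );
  for (const rec of report.recommendations) {
    console.log(`- ${rec}`);
  }
  return report;
}
```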
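The `createTestReadme` and `createProjectFile` fixtures used throughout this page are defined earlier in the file (not shown here). A plausible reconstruction from their call sites, for orientation only — `tempDir` is the suite's temp directory created in `beforeEach`:

```typescript
// Plausible reconstruction of the fixtures from their call sites on this
// page; the real definitions appear earlier in the file.
import { promises as fs } from "fs";
import * as path from "path";

declare const tempDir: string; // created per-test in the suite's beforeEach

async function createTestReadme(
  content: string,
  filename = "README.md",
): Promise<string> {
  const filePath = path.join(tempDir, filename);
  await fs.writeFile(filePath, content, "utf-8");
  return filePath; // tests pass this back as readmePath
}

async function createProjectFile(
  filename: string,
  content: string,
): Promise<void> {
  await fs.writeFile(path.join(tempDir, filename), content, "utf-8");
}
```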