This is page 11 of 29. Use http://codebase.md/tosin2013/documcp?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│ ├── agents
│ │ ├── documcp-ast.md
│ │ ├── documcp-deploy.md
│ │ ├── documcp-memory.md
│ │ ├── documcp-test.md
│ │ └── documcp-tool.md
│ ├── copilot-instructions.md
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── automated-changelog.md
│ │ ├── bug_report.md
│ │ ├── bug_report.yml
│ │ ├── documentation_issue.md
│ │ ├── feature_request.md
│ │ ├── feature_request.yml
│ │ ├── npm-publishing-fix.md
│ │ └── release_improvements.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── release-drafter.yml
│ └── workflows
│ ├── auto-merge.yml
│ ├── ci.yml
│ ├── codeql.yml
│ ├── dependency-review.yml
│ ├── deploy-docs.yml
│ ├── README.md
│ ├── release-drafter.yml
│ └── release.yml
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│ ├── .docusaurus
│ │ ├── docusaurus-plugin-content-docs
│ │ │ └── default
│ │ │ └── __mdx-loader-dependency.json
│ │ └── docusaurus-plugin-content-pages
│ │ └── default
│ │ └── __plugin.json
│ ├── adrs
│ │ ├── 001-mcp-server-architecture.md
│ │ ├── 002-repository-analysis-engine.md
│ │ ├── 003-static-site-generator-recommendation-engine.md
│ │ ├── 004-diataxis-framework-integration.md
│ │ ├── 005-github-pages-deployment-automation.md
│ │ ├── 006-mcp-tools-api-design.md
│ │ ├── 007-mcp-prompts-and-resources-integration.md
│ │ ├── 008-intelligent-content-population-engine.md
│ │ ├── 009-content-accuracy-validation-framework.md
│ │ ├── 010-mcp-resource-pattern-redesign.md
│ │ └── README.md
│ ├── api
│ │ ├── .nojekyll
│ │ ├── assets
│ │ │ ├── hierarchy.js
│ │ │ ├── highlight.css
│ │ │ ├── icons.js
│ │ │ ├── icons.svg
│ │ │ ├── main.js
│ │ │ ├── navigation.js
│ │ │ ├── search.js
│ │ │ └── style.css
│ │ ├── hierarchy.html
│ │ ├── index.html
│ │ ├── modules.html
│ │ └── variables
│ │ └── TOOLS.html
│ ├── assets
│ │ └── logo.svg
│ ├── development
│ │ └── MCP_INSPECTOR_TESTING.md
│ ├── docusaurus.config.js
│ ├── explanation
│ │ ├── architecture.md
│ │ └── index.md
│ ├── guides
│ │ ├── link-validation.md
│ │ ├── playwright-integration.md
│ │ └── playwright-testing-workflow.md
│ ├── how-to
│ │ ├── analytics-setup.md
│ │ ├── custom-domains.md
│ │ ├── documentation-freshness-tracking.md
│ │ ├── github-pages-deployment.md
│ │ ├── index.md
│ │ ├── local-testing.md
│ │ ├── performance-optimization.md
│ │ ├── prompting-guide.md
│ │ ├── repository-analysis.md
│ │ ├── seo-optimization.md
│ │ ├── site-monitoring.md
│ │ ├── troubleshooting.md
│ │ └── usage-examples.md
│ ├── index.md
│ ├── knowledge-graph.md
│ ├── package-lock.json
│ ├── package.json
│ ├── phase-2-intelligence.md
│ ├── reference
│ │ ├── api-overview.md
│ │ ├── cli.md
│ │ ├── configuration.md
│ │ ├── deploy-pages.md
│ │ ├── index.md
│ │ ├── mcp-tools.md
│ │ └── prompt-templates.md
│ ├── research
│ │ ├── cross-domain-integration
│ │ │ └── README.md
│ │ ├── domain-1-mcp-architecture
│ │ │ ├── index.md
│ │ │ └── mcp-performance-research.md
│ │ ├── domain-2-repository-analysis
│ │ │ └── README.md
│ │ ├── domain-3-ssg-recommendation
│ │ │ ├── index.md
│ │ │ └── ssg-performance-analysis.md
│ │ ├── domain-4-diataxis-integration
│ │ │ └── README.md
│ │ ├── domain-5-github-deployment
│ │ │ ├── github-pages-security-analysis.md
│ │ │ └── index.md
│ │ ├── domain-6-api-design
│ │ │ └── README.md
│ │ ├── README.md
│ │ ├── research-integration-summary-2025-01-14.md
│ │ ├── research-progress-template.md
│ │ └── research-questions-2025-01-14.md
│ ├── robots.txt
│ ├── sidebars.js
│ ├── sitemap.xml
│ ├── src
│ │ └── css
│ │ └── custom.css
│ └── tutorials
│ ├── development-setup.md
│ ├── environment-setup.md
│ ├── first-deployment.md
│ ├── getting-started.md
│ ├── index.md
│ ├── memory-workflows.md
│ └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│ └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│ ├── benchmarks
│ │ └── performance.ts
│ ├── index.ts
│ ├── memory
│ │ ├── contextual-retrieval.ts
│ │ ├── deployment-analytics.ts
│ │ ├── enhanced-manager.ts
│ │ ├── export-import.ts
│ │ ├── freshness-kg-integration.ts
│ │ ├── index.ts
│ │ ├── integration.ts
│ │ ├── kg-code-integration.ts
│ │ ├── kg-health.ts
│ │ ├── kg-integration.ts
│ │ ├── kg-link-validator.ts
│ │ ├── kg-storage.ts
│ │ ├── knowledge-graph.ts
│ │ ├── learning.ts
│ │ ├── manager.ts
│ │ ├── multi-agent-sharing.ts
│ │ ├── pruning.ts
│ │ ├── schemas.ts
│ │ ├── storage.ts
│ │ ├── temporal-analysis.ts
│ │ ├── user-preferences.ts
│ │ └── visualization.ts
│ ├── prompts
│ │ └── technical-writer-prompts.ts
│ ├── scripts
│ │ └── benchmark.ts
│ ├── templates
│ │ └── playwright
│ │ ├── accessibility.spec.template.ts
│ │ ├── Dockerfile.template
│ │ ├── docs-e2e.workflow.template.yml
│ │ ├── link-validation.spec.template.ts
│ │ └── playwright.config.template.ts
│ ├── tools
│ │ ├── analyze-deployments.ts
│ │ ├── analyze-readme.ts
│ │ ├── analyze-repository.ts
│ │ ├── check-documentation-links.ts
│ │ ├── deploy-pages.ts
│ │ ├── detect-gaps.ts
│ │ ├── evaluate-readme-health.ts
│ │ ├── generate-config.ts
│ │ ├── generate-contextual-content.ts
│ │ ├── generate-llm-context.ts
│ │ ├── generate-readme-template.ts
│ │ ├── generate-technical-writer-prompts.ts
│ │ ├── kg-health-check.ts
│ │ ├── manage-preferences.ts
│ │ ├── manage-sitemap.ts
│ │ ├── optimize-readme.ts
│ │ ├── populate-content.ts
│ │ ├── readme-best-practices.ts
│ │ ├── recommend-ssg.ts
│ │ ├── setup-playwright-tests.ts
│ │ ├── setup-structure.ts
│ │ ├── sync-code-to-docs.ts
│ │ ├── test-local-deployment.ts
│ │ ├── track-documentation-freshness.ts
│ │ ├── update-existing-documentation.ts
│ │ ├── validate-content.ts
│ │ ├── validate-documentation-freshness.ts
│ │ ├── validate-readme-checklist.ts
│ │ └── verify-deployment.ts
│ ├── types
│ │ └── api.ts
│ ├── utils
│ │ ├── ast-analyzer.ts
│ │ ├── code-scanner.ts
│ │ ├── content-extractor.ts
│ │ ├── drift-detector.ts
│ │ ├── freshness-tracker.ts
│ │ ├── language-parsers-simple.ts
│ │ ├── permission-checker.ts
│ │ └── sitemap-generator.ts
│ └── workflows
│ └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│ ├── api
│ │ └── mcp-responses.test.ts
│ ├── benchmarks
│ │ └── performance.test.ts
│ ├── edge-cases
│ │ └── error-handling.test.ts
│ ├── functional
│ │ └── tools.test.ts
│ ├── integration
│ │ ├── kg-documentation-workflow.test.ts
│ │ ├── knowledge-graph-workflow.test.ts
│ │ ├── mcp-readme-tools.test.ts
│ │ ├── memory-mcp-tools.test.ts
│ │ ├── readme-technical-writer.test.ts
│ │ └── workflow.test.ts
│ ├── memory
│ │ ├── contextual-retrieval.test.ts
│ │ ├── enhanced-manager.test.ts
│ │ ├── export-import.test.ts
│ │ ├── freshness-kg-integration.test.ts
│ │ ├── kg-code-integration.test.ts
│ │ ├── kg-health.test.ts
│ │ ├── kg-link-validator.test.ts
│ │ ├── kg-storage-validation.test.ts
│ │ ├── kg-storage.test.ts
│ │ ├── knowledge-graph-enhanced.test.ts
│ │ ├── knowledge-graph.test.ts
│ │ ├── learning.test.ts
│ │ ├── manager-advanced.test.ts
│ │ ├── manager.test.ts
│ │ ├── mcp-resource-integration.test.ts
│ │ ├── mcp-tool-persistence.test.ts
│ │ ├── schemas.test.ts
│ │ ├── storage.test.ts
│ │ ├── temporal-analysis.test.ts
│ │ └── user-preferences.test.ts
│ ├── performance
│ │ ├── memory-load-testing.test.ts
│ │ └── memory-stress-testing.test.ts
│ ├── prompts
│ │ ├── guided-workflow-prompts.test.ts
│ │ └── technical-writer-prompts.test.ts
│ ├── server.test.ts
│ ├── setup.ts
│ ├── tools
│ │ ├── all-tools.test.ts
│ │ ├── analyze-coverage.test.ts
│ │ ├── analyze-deployments.test.ts
│ │ ├── analyze-readme.test.ts
│ │ ├── analyze-repository.test.ts
│ │ ├── check-documentation-links.test.ts
│ │ ├── deploy-pages-kg-retrieval.test.ts
│ │ ├── deploy-pages-tracking.test.ts
│ │ ├── deploy-pages.test.ts
│ │ ├── detect-gaps.test.ts
│ │ ├── evaluate-readme-health.test.ts
│ │ ├── generate-contextual-content.test.ts
│ │ ├── generate-llm-context.test.ts
│ │ ├── generate-readme-template.test.ts
│ │ ├── generate-technical-writer-prompts.test.ts
│ │ ├── kg-health-check.test.ts
│ │ ├── manage-sitemap.test.ts
│ │ ├── optimize-readme.test.ts
│ │ ├── readme-best-practices.test.ts
│ │ ├── recommend-ssg-historical.test.ts
│ │ ├── recommend-ssg-preferences.test.ts
│ │ ├── recommend-ssg.test.ts
│ │ ├── simple-coverage.test.ts
│ │ ├── sync-code-to-docs.test.ts
│ │ ├── test-local-deployment.test.ts
│ │ ├── tool-error-handling.test.ts
│ │ ├── track-documentation-freshness.test.ts
│ │ ├── validate-content.test.ts
│ │ ├── validate-documentation-freshness.test.ts
│ │ └── validate-readme-checklist.test.ts
│ ├── types
│ │ └── type-safety.test.ts
│ └── utils
│ ├── ast-analyzer.test.ts
│ ├── content-extractor.test.ts
│ ├── drift-detector.test.ts
│ ├── freshness-tracker.test.ts
│ └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```
# Files
--------------------------------------------------------------------------------
/tests/tools/deploy-pages-tracking.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Tests for Phase 2.3: Deployment Outcome Tracking
3 | * Tests the enhanced deploy_pages tool with knowledge graph integration
4 | */
5 |
6 | import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
7 | import { promises as fs } from "fs";
8 | import { join } from "path";
9 | import { tmpdir } from "os";
10 | import {
11 | initializeKnowledgeGraph,
12 | getKnowledgeGraph,
13 | } from "../../src/memory/kg-integration.js";
14 | import { deployPages } from "../../src/tools/deploy-pages.js";
15 | import {
16 | getUserPreferenceManager,
17 | clearPreferenceManagerCache,
18 | } from "../../src/memory/user-preferences.js";
19 |
20 | describe("deployPages with Deployment Tracking (Phase 2.3)", () => {
21 | let testDir: string;
22 | let originalEnv: string | undefined;
23 |
24 | beforeEach(async () => {
25 | // Create temporary test directory
26 | testDir = join(tmpdir(), `deploy-pages-tracking-test-${Date.now()}`);
27 | await fs.mkdir(testDir, { recursive: true });
28 |
29 | // Set environment variable for storage
30 | originalEnv = process.env.DOCUMCP_STORAGE_DIR;
31 | process.env.DOCUMCP_STORAGE_DIR = testDir;
32 |
33 | // Initialize KG
34 | await initializeKnowledgeGraph(testDir);
35 |
36 | // Clear preference manager cache
37 | clearPreferenceManagerCache();
38 | });
39 |
40 | afterEach(async () => {
41 | // Restore environment
42 | if (originalEnv) {
43 | process.env.DOCUMCP_STORAGE_DIR = originalEnv;
44 | } else {
45 | delete process.env.DOCUMCP_STORAGE_DIR;
46 | }
47 |
48 | // Clean up test directory
49 | try {
50 | await fs.rm(testDir, { recursive: true, force: true });
51 | } catch (error) {
52 | console.warn("Failed to clean up test directory:", error);
53 | }
54 |
55 | // Clear preference manager cache
56 | clearPreferenceManagerCache();
57 | });
58 |
59 | describe("Deployment Tracking", () => {
60 | it("should track successful deployment setup in knowledge graph", async () => {
61 | const projectPath = testDir;
62 |
63 | const result = await deployPages({
64 | repository: projectPath,
65 | ssg: "docusaurus",
66 | projectPath,
67 | projectName: "Test Project",
68 | userId: "test-user-1",
69 | });
70 |
71 | const content = result.content[0];
72 | expect(content.type).toBe("text");
73 | const data = JSON.parse(content.text);
74 | expect(data.repository).toBeDefined();
75 | expect(data.ssg).toBe("docusaurus");
76 |
77 | // Verify deployment was tracked in knowledge graph
78 | const kg = await getKnowledgeGraph();
79 | const projects = await kg.findNodes({ type: "project" });
80 | expect(projects.length).toBeGreaterThan(0);
81 |
82 | // Find deployments
83 | const deployments = await kg.findEdges({
84 | properties: { baseType: "project_deployed_with" },
85 | });
86 | expect(deployments.length).toBeGreaterThan(0);
87 | expect(deployments[0].properties.success).toBe(true);
88 | });
89 |
90 | it("should track SSG usage in user preferences", async () => {
91 | const projectPath = testDir;
92 | const userId = "test-user-2";
93 |
94 | await deployPages({
95 | repository: projectPath,
96 | ssg: "mkdocs",
97 | projectPath,
98 | projectName: "Python Docs",
99 | userId,
100 | });
101 |
102 | // Check if user preferences were updated
103 | const manager = await getUserPreferenceManager(userId);
104 | const recommendations = await manager.getSSGRecommendations();
105 |
106 | expect(recommendations.length).toBeGreaterThan(0);
107 | expect(recommendations[0].ssg).toBe("mkdocs");
108 | expect(recommendations[0].reason).toContain("Used 1 time");
109 | });
110 |
111 | it("should track deployment with custom analysisId", async () => {
112 | const projectPath = testDir;
113 | const analysisId = "test_analysis_123";
114 |
115 | await deployPages({
116 | repository: projectPath,
117 | ssg: "hugo",
118 | projectPath,
119 | projectName: "Hugo Site",
120 | analysisId,
121 | userId: "test-user-3",
122 | });
123 |
124 | const kg = await getKnowledgeGraph();
125 | const projects = await kg.findNodes({ type: "project" });
126 |
127 | // At least one project should be created with tracking
128 | expect(projects.length).toBeGreaterThan(0);
129 |
130 | // Verify deployment was tracked
131 | const deployments = await kg.findEdges({
132 | properties: { baseType: "project_deployed_with" },
133 | });
134 | expect(deployments.length).toBeGreaterThan(0);
135 | });
136 |
137 | it("should track deployment for multiple users independently", async () => {
138 | const projectPath = testDir;
139 |
140 | await deployPages({
141 | repository: projectPath,
142 | ssg: "eleventy",
143 | projectPath,
144 | projectName: "User1 Site",
145 | userId: "user1",
146 | });
147 |
148 | await deployPages({
149 | repository: projectPath,
150 | ssg: "jekyll",
151 | projectPath,
152 | projectName: "User2 Site",
153 | userId: "user2",
154 | });
155 |
156 | // Check user1 preferences
157 | const manager1 = await getUserPreferenceManager("user1");
158 | const recs1 = await manager1.getSSGRecommendations();
159 | expect(recs1[0].ssg).toBe("eleventy");
160 |
161 | // Check user2 preferences
162 | const manager2 = await getUserPreferenceManager("user2");
163 | const recs2 = await manager2.getSSGRecommendations();
164 | expect(recs2[0].ssg).toBe("jekyll");
165 | });
166 | });
167 |
168 | describe("Deployment without Tracking", () => {
169 | it("should work without projectPath (no tracking)", async () => {
170 | const result = await deployPages({
171 | repository: testDir,
172 | ssg: "docusaurus",
173 | });
174 |
175 | const content = result.content[0];
176 | expect(content.type).toBe("text");
177 | const data = JSON.parse(content.text);
178 | expect(data.repository).toBeDefined();
179 | expect(data.ssg).toBe("docusaurus");
180 |
181 | // No projects should be created
182 | const kg = await getKnowledgeGraph();
183 | const projects = await kg.findNodes({ type: "project" });
184 | expect(projects.length).toBe(0);
185 | });
186 |
187 | it("should handle tracking errors gracefully", async () => {
188 | // Set invalid storage directory to trigger tracking error
189 | const invalidEnv = process.env.DOCUMCP_STORAGE_DIR;
190 | process.env.DOCUMCP_STORAGE_DIR = "/invalid/path/that/does/not/exist";
191 |
192 | const result = await deployPages({
193 | repository: testDir,
194 | ssg: "hugo",
195 | projectPath: testDir,
196 | projectName: "Test",
197 | });
198 |
199 | // Restore environment
200 | process.env.DOCUMCP_STORAGE_DIR = invalidEnv;
201 |
202 | const content = result.content[0];
203 | expect(content.type).toBe("text");
204 | const data = JSON.parse(content.text);
205 |
206 | // Deployment should still succeed even if tracking fails
207 | expect(data.repository).toBe(testDir);
208 | expect(data.ssg).toBe("hugo");
209 | });
210 | });
211 |
212 | describe("Custom Domain and Branches", () => {
213 | it("should track deployment with custom domain", async () => {
214 | const result = await deployPages({
215 | repository: testDir,
216 | ssg: "jekyll",
217 | customDomain: "docs.example.com",
218 | projectPath: testDir,
219 | projectName: "Custom Domain Site",
220 | });
221 |
222 | const content = result.content[0];
223 | const data = JSON.parse(content.text);
224 | expect(data.customDomain).toBe("docs.example.com");
225 | expect(data.cnameCreated).toBe(true);
226 | });
227 |
228 | it("should track deployment with custom branch", async () => {
229 | const result = await deployPages({
230 | repository: testDir,
231 | ssg: "mkdocs",
232 | branch: "docs",
233 | projectPath: testDir,
234 | projectName: "Custom Branch Site",
235 | });
236 |
237 | const content = result.content[0];
238 | const data = JSON.parse(content.text);
239 | expect(data.branch).toBe("docs");
240 | });
241 | });
242 |
243 | describe("Preference Learning", () => {
244 | it("should increase user preference for repeatedly used SSG", async () => {
245 | const userId = "test-user-repeat";
246 | const projectPath = testDir;
247 |
248 | // Deploy with Hugo 3 times
249 | for (let i = 0; i < 3; i++) {
250 | await deployPages({
251 | repository: projectPath,
252 | ssg: "hugo",
253 | projectPath: `${projectPath}/project${i}`,
254 | projectName: `Project ${i}`,
255 | userId,
256 | });
257 | }
258 |
259 | const manager = await getUserPreferenceManager(userId);
260 | const recommendations = await manager.getSSGRecommendations();
261 |
262 | expect(recommendations.length).toBeGreaterThan(0);
263 | expect(recommendations[0].ssg).toBe("hugo");
264 | expect(recommendations[0].reason).toContain("Used 3 time");
265 | expect(recommendations[0].score).toBeGreaterThan(0);
266 | });
267 |
268 | it("should track successful deployments with 100% success rate", async () => {
269 | const userId = "test-user-success";
270 |
271 | // Multiple successful deployments
272 | await deployPages({
273 | repository: testDir,
274 | ssg: "docusaurus",
275 | projectPath: `${testDir}/site1`,
276 | projectName: "Site 1",
277 | userId,
278 | });
279 |
280 | await deployPages({
281 | repository: testDir,
282 | ssg: "docusaurus",
283 | projectPath: `${testDir}/site2`,
284 | projectName: "Site 2",
285 | userId,
286 | });
287 |
288 | const manager = await getUserPreferenceManager(userId);
289 | const recommendations = await manager.getSSGRecommendations();
290 |
291 | expect(recommendations[0].ssg).toBe("docusaurus");
292 | expect(recommendations[0].reason).toContain("100% success rate");
293 | });
294 |
295 | test("should handle Eleventy SSG configuration", async () => {
296 | await fs.mkdir(join(testDir, "src"), { recursive: true });
297 | await fs.writeFile(join(testDir, ".eleventy.js"), "module.exports = {}");
298 | await fs.writeFile(join(testDir, "package.json"), '{"name": "test"}');
299 |
300 | const result = await deployPages({
301 | repository: testDir,
302 | ssg: "eleventy",
303 | projectPath: testDir,
304 | projectName: "Eleventy Test",
305 | userId: "test-user-eleventy",
306 | });
307 |
308 | const content = result.content[0];
309 | expect(content.type).toBe("text");
310 | const data = JSON.parse(content.text);
311 | expect(data.ssg).toBe("eleventy");
312 | expect(data.repository).toBeDefined();
313 | });
314 |
315 | test("should handle MkDocs SSG configuration", async () => {
316 | await fs.mkdir(join(testDir, "docs"), { recursive: true });
317 | await fs.writeFile(join(testDir, "mkdocs.yml"), "site_name: Test");
318 | await fs.writeFile(join(testDir, "docs", "index.md"), "# Test");
319 |
320 | const result = await deployPages({
321 | repository: testDir,
322 | ssg: "mkdocs",
323 | projectPath: testDir,
324 | projectName: "MkDocs Test",
325 | userId: "test-user-mkdocs",
326 | });
327 |
328 | const content = result.content[0];
329 | expect(content.type).toBe("text");
330 | const data = JSON.parse(content.text);
331 | expect(data.ssg).toBe("mkdocs");
332 | expect(data.repository).toBeDefined();
333 | });
334 |
335 | test("should handle Hugo SSG with custom config", async () => {
336 | await fs.mkdir(join(testDir, "content"), { recursive: true });
337 | await fs.writeFile(join(testDir, "config.toml"), 'baseURL = "/"');
338 | await fs.writeFile(join(testDir, "content", "test.md"), "# Test");
339 |
340 | const result = await deployPages({
341 | repository: testDir,
342 | ssg: "hugo",
343 | projectPath: testDir,
344 | projectName: "Hugo Test",
345 | userId: "test-user-hugo",
346 | });
347 |
348 | const content = result.content[0];
349 | expect(content.type).toBe("text");
350 | const data = JSON.parse(content.text);
351 | expect(data.ssg).toBe("hugo");
352 | expect(data.repository).toBeDefined();
353 | });
354 |
355 | test("should fallback gracefully when no config detected", async () => {
356 | const emptyDir = join(tmpdir(), "empty-" + Date.now());
357 | await fs.mkdir(emptyDir, { recursive: true });
358 |
359 | const result = await deployPages({
360 | repository: emptyDir,
361 | ssg: "jekyll",
362 | projectPath: emptyDir,
363 | projectName: "Empty Test",
364 | userId: "test-user-empty",
365 | });
366 |
367 | const content = result.content[0];
368 | expect(content.type).toBe("text");
369 | const data = JSON.parse(content.text);
370 | expect(data.ssg).toBe("jekyll");
371 | expect(data.repository).toBeDefined();
372 |
373 | await fs.rm(emptyDir, { recursive: true, force: true });
374 | });
375 |
376 | test("should detect docs:build script in package.json", async () => {
377 | await fs.writeFile(
378 | join(testDir, "package.json"),
379 | JSON.stringify({
380 | name: "test",
381 | scripts: { "docs:build": "docusaurus build" },
382 | }),
383 | );
384 |
385 | const result = await deployPages({
386 | repository: testDir,
387 | ssg: "docusaurus",
388 | projectPath: testDir,
389 | projectName: "Docs Build Test",
390 | userId: "test-user-docs-build",
391 | });
392 |
393 | const content = result.content[0];
394 | expect(content.type).toBe("text");
395 | const data = JSON.parse(content.text);
396 | expect(data.ssg).toBe("docusaurus");
397 | });
398 |
399 | test("should detect docusaurus in start script", async () => {
400 | await fs.writeFile(
401 | join(testDir, "package.json"),
402 | JSON.stringify({
403 | name: "test",
404 | scripts: { start: "docusaurus start" },
405 | }),
406 | );
407 |
408 | const result = await deployPages({
409 | repository: testDir,
410 | ssg: "docusaurus",
411 | projectPath: testDir,
412 | projectName: "Start Script Test",
413 | userId: "test-user-start-script",
414 | });
415 |
416 | const content = result.content[0];
417 | expect(content.type).toBe("text");
418 | expect(content.text).toBeDefined();
419 | });
420 |
421 | test("should detect yarn package manager", async () => {
422 | await fs.writeFile(join(testDir, "yarn.lock"), "# yarn lockfile");
423 | await fs.writeFile(
424 | join(testDir, "package.json"),
425 | JSON.stringify({
426 | name: "test",
427 | scripts: { build: "yarn build" },
428 | }),
429 | );
430 |
431 | const result = await deployPages({
432 | repository: testDir,
433 | ssg: "docusaurus",
434 | projectPath: testDir,
435 | projectName: "Yarn Test",
436 | userId: "test-user-yarn",
437 | });
438 |
439 | const content = result.content[0];
440 | expect(content.type).toBe("text");
441 | expect(content.text).toBeDefined();
442 | });
443 |
444 | test("should detect pnpm package manager", async () => {
445 | await fs.writeFile(
446 | join(testDir, "pnpm-lock.yaml"),
447 | "lockfileVersion: 5.4",
448 | );
449 | await fs.writeFile(
450 | join(testDir, "package.json"),
451 | JSON.stringify({
452 | name: "test",
453 | scripts: { build: "pnpm build" },
454 | }),
455 | );
456 |
457 | const result = await deployPages({
458 | repository: testDir,
459 | ssg: "docusaurus",
460 | projectPath: testDir,
461 | projectName: "Pnpm Test",
462 | userId: "test-user-pnpm",
463 | });
464 |
465 | const content = result.content[0];
466 | expect(content.type).toBe("text");
467 | expect(content.text).toBeDefined();
468 | });
469 |
470 | test("should detect Node version from engines field", async () => {
471 | await fs.writeFile(
472 | join(testDir, "package.json"),
473 | JSON.stringify({
474 | name: "test",
475 | engines: { node: ">=18.0.0" },
476 | }),
477 | );
478 |
479 | const result = await deployPages({
480 | repository: testDir,
481 | ssg: "docusaurus",
482 | projectPath: testDir,
483 | projectName: "Node Version Test",
484 | userId: "test-user-node-version",
485 | });
486 |
487 | const content = result.content[0];
488 | expect(content.type).toBe("text");
489 | expect(content.text).toBeDefined();
490 | });
491 |
492 | test("should retrieve SSG from knowledge graph when analysisId provided", async () => {
493 | // First deployment to populate knowledge graph
494 | const analysisId = "kg-test-analysis-" + Date.now();
495 |
496 | await deployPages({
497 | repository: testDir,
498 | ssg: "docusaurus",
499 | projectPath: testDir,
500 | projectName: "KG Test Project",
501 | userId: "test-user-kg",
502 | analysisId,
503 | });
504 |
505 | // Second deployment using same analysisId should query KG
506 | const result = await deployPages({
507 | repository: testDir,
508 | ssg: "docusaurus",
509 | projectPath: testDir,
510 | projectName: "KG Test Project Repeat",
511 | userId: "test-user-kg",
512 | analysisId,
513 | });
514 |
515 | const content = result.content[0];
516 | expect(content.type).toBe("text");
517 | const data = JSON.parse(content.text);
518 | expect(data.ssg).toBe("docusaurus");
519 | });
520 |
521 | test("should handle Jekyll SSG with custom config file", async () => {
522 | await fs.writeFile(
523 | join(testDir, "_config.yml"),
524 | "title: Test Site\ntheme: minima",
525 | );
526 |
527 | const result = await deployPages({
528 | repository: testDir,
529 | ssg: "jekyll",
530 | projectPath: testDir,
531 | projectName: "Jekyll Test",
532 | userId: "test-user-jekyll",
533 | });
534 |
535 | const content = result.content[0];
536 | expect(content.type).toBe("text");
537 | const data = JSON.parse(content.text);
538 | expect(data.ssg).toBe("jekyll");
539 | });
540 |
541 | test("should detect Python-based SSG from requirements.txt", async () => {
542 | await fs.mkdir(join(testDir, "docs"), { recursive: true });
543 | await fs.writeFile(join(testDir, "requirements.txt"), "mkdocs>=1.0");
544 | await fs.writeFile(join(testDir, "mkdocs.yml"), "site_name: Test");
545 |
546 | const result = await deployPages({
547 | repository: testDir,
548 | ssg: "mkdocs",
549 | projectPath: testDir,
550 | projectName: "Python SSG Test",
551 | userId: "test-user-python-ssg",
552 | });
553 |
554 | const content = result.content[0];
555 | expect(content.type).toBe("text");
556 | expect(content.text).toBeDefined();
557 | });
558 | });
559 | });
560 |
```
--------------------------------------------------------------------------------
/docs/phase-2-intelligence.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.959Z"
4 | last_validated: "2025-11-20T00:46:21.959Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | ---
8 |
9 | # Phase 2: Intelligence & Learning System
10 |
11 | DocuMCP Phase 2 introduces a comprehensive intelligence and learning system that makes the MCP server continuously smarter with each deployment. The system learns from historical data, user preferences, and deployment outcomes to provide increasingly accurate recommendations and insights.
12 |
13 | ## Overview
14 |
15 | Phase 2 consists of four major components:
16 |
17 | 1. **Historical Deployment Intelligence** (Phase 2.1)
18 | 2. **User Preference Management** (Phase 2.2)
19 | 3. **Deployment Outcome Tracking** (Phase 2.3)
20 | 4. **Deployment Analytics & Insights** (Phase 2.4)
21 |
22 | Together, these components create a self-improving feedback loop where deployment outcomes continuously inform and improve future recommendations.
23 |
24 | ## Phase 2.1: Historical Deployment Intelligence
25 |
26 | ### Overview
27 |
28 | The `recommend_ssg` tool now integrates with the Knowledge Graph to access historical deployment data from similar projects, providing data-driven recommendations based on real success patterns.
29 |
30 | ### Key Features
31 |
 32 | - **Similar Project Detection**: Finds projects with a similar technology stack
33 | - **Success Rate Analysis**: Calculates SSG-specific success rates from historical deployments
34 | - **Intelligent Scoring**: Boosts confidence scores for SSGs with proven success rates
35 | - **Context-Aware Recommendations**: Considers both current project and historical patterns
36 |
37 | ### Usage Example
38 |
39 | ```typescript
40 | // Recommendation with historical data
41 | const result = await recommendSSG({
42 | repository: "/path/to/project",
43 | primaryLanguage: "typescript",
44 | frameworks: ["react"],
45 | hasTests: true,
46 | hasCI: true
47 | });
48 |
49 | // Response includes historical data
50 | {
51 | recommended: "docusaurus",
52 | confidence: 0.95,
53 | reasoning: [
54 | "docusaurus has 100% success rate in similar projects",
55 | "5 deployment(s) across 2 similar project(s)",
56 | "React framework detected - excellent match for Docusaurus"
57 | ],
58 | historicalData: {
59 | similarProjectCount: 2,
60 | successRates: {
61 | docusaurus: { rate: 1.0, deployments: 5, projects: 2 }
62 | },
63 | topPerformer: { ssg: "docusaurus", rate: 1.0, deployments: 5 }
64 | }
65 | }
66 | ```
67 |
68 | ### Intelligence Features
69 |
 70 | 1. **Confidence Boosting**: SSGs with a historical success rate above 90% receive a +0.2 confidence boost
71 | 2. **Performance Switching**: Automatically switches to top performer if 20% better
72 | 3. **Alternative Suggestions**: Mentions high-performing alternatives in reasoning
73 | 4. **Statistical Context**: Includes deployment counts and project counts in recommendations
74 |
75 | ## Phase 2.2: User Preference Management
76 |
77 | ### Overview
78 |
79 | A comprehensive user preference system that personalizes recommendations based on individual user patterns and explicit preferences.
80 |
81 | ### User Preference Schema
82 |
83 | ```typescript
84 | interface UserPreferences {
85 | preferredSSGs: string[]; // Favorite SSGs
86 | documentationStyle: "minimal" | "comprehensive" | "tutorial-heavy";
87 | expertiseLevel: "beginner" | "intermediate" | "advanced";
88 | preferredTechnologies: string[]; // Favorite techs/frameworks
89 | preferredDiataxisCategories: (
90 | | "tutorials"
91 | | "how-to"
92 | | "reference"
93 | | "explanation"
94 | )[];
95 | autoApplyPreferences: boolean;
96 | }
97 | ```
98 |
99 | ### SSG Usage History
100 |
101 | The system automatically tracks SSG usage patterns:
102 |
103 | ```typescript
104 | interface SSGUsageHistory {
105 | ssg: string;
106 | usageCount: number;
107 | successCount: number;
108 | failureCount: number;
109 | successRate: number;
110 | lastUsed: string;
111 | projectTypes: string[];
112 | }
113 | ```
114 |
115 | ### Usage with manage_preferences Tool
116 |
117 | ```bash
118 | # Get current preferences
119 | manage_preferences({ action: "get", userId: "user123" })
120 |
121 | # Update preferences
122 | manage_preferences({
123 | action: "update",
124 | userId: "user123",
125 | preferences: {
126 | preferredSSGs: ["docusaurus", "hugo"],
127 | documentationStyle: "comprehensive",
128 | expertiseLevel: "intermediate",
129 | autoApplyPreferences: true
130 | }
131 | })
132 |
133 | # Get personalized SSG recommendations
134 | manage_preferences({
135 | action: "recommendations",
136 | userId: "user123"
137 | })
138 |
139 | # Export preferences (backup)
140 | manage_preferences({ action: "export", userId: "user123" })
141 |
142 | # Import preferences (restore)
143 | manage_preferences({
144 | action: "import",
145 | userId: "user123",
146 | json: "<exported-json-string>"
147 | })
148 |
149 | # Reset to defaults
150 | manage_preferences({ action: "reset", userId: "user123" })
151 | ```
152 |
153 | ### Preference Scoring Algorithm
154 |
155 | The system scores SSGs based on:
156 |
157 | 1. **Usage History** (40%): Frequency and success rate
158 | 2. **Explicit Preferences** (30%): User's preferred SSG list
159 | 3. **Project Compatibility** (30%): Match with project technologies
160 |
161 | ### Integration Points
162 |
163 | User preferences are automatically integrated into:
164 |
165 | - `recommend_ssg` - Personalized SSG recommendations
166 | - `populate_content` - Content style adaptation
167 | - `generate_config` - Configuration customization
168 |
169 | ## Phase 2.3: Deployment Outcome Tracking
170 |
171 | ### Overview
172 |
173 | The `deploy_pages` tool now tracks deployment outcomes in the Knowledge Graph, creating a feedback loop for continuous improvement.
174 |
175 | ### Enhanced deploy_pages Tool
176 |
177 | ```typescript
178 | // Deployment with tracking
179 | const result = await deployPages({
180 | repository: "/path/to/repo",
181 | ssg: "docusaurus",
182 | branch: "gh-pages",
183 |
184 | // New tracking parameters
185 | projectPath: "/path/to/repo",
186 | projectName: "My Awesome Project",
187 | analysisId: "analysis_123", // Link to analysis
188 | userId: "user123", // Link to user preferences
189 | });
190 | ```
191 |
192 | ### What Gets Tracked
193 |
194 | 1. **Project Metadata**
195 |
196 | - Project structure and languages
197 | - Technologies detected
198 | - CI/CD status
199 |
200 | 2. **Deployment Details**
201 |
202 | - SSG used
203 | - Success/failure status
204 | - Build time (milliseconds)
205 | - Error messages (if failed)
206 | - Timestamp
207 |
208 | 3. **User Association**
209 | - Links deployment to user
210 | - Updates user's SSG usage history
211 | - Feeds into preference learning
212 |
213 | ### Knowledge Graph Structure
214 |
215 | ```
216 | Project Node
217 | ├─→ [project_deployed_with] → Configuration Node (SSG)
218 | │ Properties: ssg, successRate, usageCount
219 | │
220 | └─→ [project_uses_technology] → Technology Nodes
221 | ```
222 |
223 | ### Deployment Edges
224 |
225 | Each deployment creates an edge with properties:
226 |
227 | ```typescript
228 | {
229 | type: "project_deployed_with",
230 | properties: {
231 | success: boolean,
232 | timestamp: string,
233 | buildTime?: number,
234 | errorMessage?: string
235 | }
236 | }
237 | ```
238 |
239 | ### Graceful Degradation
240 |
241 | Tracking failures don't affect deployment:
242 |
243 | - Deployment continues even if tracking fails
244 | - Warnings logged but not propagated
245 | - No impact on user workflow
246 |
247 | ## Phase 2.4: Deployment Analytics & Insights
248 |
249 | ### Overview
250 |
251 | Comprehensive analytics engine that identifies patterns, generates insights, and provides actionable recommendations based on deployment history.
252 |
253 | ### analyze_deployments Tool
254 |
255 | The tool supports 5 analysis types:
256 |
257 | #### 1. Full Report
258 |
259 | ```typescript
260 | analyzeDeployments({ analysisType: "full_report" });
261 | ```
262 |
263 | Returns comprehensive analytics:
264 |
265 | ```typescript
266 | {
267 | summary: {
268 | totalProjects: number,
269 | totalDeployments: number,
270 | overallSuccessRate: number,
271 | mostUsedSSG: string,
272 | mostSuccessfulSSG: string
273 | },
274 | patterns: DeploymentPattern[],
275 | insights: DeploymentInsight[],
276 | recommendations: string[]
277 | }
278 | ```
279 |
280 | #### 2. SSG Statistics
281 |
282 | ```typescript
283 | analyzeDeployments({
284 | analysisType: "ssg_stats",
285 | ssg: "docusaurus",
286 | });
287 | ```
288 |
289 | Returns detailed statistics for specific SSG:
290 |
291 | ```typescript
292 | {
293 | ssg: "docusaurus",
294 | totalDeployments: 15,
295 | successfulDeployments: 14,
296 | failedDeployments: 1,
297 | successRate: 0.93,
298 | averageBuildTime: 24500,
299 | commonTechnologies: ["typescript", "react"],
300 | projectCount: 8
301 | }
302 | ```
303 |
304 | #### 3. SSG Comparison
305 |
306 | ```typescript
307 | analyzeDeployments({
308 | analysisType: "compare",
309 | ssgs: ["docusaurus", "hugo", "mkdocs"],
310 | });
311 | ```
312 |
313 | Returns sorted comparison by success rate:
314 |
315 | ```typescript
316 | [
317 | { ssg: "hugo", pattern: { successRate: 1.0, ... } },
318 | { ssg: "docusaurus", pattern: { successRate: 0.93, ... } },
319 | { ssg: "mkdocs", pattern: { successRate: 0.75, ... } }
320 | ]
321 | ```
322 |
323 | #### 4. Health Score
324 |
325 | ```typescript
326 | analyzeDeployments({ analysisType: "health" });
327 | ```
328 |
329 | Returns 0-100 health score with factors:
330 |
331 | ```typescript
332 | {
333 | score: 78,
334 | factors: [
335 | {
336 | name: "Overall Success Rate",
337 | impact: 36,
338 | status: "good"
339 | },
340 | {
341 | name: "Active Projects",
342 | impact: 16,
343 | status: "good"
344 | },
345 | {
346 | name: "Deployment Activity",
347 | impact: 18,
348 | status: "good"
349 | },
350 | {
351 | name: "SSG Diversity",
352 | impact: 8,
353 | status: "warning"
354 | }
355 | ]
356 | }
357 | ```
358 |
359 | **Health Score Algorithm:**
360 |
361 | - Overall Success Rate: 40 points (0-40)
362 | - Active Projects: 20 points (0-20)
363 | - Deployment Activity: 20 points (0-20)
364 | - SSG Diversity: 20 points (0-20)
365 |
366 | **Status Thresholds:**
367 |
368 | - **Success Rate**: good >80%, warning >50%, critical ≤50%
369 | - **Projects**: good >5, warning >2, critical ≤2
370 | - **Deployments**: good >10, warning >5, critical ≤5
371 | - **Diversity**: good >3 SSGs, warning >1, critical ≤1
372 |
373 | #### 5. Trend Analysis
374 |
375 | ```typescript
376 | analyzeDeployments({
377 | analysisType: "trends",
378 | periodDays: 30, // Default: 30 days
379 | });
380 | ```
381 |
382 | Returns deployment trends over time:
383 |
384 | ```typescript
385 | [
386 | {
387 | period: "0 periods ago",
388 | deployments: 12,
389 | successRate: 0.92,
390 | topSSG: "docusaurus",
391 | },
392 | {
393 | period: "1 periods ago",
394 | deployments: 8,
395 | successRate: 0.88,
396 | topSSG: "hugo",
397 | },
398 | ];
399 | ```
400 |
401 | ### Insight Generation
402 |
403 | The analytics engine automatically generates insights:
404 |
405 | **Success Insights:**
406 |
407 | - High success rates (>80%)
408 | - Perfect track records (100% with ≥3 deployments)
409 | - Fast builds (<30s average)
410 |
411 | **Warning Insights:**
412 |
413 | - Low success rates (<50%)
414 | - Struggling SSGs (<50% success, ≥2 deployments)
415 | - Slow builds (>120s average)
416 |
417 | ### Smart Recommendations
418 |
419 | The system generates actionable recommendations:
420 |
421 | 1. **Best SSG Suggestion**: Recommends SSGs with >80% success rate
422 | 2. **Problem Identification**: Flags SSGs with <50% success and ≥3 failures
423 | 3. **Diversity Advice**: Suggests experimenting with different SSGs
424 | 4. **Activity Recommendations**: Encourages more deployments for better data
425 | 5. **Multi-Issue Alerts**: Warns when multiple deployment issues detected
426 |
427 | ### Usage Examples
428 |
429 | **Example 1: Get deployment overview**
430 |
431 | ```bash
432 | "Analyze my deployment history"
433 | # → Uses analyze_deployments with full_report
434 | ```
435 |
436 | **Example 2: Compare SSG performance**
437 |
438 | ```bash
439 | "Compare the success rates of Docusaurus and Hugo"
440 | # → Uses analyze_deployments with compare type
441 | ```
442 |
443 | **Example 3: Check deployment health**
444 |
445 | ```bash
446 | "What's the health score of my deployments?"
447 | # → Uses analyze_deployments with health type
448 | ```
449 |
450 | **Example 4: Identify trends**
451 |
452 | ```bash
453 | "Show me deployment trends over the last 60 days"
454 | # → Uses analyze_deployments with trends, periodDays: 60
455 | ```
456 |
457 | ## The Feedback Loop
458 |
459 | Phase 2 creates a continuous improvement cycle:
460 |
461 | ```
462 | 1. User deploys documentation (deploy_pages)
463 | ↓
464 | 2. Deployment outcome tracked (Phase 2.3)
465 | ↓
466 | 3. User preferences updated (Phase 2.2)
467 | ↓
468 | 4. Analytics identify patterns (Phase 2.4)
469 | ↓
470 | 5. Historical data enriched (Phase 2.1)
471 | ↓
472 | 6. Future recommendations improved
473 | ↓
474 | [Cycle continues with each deployment]
475 | ```
476 |
477 | ## Data Storage
478 |
479 | All Phase 2 data is stored in the Knowledge Graph:
480 |
481 | **Storage Location:**
482 |
483 | - Default: `~/.documcp/knowledge-graph.jsonl`
484 | - Custom: Set `DOCUMCP_STORAGE_DIR` environment variable
485 |
486 | **Data Format:**
487 |
488 | - JSONL (JSON Lines) format
489 | - One record per line
490 | - Efficient for append operations
491 | - Human-readable for debugging
492 |
493 | **Data Privacy:**
494 |
495 | - All data stored locally
496 | - No external transmission
497 | - User-specific via userId
498 | - Can be exported/imported
499 |
500 | ## Best Practices
501 |
502 | ### For Users
503 |
504 | 1. **Provide User ID**: Include `userId` in deploy_pages for personalized learning
505 | 2. **Link Deployments**: Use `analysisId` to connect analysis → deployment
506 | 3. **Review Analytics**: Periodically check `analyze_deployments` for insights
507 | 4. **Set Preferences**: Configure preferences early for better recommendations
508 | 5. **Track Projects**: Always provide `projectPath` and `projectName` for tracking
509 |
510 | ### For Developers
511 |
512 | 1. **Graceful Degradation**: Don't fail operations if tracking fails
513 | 2. **Efficient Queries**: Use Knowledge Graph indexes for performance
514 | 3. **Data Validation**: Validate all inputs before storage
515 | 4. **Privacy First**: Keep all data local, respect user boundaries
516 | 5. **Clear Errors**: Provide helpful error messages and resolutions
517 |
518 | ## Performance Considerations
519 |
520 | **Query Optimization:**
521 |
522 | - Knowledge Graph queries are O(n) where n = relevant nodes/edges
523 | - Use type filters to reduce search space
524 | - Cache frequently accessed data in UserPreferenceManager
525 |
526 | **Storage Growth:**
527 |
528 | - Each deployment adds ~2 nodes and 2 edges
529 | - JSONL format appends efficiently
530 | - Periodic pruning recommended for large datasets
531 |
532 | **Memory Usage:**
533 |
534 | - Knowledge Graph loaded into memory
535 | - Singleton pattern prevents multiple instances
536 | - UserPreferenceManager caches per user
537 |
538 | ## Future Enhancements
539 |
540 | Planned improvements for Phase 2:
541 |
542 | 1. **Machine Learning Integration**: Train models on deployment patterns
543 | 2. **Cross-User Insights**: Aggregate anonymous patterns (opt-in)
544 | 3. **Predictive Analytics**: Predict deployment success before execution
545 | 4. **Automated Optimization**: Auto-tune SSG configurations
546 | 5. **Advanced Visualizations**: Charts and graphs for analytics
547 | 6. **Export/Import**: Backup and restore full deployment history
548 | 7. **Multi-Tenancy**: Better isolation for team environments
549 |
550 | ## API Reference
551 |
552 | ### recommend_ssg (Enhanced)
553 |
554 | Now includes historical data integration.
555 |
556 | **Input:**
557 |
558 | ```typescript
559 | {
560 | repository: string,
561 | primaryLanguage?: string,
562 | frameworks?: string[],
563 | hasTests?: boolean,
564 | hasCI?: boolean,
565 | userId?: string // New: for preference integration
566 | }
567 | ```
568 |
569 | **Output:**
570 |
571 | ```typescript
572 | {
573 | recommended: string,
574 | confidence: number,
575 | reasoning: string[],
576 | historicalData?: {
577 | similarProjectCount: number,
578 | successRates: Record<string, { rate: number, deployments: number, projects: number }>,
579 | topPerformer?: { ssg: string, rate: number, deployments: number }
580 | },
581 | alternatives: Array<{ ssg: string, confidence: number }>
582 | }
583 | ```
584 |
585 | ### manage_preferences
586 |
587 | Manage user preferences and get personalized recommendations.
588 |
589 | **Actions:**
590 |
591 | - `get`: Retrieve current preferences
592 | - `update`: Update preferences
593 | - `reset`: Reset to defaults
594 | - `export`: Export as JSON
595 | - `import`: Import from JSON
596 | - `recommendations`: Get SSG recommendations based on preferences
597 |
598 | **Input:**
599 |
600 | ```typescript
601 | {
602 | action: "get" | "update" | "reset" | "export" | "import" | "recommendations",
603 | userId?: string, // Default: "default"
604 | preferences?: UserPreferences, // For update
605 | json?: string // For import
606 | }
607 | ```
608 |
609 | ### deploy_pages (Enhanced)
610 |
611 | Now tracks deployment outcomes.
612 |
613 | **New Parameters:**
614 |
615 | ```typescript
616 | {
617 | // Existing parameters
618 | repository: string,
619 | ssg: string,
620 | branch?: string,
621 | customDomain?: string,
622 |
623 | // New tracking parameters
624 | projectPath?: string, // Required for tracking
625 | projectName?: string, // Required for tracking
626 | analysisId?: string, // Link to analysis
627 | userId?: string // Default: "default"
628 | }
629 | ```
630 |
631 | ### analyze_deployments
632 |
633 | Analyze deployment patterns and generate insights.
634 |
635 | **Input:**
636 |
637 | ```typescript
638 | {
639 | analysisType?: "full_report" | "ssg_stats" | "compare" | "health" | "trends",
640 | ssg?: string, // Required for ssg_stats
641 | ssgs?: string[], // Required for compare (min 2)
642 | periodDays?: number // For trends (default: 30)
643 | }
644 | ```
645 |
646 | ## Testing
647 |
648 | Phase 2 includes comprehensive test coverage:
649 |
650 | - **Phase 2.1**: Historical integration tests (recommend-ssg-historical.test.ts)
651 | - **Phase 2.2**: User preference tests (manage-preferences.test.ts)
652 | - **Phase 2.3**: Deployment tracking tests (deploy-pages-tracking.test.ts)
653 | - **Phase 2.4**: Analytics tests (analyze-deployments.test.ts)
654 |
655 | **Run Phase 2 Tests:**
656 |
657 | ```bash
658 | npm test -- tests/tools/recommend-ssg-historical.test.ts
659 | npm test -- tests/tools/deploy-pages-tracking.test.ts
660 | npm test -- tests/tools/analyze-deployments.test.ts
661 | ```
662 |
663 | ## Troubleshooting
664 |
665 | ### Issue: Historical data not showing in recommendations
666 |
667 | **Solution:**
668 |
669 | - Ensure deployments are being tracked (check Knowledge Graph file)
670 | - Verify `projectPath` and `projectName` provided in deploy_pages
671 | - Check that similar projects exist in the graph
672 |
673 | ### Issue: User preferences not applying
674 |
675 | **Solution:**
676 |
677 | - Confirm `autoApplyPreferences: true` in preferences
678 | - Ensure `userId` matches between deploy_pages and manage_preferences
679 | - Verify preferences are saved (use `action: "get"`)
680 |
681 | ### Issue: Analytics showing no data
682 |
683 | **Solution:**
684 |
685 | - Check that deployments were tracked (look for project_deployed_with edges)
686 | - Verify Knowledge Graph file exists and is readable
687 | - Ensure DOCUMCP_STORAGE_DIR is set correctly
688 |
689 | ### Issue: Health score seems low
690 |
691 | **Solution:**
692 |
693 | - Review the 4 health factors individually
694 | - Check for failed deployments reducing success rate
695 | - Increase deployment activity for better scores
696 | - Try deploying with different SSGs for diversity
697 |
698 | ## Summary
699 |
700 | Phase 2 transforms DocuMCP from a stateless tool into an intelligent, learning system that continuously improves with use. By tracking deployments, learning user preferences, and analyzing patterns, DocuMCP provides increasingly accurate and personalized recommendations that help users make better documentation decisions.
701 |
702 | The self-improving feedback loop ensures that every deployment makes the system smarter, creating a virtuous cycle of continuous improvement that benefits all users.
703 |
```
--------------------------------------------------------------------------------
/src/tools/analyze-readme.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from "zod";
2 | import { promises as fs } from "fs";
3 | import path from "path";
4 | import { MCPToolResponse } from "../types/api.js";
5 |
// Input validation schema for analyzeReadme. Every optional field carries a
// default, so callers may supply a partial input object.
const AnalyzeReadmeInputSchema = z.object({
  // Filesystem path to the project root that contains the README.
  project_path: z.string().min(1, "Project path is required"),
  // Audience the README should serve; drives audience-specific advice.
  target_audience: z
    .enum([
      "community_contributors",
      "enterprise_users",
      "developers",
      "general",
    ])
    .optional()
    .default("community_contributors"),
  // How aggressively to suggest cuts and restructuring.
  optimization_level: z
    .enum(["light", "moderate", "aggressive"])
    .optional()
    .default("moderate"),
  // Target maximum README length in lines (bounded 50-1000).
  max_length_target: z.number().min(50).max(1000).optional().default(300),
});

// Input type inferred from the schema (post-parse, defaults applied).
export type AnalyzeReadmeInput = z.infer<typeof AnalyzeReadmeInputSchema>;
26 |
// Full result of a README analysis, aggregating the four dimension-specific
// metric groups plus derived opportunities, score, and recommendations.
interface ReadmeAnalysis {
  // README size versus the configured line budget.
  lengthAnalysis: {
    currentLines: number;
    currentWords: number;
    targetLines: number;
    exceedsTarget: boolean;
    // Lines to cut to meet the target (0 when within budget).
    reductionNeeded: number;
  };
  // Structural quality: heading layout and how easy the file is to scan.
  structureAnalysis: {
    // 0-100, built from spacing/lists/code-block/hierarchy signals.
    scannabilityScore: number;
    headingHierarchy: HeadingInfo[];
    sectionLengths: SectionLength[];
    hasProperSpacing: boolean;
  };
  // Presence of key content elements plus simple counts.
  contentAnalysis: {
    hasTldr: boolean;
    hasQuickStart: boolean;
    hasPrerequisites: boolean;
    hasTroubleshooting: boolean;
    codeBlockCount: number; // fenced code blocks
    linkCount: number; // markdown [text](url) links
  };
  // Community-health signals from README text and project root files.
  communityReadiness: {
    hasContributing: boolean;
    hasIssueTemplates: boolean;
    hasCodeOfConduct: boolean;
    hasSecurity: boolean;
    badgeCount: number;
  };
  // Sorted high-priority first (see generateOptimizationOpportunities).
  optimizationOpportunities: OptimizationOpportunity[];
  // 0-100 composite (25 points per dimension, see calculateOverallScore).
  overallScore: number;
  recommendations: string[];
}
60 |
// A single markdown ATX heading found in the README.
interface HeadingInfo {
  level: number; // 1-6 (number of leading '#')
  text: string; // heading text, trimmed
  line: number; // 1-based line number in the README
  sectionLength: number; // 0 until populated later
}

// Size metrics for the section under one heading.
interface SectionLength {
  heading: string;
  lines: number;
  words: number;
  tooLong: boolean; // set when the section exceeds the length thresholds
}

// One actionable improvement suggestion produced by the analysis.
interface OptimizationOpportunity {
  type:
    | "length_reduction"
    | "structure_improvement"
    | "content_enhancement"
    | "community_health";
  priority: "high" | "medium" | "low";
  description: string;
  impact: string; // why this change helps readers/contributors
  effort: "low" | "medium" | "high"; // estimated effort to implement
}
86 |
87 | /**
88 | * Analyzes README files for community health, accessibility, and onboarding effectiveness.
89 | *
90 | * Performs comprehensive README analysis including length assessment, structure evaluation,
91 | * content completeness, and community readiness scoring. Provides actionable recommendations
92 | * for improving README effectiveness and developer onboarding experience.
93 | *
94 | * @param input - The input parameters for README analysis
95 | * @param input.project_path - The file system path to the project containing the README
96 | * @param input.target_audience - The target audience for the README (default: "community_contributors")
97 | * @param input.optimization_level - The level of optimization to apply (default: "moderate")
98 | * @param input.max_length_target - Target maximum length in lines (default: 300)
99 | *
100 | * @returns Promise resolving to comprehensive README analysis results
101 | * @returns analysis - Complete analysis including length, structure, content, and community readiness
102 | * @returns nextSteps - Array of recommended next actions for README improvement
103 | *
104 | * @throws {Error} When project path is inaccessible or invalid
105 | * @throws {Error} When README file cannot be found or read
106 | * @throws {Error} When analysis processing fails
107 | *
108 | * @example
109 | * ```typescript
110 | * // Analyze README for community contributors
111 | * const result = await analyzeReadme({
112 | * project_path: "/path/to/project",
113 | * target_audience: "community_contributors",
114 | * optimization_level: "moderate"
115 | * });
116 | *
117 | * console.log(`README Score: ${result.data.analysis.overallScore}/100`);
118 | * console.log(`Recommendations: ${result.data.nextSteps.length} suggestions`);
119 | *
120 | * // Analyze for enterprise users with aggressive optimization
121 | * const enterprise = await analyzeReadme({
122 | * project_path: "/path/to/enterprise/project",
123 | * target_audience: "enterprise_users",
124 | * optimization_level: "aggressive",
125 | * max_length_target: 200
126 | * });
127 | * ```
128 | *
129 | * @since 1.0.0
130 | */
131 | export async function analyzeReadme(
132 | input: Partial<AnalyzeReadmeInput>,
133 | ): Promise<MCPToolResponse<{ analysis: ReadmeAnalysis; nextSteps: string[] }>> {
134 | const startTime = Date.now();
135 |
136 | try {
137 | // Validate input
138 | const validatedInput = AnalyzeReadmeInputSchema.parse(input);
139 | const {
140 | project_path,
141 | target_audience,
142 | optimization_level,
143 | max_length_target,
144 | } = validatedInput;
145 |
146 | // Find README file
147 | const readmePath = await findReadmeFile(project_path);
148 | if (!readmePath) {
149 | return {
150 | success: false,
151 | error: {
152 | code: "README_NOT_FOUND",
153 | message: "No README file found in the project directory",
154 | details:
155 | "Looked for README.md, README.txt, readme.md in project root",
156 | resolution: "Create a README.md file in the project root directory",
157 | },
158 | metadata: {
159 | toolVersion: "1.0.0",
160 | executionTime: Date.now() - startTime,
161 | timestamp: new Date().toISOString(),
162 | },
163 | };
164 | }
165 |
166 | // Read README content
167 | const readmeContent = await fs.readFile(readmePath, "utf-8");
168 |
169 | // Get project context
170 | const projectContext = await analyzeProjectContext(project_path);
171 |
172 | // Perform comprehensive analysis
173 | const lengthAnalysis = analyzeLengthMetrics(
174 | readmeContent,
175 | max_length_target,
176 | );
177 | const structureAnalysis = analyzeStructure(readmeContent);
178 | const contentAnalysis = analyzeContent(readmeContent);
179 | const communityReadiness = analyzeCommunityReadiness(
180 | readmeContent,
181 | projectContext,
182 | );
183 |
184 | // Generate optimization opportunities
185 | const optimizationOpportunities = generateOptimizationOpportunities(
186 | lengthAnalysis,
187 | structureAnalysis,
188 | contentAnalysis,
189 | communityReadiness,
190 | optimization_level,
191 | target_audience,
192 | );
193 |
194 | // Calculate overall score
195 | const overallScore = calculateOverallScore(
196 | lengthAnalysis,
197 | structureAnalysis,
198 | contentAnalysis,
199 | communityReadiness,
200 | );
201 |
202 | // Generate recommendations
203 | const recommendations = generateRecommendations(
204 | optimizationOpportunities,
205 | target_audience,
206 | optimization_level,
207 | );
208 |
209 | const analysis: ReadmeAnalysis = {
210 | lengthAnalysis,
211 | structureAnalysis,
212 | contentAnalysis,
213 | communityReadiness,
214 | optimizationOpportunities,
215 | overallScore,
216 | recommendations,
217 | };
218 |
219 | const nextSteps = generateNextSteps(analysis, optimization_level);
220 |
221 | return {
222 | success: true,
223 | data: {
224 | analysis,
225 | nextSteps,
226 | },
227 | metadata: {
228 | toolVersion: "1.0.0",
229 | executionTime: Date.now() - startTime,
230 | timestamp: new Date().toISOString(),
231 | analysisId: `readme-analysis-${Date.now()}`,
232 | },
233 | };
234 | } catch (error) {
235 | return {
236 | success: false,
237 | error: {
238 | code: "ANALYSIS_FAILED",
239 | message: "Failed to analyze README",
240 | details: error instanceof Error ? error.message : "Unknown error",
241 | resolution: "Check project path and README file accessibility",
242 | },
243 | metadata: {
244 | toolVersion: "1.0.0",
245 | executionTime: Date.now() - startTime,
246 | timestamp: new Date().toISOString(),
247 | },
248 | };
249 | }
250 | }
251 |
252 | async function findReadmeFile(projectPath: string): Promise<string | null> {
253 | const possibleNames = [
254 | "README.md",
255 | "README.txt",
256 | "readme.md",
257 | "Readme.md",
258 | "README",
259 | ];
260 |
261 | for (const name of possibleNames) {
262 | const filePath = path.join(projectPath, name);
263 | try {
264 | await fs.access(filePath);
265 | return filePath;
266 | } catch {
267 | continue;
268 | }
269 | }
270 |
271 | return null;
272 | }
273 |
274 | async function analyzeProjectContext(projectPath: string): Promise<any> {
275 | try {
276 | const files = await fs.readdir(projectPath);
277 | return {
278 | hasPackageJson: files.includes("package.json"),
279 | hasContributing: files.includes("CONTRIBUTING.md"),
280 | hasCodeOfConduct: files.includes("CODE_OF_CONDUCT.md"),
281 | hasSecurity: files.includes("SECURITY.md"),
282 | hasGithubDir: files.includes(".github"),
283 | hasDocsDir: files.includes("docs"),
284 | projectType: detectProjectType(files),
285 | };
286 | } catch {
287 | return {};
288 | }
289 | }
290 |
291 | function detectProjectType(files: string[]): string {
292 | if (files.includes("package.json")) return "javascript";
293 | if (files.includes("requirements.txt") || files.includes("setup.py"))
294 | return "python";
295 | if (files.includes("Cargo.toml")) return "rust";
296 | if (files.includes("go.mod")) return "go";
297 | if (files.includes("pom.xml") || files.includes("build.gradle"))
298 | return "java";
299 | return "unknown";
300 | }
301 |
302 | function analyzeLengthMetrics(content: string, targetLines: number) {
303 | const lines = content.split("\n");
304 | const words = content.split(/\s+/).length;
305 | const currentLines = lines.length;
306 |
307 | return {
308 | currentLines,
309 | currentWords: words,
310 | targetLines,
311 | exceedsTarget: currentLines > targetLines,
312 | reductionNeeded: Math.max(0, currentLines - targetLines),
313 | };
314 | }
315 |
316 | function analyzeStructure(content: string) {
317 | const lines = content.split("\n");
318 | const headings = extractHeadings(lines);
319 | const sectionLengths = calculateSectionLengths(lines, headings);
320 |
321 | // Calculate scannability score
322 | const hasGoodSpacing = /\n\s*\n/.test(content);
323 | const hasLists = /^\s*[-*+]\s+/m.test(content);
324 | const hasCodeBlocks = /```/.test(content);
325 | const properHeadingHierarchy = checkHeadingHierarchy(headings);
326 |
327 | const scannabilityScore = Math.round(
328 | (hasGoodSpacing ? 25 : 0) +
329 | (hasLists ? 25 : 0) +
330 | (hasCodeBlocks ? 25 : 0) +
331 | (properHeadingHierarchy ? 25 : 0),
332 | );
333 |
334 | return {
335 | scannabilityScore,
336 | headingHierarchy: headings,
337 | sectionLengths,
338 | hasProperSpacing: hasGoodSpacing,
339 | };
340 | }
341 |
342 | function extractHeadings(lines: string[]): HeadingInfo[] {
343 | const headings: HeadingInfo[] = [];
344 |
345 | lines.forEach((line, index) => {
346 | const match = line.match(/^(#{1,6})\s+(.+)$/);
347 | if (match) {
348 | headings.push({
349 | level: match[1].length,
350 | text: match[2].trim(),
351 | line: index + 1,
352 | sectionLength: 0, // Will be calculated later
353 | });
354 | }
355 | });
356 |
357 | return headings;
358 | }
359 |
360 | function calculateSectionLengths(
361 | lines: string[],
362 | headings: HeadingInfo[],
363 | ): SectionLength[] {
364 | const sections: SectionLength[] = [];
365 |
366 | headings.forEach((heading, index) => {
367 | const startLine = heading.line - 1;
368 | const endLine =
369 | index < headings.length - 1 ? headings[index + 1].line - 1 : lines.length;
370 |
371 | const sectionLines = lines.slice(startLine, endLine);
372 | const sectionText = sectionLines.join("\n");
373 | const wordCount = sectionText.split(/\s+/).length;
374 |
375 | sections.push({
376 | heading: heading.text,
377 | lines: sectionLines.length,
378 | words: wordCount,
379 | tooLong: sectionLines.length > 50 || wordCount > 500,
380 | });
381 | });
382 |
383 | return sections;
384 | }
385 |
386 | function checkHeadingHierarchy(headings: HeadingInfo[]): boolean {
387 | if (headings.length === 0) return false;
388 |
389 | // Check if starts with H1
390 | if (headings[0].level !== 1) return false;
391 |
392 | // Check for logical hierarchy
393 | for (let i = 1; i < headings.length; i++) {
394 | const levelDiff = headings[i].level - headings[i - 1].level;
395 | if (levelDiff > 1) return false; // Skipping levels
396 | }
397 |
398 | return true;
399 | }
400 |
401 | function analyzeContent(content: string) {
402 | return {
403 | hasTldr: content.includes("## TL;DR") || content.includes("# TL;DR"),
404 | hasQuickStart: /quick start|getting started|installation/i.test(content),
405 | hasPrerequisites: /prerequisite|requirement|dependencies/i.test(content),
406 | hasTroubleshooting: /troubleshoot|faq|common issues|problems/i.test(
407 | content,
408 | ),
409 | codeBlockCount: (content.match(/```/g) || []).length / 2,
410 | linkCount: (content.match(/\[.*?\]\(.*?\)/g) || []).length,
411 | };
412 | }
413 |
414 | function analyzeCommunityReadiness(content: string, projectContext: any) {
415 | return {
416 | hasContributing:
417 | /contributing|contribute/i.test(content) ||
418 | projectContext.hasContributing,
419 | hasIssueTemplates:
420 | /issue template|bug report/i.test(content) || projectContext.hasGithubDir,
421 | hasCodeOfConduct:
422 | /code of conduct/i.test(content) || projectContext.hasCodeOfConduct,
423 | hasSecurity: /security/i.test(content) || projectContext.hasSecurity,
424 | badgeCount: (content.match(/\[!\[.*?\]\(.*?\)\]\(.*?\)/g) || []).length,
425 | };
426 | }
427 |
428 | function generateOptimizationOpportunities(
429 | lengthAnalysis: any,
430 | structureAnalysis: any,
431 | contentAnalysis: any,
432 | communityReadiness: any,
433 | optimizationLevel: string,
434 | targetAudience: string,
435 | ): OptimizationOpportunity[] {
436 | const opportunities: OptimizationOpportunity[] = [];
437 |
438 | // Length reduction opportunities
439 | if (lengthAnalysis.exceedsTarget) {
440 | opportunities.push({
441 | type: "length_reduction",
442 | priority: "high",
443 | description: `README is ${lengthAnalysis.reductionNeeded} lines over target (${lengthAnalysis.currentLines}/${lengthAnalysis.targetLines})`,
444 | impact: "Improves scannability and reduces cognitive load for new users",
445 | effort: lengthAnalysis.reductionNeeded > 100 ? "high" : "medium",
446 | });
447 | }
448 |
449 | // Structure improvements
450 | if (structureAnalysis.scannabilityScore < 75) {
451 | opportunities.push({
452 | type: "structure_improvement",
453 | priority: "high",
454 | description: `Low scannability score (${structureAnalysis.scannabilityScore}/100)`,
455 | impact: "Makes README easier to navigate and understand quickly",
456 | effort: "medium",
457 | });
458 | }
459 |
460 | // Content enhancements
461 | if (!contentAnalysis.hasTldr) {
462 | opportunities.push({
463 | type: "content_enhancement",
464 | priority: "high",
465 | description: "Missing TL;DR section for quick project overview",
466 | impact: "Helps users quickly understand project value proposition",
467 | effort: "low",
468 | });
469 | }
470 |
471 | if (!contentAnalysis.hasQuickStart) {
472 | opportunities.push({
473 | type: "content_enhancement",
474 | priority: "medium",
475 | description: "Missing quick start section",
476 | impact: "Reduces time to first success for new users",
477 | effort: "medium",
478 | });
479 | }
480 |
481 | // Community health
482 | if (
483 | !communityReadiness.hasContributing &&
484 | targetAudience === "community_contributors"
485 | ) {
486 | opportunities.push({
487 | type: "community_health",
488 | priority: "medium",
489 | description: "Missing contributing guidelines",
490 | impact: "Encourages community participation and sets expectations",
491 | effort: "medium",
492 | });
493 | }
494 |
495 | return opportunities.sort((a, b) => {
496 | const priorityOrder = { high: 3, medium: 2, low: 1 };
497 | return priorityOrder[b.priority] - priorityOrder[a.priority];
498 | });
499 | }
500 |
501 | function calculateOverallScore(
502 | lengthAnalysis: any,
503 | structureAnalysis: any,
504 | contentAnalysis: any,
505 | communityReadiness: any,
506 | ): number {
507 | let score = 0;
508 |
509 | // Length score (25 points)
510 | score += lengthAnalysis.exceedsTarget
511 | ? Math.max(0, 25 - lengthAnalysis.reductionNeeded / 10)
512 | : 25;
513 |
514 | // Structure score (25 points)
515 | score += (structureAnalysis.scannabilityScore / 100) * 25;
516 |
517 | // Content score (25 points)
518 | const contentScore =
519 | (contentAnalysis.hasTldr ? 8 : 0) +
520 | (contentAnalysis.hasQuickStart ? 8 : 0) +
521 | (contentAnalysis.hasPrerequisites ? 5 : 0) +
522 | (contentAnalysis.codeBlockCount > 0 ? 4 : 0);
523 | score += Math.min(25, contentScore);
524 |
525 | // Community score (25 points)
526 | const communityScore =
527 | (communityReadiness.hasContributing ? 8 : 0) +
528 | (communityReadiness.hasCodeOfConduct ? 5 : 0) +
529 | (communityReadiness.hasSecurity ? 5 : 0) +
530 | (communityReadiness.badgeCount > 0 ? 4 : 0) +
531 | (communityReadiness.hasIssueTemplates ? 3 : 0);
532 | score += Math.min(25, communityScore);
533 |
534 | return Math.round(score);
535 | }
536 |
537 | function generateRecommendations(
538 | opportunities: OptimizationOpportunity[],
539 | targetAudience: string,
540 | optimizationLevel: string,
541 | ): string[] {
542 | const recommendations: string[] = [];
543 |
544 | // High priority opportunities first
545 | const highPriority = opportunities.filter((op) => op.priority === "high");
546 | highPriority.forEach((op) => {
547 | recommendations.push(`🚨 ${op.description} - ${op.impact}`);
548 | });
549 |
550 | // Audience-specific recommendations
551 | if (targetAudience === "community_contributors") {
552 | recommendations.push(
553 | "👥 Focus on community onboarding: clear contributing guidelines and issue templates",
554 | );
555 | } else if (targetAudience === "enterprise_users") {
556 | recommendations.push(
557 | "🏢 Emphasize security, compliance, and support channels",
558 | );
559 | }
560 |
561 | // Optimization level specific
562 | if (optimizationLevel === "aggressive") {
563 | recommendations.push(
564 | "⚡ Consider moving detailed documentation to separate files (docs/ directory)",
565 | );
566 | recommendations.push(
567 | "📝 Use progressive disclosure: expandable sections for advanced topics",
568 | );
569 | }
570 |
571 | return recommendations.slice(0, 8); // Limit to top 8 recommendations
572 | }
573 |
574 | function generateNextSteps(
575 | analysis: ReadmeAnalysis,
576 | optimizationLevel: string,
577 | ): string[] {
578 | const steps: string[] = [];
579 |
580 | if (analysis.overallScore < 60) {
581 | steps.push("🎯 Priority: Address critical issues first (score < 60)");
582 | }
583 |
584 | // Add specific next steps based on opportunities
585 | const highPriorityOps = analysis.optimizationOpportunities
586 | .filter((op) => op.priority === "high")
587 | .slice(0, 3);
588 |
589 | highPriorityOps.forEach((op) => {
590 | steps.push(`• ${op.description}`);
591 | });
592 |
593 | if (optimizationLevel !== "light") {
594 | steps.push(
595 | "📊 Run optimize_readme tool to get specific restructuring suggestions",
596 | );
597 | }
598 |
599 | steps.push("🔄 Re-analyze after changes to track improvement");
600 |
601 | return steps;
602 | }
603 |
```
--------------------------------------------------------------------------------
/tests/tools/generate-contextual-content.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Contextual Content Generator Tests (Phase 3)
3 | */
4 |
5 | import { handleGenerateContextualContent } from "../../src/tools/generate-contextual-content.js";
6 | import { promises as fs } from "fs";
7 | import { tmpdir } from "os";
8 | import { join } from "path";
9 | import { mkdtemp, rm } from "fs/promises";
10 |
describe("generate_contextual_content tool", () => {
  let tempDir: string;

  // Every test runs against a fresh temporary directory so generated
  // fixture files never leak between cases; afterEach removes it.
  beforeEach(async () => {
    tempDir = await mkdtemp(join(tmpdir(), "content-gen-test-"));
  });

  afterEach(async () => {
    await rm(tempDir, { recursive: true, force: true });
  });

  describe("Reference Documentation", () => {
    test("should generate function reference documentation", async () => {
      const sourceCode = `
/**
 * Calculates the sum of two numbers
 * @param a First number
 * @param b Second number
 * @returns The sum of a and b
 */
export function add(a: number, b: number): number {
return a + b;
}

/**
 * Multiplies two numbers
 */
export async function multiply(x: number, y: number): Promise<number> {
return x * y;
}
`.trim();

      const filePath = join(tempDir, "math.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "reference",
        includeExamples: true,
        style: "detailed",
        outputFormat: "markdown",
      });

      expect(result).toBeDefined();
      expect(result.content).toBeDefined();

      // The tool returns an MCP envelope; the JSON payload lives in
      // content[0].text (same unwrapping pattern throughout this suite).
      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);
      expect(data.data.sections).toBeDefined();

      const sections = data.data.sections;
      const functionRef = sections.find((s: any) =>
        s.title.includes("Function Reference"),
      );

      expect(functionRef).toBeDefined();
      expect(functionRef.content).toContain("add");
      expect(functionRef.content).toContain("multiply");
      expect(functionRef.category).toBe("reference");
    });

    test("should generate class reference documentation", async () => {
      const sourceCode = `
/**
 * Calculator class for math operations
 */
export class Calculator {
private value: number = 0;

/**
 * Adds a number to the current value
 */
public add(n: number): void {
this.value += n;
}

/**
 * Gets the current value
 */
public getValue(): number {
return this.value;
}
}
`.trim();

      const filePath = join(tempDir, "calculator.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "reference",
        style: "detailed",
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);

      const sections = data.data.sections;
      const classRef = sections.find((s: any) =>
        s.title.includes("Class Reference"),
      );

      expect(classRef).toBeDefined();
      expect(classRef.content).toContain("Calculator");
      expect(classRef.content).toContain("add");
      expect(classRef.content).toContain("getValue");
    });

    test("should generate interface reference documentation", async () => {
      const sourceCode = `
/**
 * User interface
 */
export interface User {
id: string;
name: string;
email: string;
isActive: boolean;
getProfile(): Promise<Profile>;
}

export interface Profile {
bio: string;
avatar: string;
}
`.trim();

      const filePath = join(tempDir, "user.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "reference",
        style: "detailed",
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);

      const sections = data.data.sections;
      const interfaceRef = sections.find((s: any) =>
        s.title.includes("Interface Reference"),
      );

      expect(interfaceRef).toBeDefined();
      expect(interfaceRef.content).toContain("User");
      expect(interfaceRef.content).toContain("Profile");
    });
  });

  describe("Tutorial Documentation", () => {
    test("should generate getting started tutorial", async () => {
      const sourceCode = `
export function initialize(config: object): void {
console.log("Initialized with", config);
}

export function process(data: string): string {
return data.toUpperCase();
}
`.trim();

      const filePath = join(tempDir, "api.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "tutorial",
        includeExamples: true,
        style: "detailed",
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);

      const sections = data.data.sections;
      const tutorial = sections.find((s: any) =>
        s.title.includes("Getting Started"),
      );

      expect(tutorial).toBeDefined();
      expect(tutorial.category).toBe("tutorial");
      expect(tutorial.content).toContain("Installation");
      expect(tutorial.content).toContain("Usage");
    });

    test("should include code examples in tutorials", async () => {
      const sourceCode = `
export function setupDatabase(connectionString: string): void {
// Setup code
}
`.trim();

      const filePath = join(tempDir, "db.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "tutorial",
        includeExamples: true,
      });

      const data = JSON.parse(result.content[0].text);
      const tutorial = data.data.sections[0];

      // Fenced code blocks signal that an example was embedded.
      expect(tutorial.content).toContain("```");
      expect(tutorial.content).toContain("setupDatabase");
    });
  });

  describe("How-To Documentation", () => {
    test("should generate async operations how-to", async () => {
      const sourceCode = `
export async function fetchData(url: string): Promise<any> {
const response = await fetch(url);
return response.json();
}

export async function saveData(data: any): Promise<void> {
// Save logic
}
`.trim();

      const filePath = join(tempDir, "async.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "how-to",
        includeExamples: true,
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);

      const sections = data.data.sections;
      const asyncHowTo = sections.find((s: any) => s.title.includes("Async"));

      expect(asyncHowTo).toBeDefined();
      expect(asyncHowTo.category).toBe("how-to");
      expect(asyncHowTo.content).toContain("async");
    });

    test("should generate class usage how-to", async () => {
      const sourceCode = `
export class DataProcessor {
public process(input: string): string {
return input.trim();
}

public async asyncProcess(input: string): Promise<string> {
return this.process(input);
}
}
`.trim();

      const filePath = join(tempDir, "processor.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "how-to",
        includeExamples: true,
      });

      const data = JSON.parse(result.content[0].text);
      const sections = data.data.sections;
      const classHowTo = sections.find((s: any) => s.title.includes("Class"));

      expect(classHowTo).toBeDefined();
      expect(classHowTo.content).toContain("DataProcessor");
    });
  });

  describe("Explanation Documentation", () => {
    test("should generate architecture explanation", async () => {
      const sourceCode = `
export class ComplexSystem {
private state: any = {};

public initialize(): void {}
public update(): void {}
public render(): void {}
}

export function createSystem(): ComplexSystem {
return new ComplexSystem();
}
`.trim();

      const filePath = join(tempDir, "system.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "explanation",
        style: "detailed",
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);

      const sections = data.data.sections;
      const explanation = sections.find((s: any) =>
        s.title.includes("Architecture"),
      );

      expect(explanation).toBeDefined();
      expect(explanation.category).toBe("explanation");
    });
  });

  describe("All Documentation Types", () => {
    test("should generate all Diataxis categories", async () => {
      const sourceCode = `
export async function apiFunction(param: string): Promise<void> {
console.log(param);
}
`.trim();

      const filePath = join(tempDir, "complete.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "all",
        includeExamples: true,
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);

      const sections = data.data.sections;
      expect(sections.length).toBeGreaterThan(1);

      // "all" must produce sections spanning more than one Diataxis category.
      const categories = new Set(sections.map((s: any) => s.category));
      expect(categories.size).toBeGreaterThan(1);
    });
  });

  describe("Output Formats", () => {
    test("should generate markdown format", async () => {
      const sourceCode = `export function test(): void {}`;
      const filePath = join(tempDir, "markdown.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "reference",
        outputFormat: "markdown",
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);
      expect(data.data.sections[0].content).toContain("#");
    });

    test("should support different output formats", async () => {
      const sourceCode = `export function test(): void {}`;
      const filePath = join(tempDir, "formats.ts");
      await fs.writeFile(filePath, sourceCode);

      const formats = ["markdown", "mdx", "html"];

      // Each supported format should produce a successful response.
      for (const format of formats) {
        const result = await handleGenerateContextualContent({
          filePath,
          documentationType: "reference",
          outputFormat: format as any,
        });

        const data = JSON.parse(result.content[0].text);
        expect(data.success).toBe(true);
      }
    });
  });

  describe("Documentation Styles", () => {
    test("should generate concise documentation", async () => {
      const sourceCode = `
export function shortDoc(a: number, b: number): number {
return a + b;
}
`.trim();

      const filePath = join(tempDir, "concise.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "reference",
        style: "concise",
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);
    });

    test("should generate detailed documentation", async () => {
      const sourceCode = `
export function detailedDoc(param: string): void {
console.log(param);
}
`.trim();

      const filePath = join(tempDir, "detailed.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "reference",
        style: "detailed",
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);
    });

    test("should generate verbose documentation", async () => {
      const sourceCode = `
export function verboseDoc(): void {}
`.trim();

      const filePath = join(tempDir, "verbose.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "reference",
        style: "verbose",
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);
    });
  });

  describe("Code Examples", () => {
    test("should include code examples when requested", async () => {
      const sourceCode = `
export function exampleFunction(x: number): number {
return x * 2;
}
`.trim();

      const filePath = join(tempDir, "examples.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "tutorial",
        includeExamples: true,
      });

      const data = JSON.parse(result.content[0].text);
      const tutorial = data.data.sections[0];

      expect(tutorial.content).toContain("```");
    });

    test("should skip code examples when not requested", async () => {
      const sourceCode = `
export function noExamples(): void {}
`.trim();

      const filePath = join(tempDir, "no-examples.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "reference",
        includeExamples: false,
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);
    });
  });

  describe("Metadata and Confidence", () => {
    test("should include metadata about generated content", async () => {
      const sourceCode = `
export function metadataTest(): void {}
export class MetadataClass {}
`.trim();

      const filePath = join(tempDir, "metadata.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "reference",
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);
      expect(data.data.metadata).toBeDefined();
      expect(data.data.metadata.codeAnalysis).toBeDefined();
      // Confidence is reported on a 0-100 scale.
      expect(data.data.metadata.confidence).toBeGreaterThanOrEqual(0);
      expect(data.data.metadata.confidence).toBeLessThanOrEqual(100);
    });

    test("should track code analysis metrics", async () => {
      const sourceCode = `
export function func1(): void {}
export function func2(): void {}
export class Class1 {}
export interface Interface1 {}
`.trim();

      const filePath = join(tempDir, "metrics.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "reference",
      });

      const data = JSON.parse(result.content[0].text);
      const metrics = data.data.metadata.codeAnalysis;

      // Counts must match the fixture: 2 functions, 1 class, 1 interface.
      expect(metrics.functions).toBe(2);
      expect(metrics.classes).toBe(1);
      expect(metrics.interfaces).toBe(1);
    });
  });

  describe("Error Handling", () => {
    test("should handle invalid file path", async () => {
      const result = await handleGenerateContextualContent({
        filePath: "/nonexistent/file.ts",
        documentationType: "reference",
      });

      expect(result).toBeDefined();
      const data = JSON.parse(result.content[0].text);

      // Missing files produce a structured error, not a thrown exception.
      expect(data.success).toBe(false);
      expect(data.error).toBeDefined();
    });

    test("should handle unsupported file types", async () => {
      const filePath = join(tempDir, "unsupported.txt");
      await fs.writeFile(filePath, "Not a code file");

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "reference",
      });

      expect(result).toBeDefined();
      const data = JSON.parse(result.content[0].text);

      // Should either fail or return empty results
      expect(data).toBeDefined();
    });

    test("should handle empty files", async () => {
      const filePath = join(tempDir, "empty.ts");
      await fs.writeFile(filePath, "");

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "reference",
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);
      expect(data.data.metadata.codeAnalysis.functions).toBe(0);
    });
  });

  describe("Recommendations and Next Steps", () => {
    test("should provide recommendations", async () => {
      const sourceCode = `export function test(): void {}`;
      const filePath = join(tempDir, "recs.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "all",
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);
      expect(data.recommendations).toBeDefined();
      expect(Array.isArray(data.recommendations)).toBe(true);
    });

    test("should provide next steps", async () => {
      const sourceCode = `export function test(): void {}`;
      const filePath = join(tempDir, "steps.ts");
      await fs.writeFile(filePath, sourceCode);

      const result = await handleGenerateContextualContent({
        filePath,
        documentationType: "reference",
      });

      const data = JSON.parse(result.content[0].text);
      expect(data.success).toBe(true);
      expect(data.nextSteps).toBeDefined();
      expect(Array.isArray(data.nextSteps)).toBe(true);
      expect(data.nextSteps.length).toBeGreaterThan(0);
    });
  });
});
618 |
```
--------------------------------------------------------------------------------
/src/utils/sitemap-generator.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Sitemap Generator Utility
3 | *
4 | * Generates and manages sitemap.xml files for documentation sites.
5 | * Follows the Sitemap 0.9 protocol: https://www.sitemaps.org/protocol.html
6 | */
7 |
import { promises as fs } from "fs";
import path from "path";
import { execFileSync, execSync } from "child_process";
11 |
/**
 * Sitemap URL entry with metadata.
 * Fields marked "internal use" support reporting/statistics and are
 * not serialized into the sitemap.xml output.
 */
export interface SitemapUrl {
  loc: string; // URL of the page
  lastmod?: string; // Last modification date (ISO 8601, date part: YYYY-MM-DD)
  changefreq?:
    | "always"
    | "hourly"
    | "daily"
    | "weekly"
    | "monthly"
    | "yearly"
    | "never";
  priority?: number; // Priority 0.0 to 1.0 (sitemap protocol default is 0.5)
  title?: string; // Page title (for internal use, not in XML)
  category?: string; // Diataxis category (for internal use)
}

/**
 * Sitemap generation options
 */
export interface SitemapOptions {
  baseUrl: string; // Base URL (e.g., https://user.github.io/repo)
  docsPath: string; // Documentation root directory
  includePatterns?: string[]; // Glob-style file patterns to include
  excludePatterns?: string[]; // Glob-style file patterns to exclude
  useGitHistory?: boolean; // Use git history for lastmod dates
  defaultChangeFreq?: SitemapUrl["changefreq"]; // Fallback when no category mapping applies
  defaultPriority?: number; // Fallback when no category mapping applies
}

/**
 * Sitemap statistics
 */
export interface SitemapStats {
  totalUrls: number;
  byCategory: Record<string, number>; // URL count per Diataxis category
  byChangeFreq: Record<string, number>; // URL count per change frequency
  lastGenerated: string;
}

/**
 * Default include patterns for common documentation formats
 */
const DEFAULT_INCLUDE_PATTERNS = ["**/*.md", "**/*.html", "**/*.mdx"];

/**
 * Default exclude patterns (build artifacts, VCS metadata, tooling state)
 */
const DEFAULT_EXCLUDE_PATTERNS = [
  "**/node_modules/**",
  "**/.git/**",
  "**/dist/**",
  "**/build/**",
  "**/.documcp/**",
];

/**
 * Priority mapping for Diataxis categories
 */
const DIATAXIS_PRIORITIES: Record<string, number> = {
  tutorial: 1.0, // Highest priority for learning
  "how-to": 0.9, // High priority for task guides
  reference: 0.8, // Important API documentation
  explanation: 0.7, // Conceptual documentation
  index: 0.9, // High priority for index pages
  home: 1.0, // Highest priority for home page
  default: 0.5, // Default for uncategorized
};

/**
 * Change frequency mapping based on documentation type
 */
const DIATAXIS_CHANGE_FREQ: Record<string, SitemapUrl["changefreq"]> = {
  tutorial: "monthly",
  "how-to": "monthly",
  reference: "weekly", // API docs change more frequently
  explanation: "monthly",
  index: "weekly",
  home: "weekly",
  default: "monthly",
};
95 |
96 | /**
97 | * Generate sitemap.xml from documentation files
98 | */
99 | export async function generateSitemap(options: SitemapOptions): Promise<{
100 | xml: string;
101 | urls: SitemapUrl[];
102 | stats: SitemapStats;
103 | }> {
104 | const {
105 | baseUrl,
106 | docsPath,
107 | includePatterns = DEFAULT_INCLUDE_PATTERNS,
108 | excludePatterns = DEFAULT_EXCLUDE_PATTERNS,
109 | useGitHistory = true,
110 | defaultChangeFreq = "monthly",
111 | defaultPriority = 0.5,
112 | } = options;
113 |
114 | // Discover documentation files
115 | const files = await discoverDocumentationFiles(
116 | docsPath,
117 | includePatterns,
118 | excludePatterns,
119 | );
120 |
121 | // Convert files to sitemap URLs
122 | const urls: SitemapUrl[] = [];
123 | for (const file of files) {
124 | const url = await createSitemapUrl(
125 | file,
126 | docsPath,
127 | baseUrl,
128 | useGitHistory,
129 | defaultChangeFreq,
130 | defaultPriority,
131 | );
132 | urls.push(url);
133 | }
134 |
135 | // Sort URLs by priority (descending) and then alphabetically
136 | urls.sort((a, b) => {
137 | const priorityDiff = (b.priority || 0) - (a.priority || 0);
138 | if (priorityDiff !== 0) return priorityDiff;
139 | return a.loc.localeCompare(b.loc);
140 | });
141 |
142 | // Generate XML
143 | const xml = generateSitemapXML(urls);
144 |
145 | // Calculate statistics
146 | const stats = calculateSitemapStats(urls);
147 |
148 | return { xml, urls, stats };
149 | }
150 |
151 | /**
152 | * Discover documentation files matching patterns
153 | */
154 | async function discoverDocumentationFiles(
155 | docsPath: string,
156 | includePatterns: string[],
157 | excludePatterns: string[],
158 | ): Promise<string[]> {
159 | const files: string[] = [];
160 |
161 | async function scanDirectory(dir: string): Promise<void> {
162 | try {
163 | const entries = await fs.readdir(dir, { withFileTypes: true });
164 |
165 | for (const entry of entries) {
166 | const fullPath = path.join(dir, entry.name);
167 | const relativePath = path.relative(docsPath, fullPath);
168 |
169 | // Check exclusion patterns (check both file and directory paths)
170 | if (shouldExclude(relativePath, excludePatterns)) {
171 | continue;
172 | }
173 |
174 | if (entry.isDirectory()) {
175 | // Check if directory path matches exclusion patterns
176 | const dirRelPath = relativePath + "/"; // Add trailing slash for directory matching
177 | if (shouldExclude(dirRelPath, excludePatterns)) {
178 | continue;
179 | }
180 | await scanDirectory(fullPath);
181 | } else if (entry.isFile()) {
182 | // Check inclusion patterns
183 | if (shouldInclude(relativePath, includePatterns)) {
184 | files.push(fullPath);
185 | }
186 | }
187 | }
188 | } catch (error) {
189 | // Directory might not exist or be accessible, skip it
190 | console.warn(`Could not scan directory ${dir}:`, error);
191 | }
192 | }
193 |
194 | await scanDirectory(docsPath);
195 | return files;
196 | }
197 |
198 | /**
199 | * Check if file should be included based on patterns
200 | */
201 | function shouldInclude(filePath: string, patterns: string[]): boolean {
202 | return patterns.some((pattern) => {
203 | const regex = patternToRegex(pattern);
204 | return regex.test(filePath);
205 | });
206 | }
207 |
208 | /**
209 | * Check if file should be excluded based on patterns
210 | */
211 | function shouldExclude(filePath: string, patterns: string[]): boolean {
212 | return patterns.some((pattern) => {
213 | const regex = patternToRegex(pattern);
214 | if (regex.test(filePath)) {
215 | return true;
216 | }
217 |
218 | // Special handling for directory patterns like "**/node_modules/**"
219 | // Check if any path segment matches the pattern
220 | if (pattern.includes("**/") && pattern.includes("/**")) {
221 | // Extract the directory name from pattern (e.g., "node_modules" from "**/node_modules/**")
222 | const match = pattern.match(/\*\*\/([^/*]+)\/\*\*/);
223 | if (match) {
224 | const dirName = match[1];
225 | const pathParts = filePath.split("/");
226 | // Check if this directory exists in the path
227 | if (pathParts.includes(dirName)) {
228 | return true;
229 | }
230 | }
231 | }
232 |
233 | return false;
234 | });
235 | }
236 |
237 | /**
238 | * Convert glob pattern to regex
239 | */
240 | function patternToRegex(pattern: string): RegExp {
241 | let escaped = pattern
242 | .replace(/\./g, "\\.")
243 | .replace(/\*\*/g, "@@DOUBLE_STAR@@")
244 | .replace(/\*/g, "[^/]*")
245 | .replace(/@@DOUBLE_STAR@@/g, ".*");
246 |
247 | // Handle leading **/ to match files in root or subdirectories
248 | // Pattern "**/*.md" should match both "file.md" and "dir/file.md"
249 | if (pattern.startsWith("**/")) {
250 | // Make the leading ".*/" optional by wrapping in (?:...)?
251 | escaped = escaped.replace(/^\.\*\//, "(?:.*/)?");
252 | }
253 |
254 | // For patterns like **/node_modules/**, match both exact and partial paths
255 | // This allows matching "node_modules" and "path/to/node_modules/file"
256 | const regexStr = `^${escaped}$`;
257 | return new RegExp(regexStr);
258 | }
259 |
260 | /**
261 | * Create sitemap URL entry from file
262 | */
263 | async function createSitemapUrl(
264 | filePath: string,
265 | docsPath: string,
266 | baseUrl: string,
267 | useGitHistory: boolean,
268 | defaultChangeFreq: SitemapUrl["changefreq"],
269 | defaultPriority: number,
270 | ): Promise<SitemapUrl> {
271 | const relativePath = path.relative(docsPath, filePath);
272 |
273 | // Convert file path to URL path
274 | let urlPath = relativePath
275 | .replace(/\\/g, "/") // Windows paths
276 | .replace(/\.md$/, ".html") // Markdown to HTML
277 | .replace(/\.mdx$/, ".html") // MDX to HTML
278 | .replace(/\/index\.html$/, "/") // index.html to directory
279 | .replace(/index\.html$/, ""); // Root index.html
280 |
281 | // Remove leading slash if present
282 | urlPath = urlPath.replace(/^\//, "");
283 |
284 | // Construct full URL
285 | const loc = `${baseUrl.replace(/\/$/, "")}/${urlPath}`;
286 |
287 | // Detect category from path
288 | const category = detectCategory(relativePath);
289 |
290 | // Get last modification date
291 | const lastmod = useGitHistory
292 | ? await getGitLastModified(filePath)
293 | : await getFileLastModified(filePath);
294 |
295 | // Determine priority based on category
296 | const priority = DIATAXIS_PRIORITIES[category] || defaultPriority;
297 |
298 | // Determine change frequency based on category
299 | const changefreq = DIATAXIS_CHANGE_FREQ[category] || defaultChangeFreq;
300 |
301 | // Extract title from file if possible
302 | const title = await extractTitle(filePath);
303 |
304 | return {
305 | loc,
306 | lastmod,
307 | changefreq,
308 | priority,
309 | title,
310 | category,
311 | };
312 | }
313 |
314 | /**
315 | * Detect Diataxis category from file path
316 | */
317 | function detectCategory(filePath: string): string {
318 | const lower = filePath.toLowerCase();
319 |
320 | // Check exact matches first for index/home pages
321 | if (lower === "readme.md" || lower === "index.md" || lower === "index.html")
322 | return "home";
323 |
324 | // Then check for category patterns
325 | if (lower.includes("tutorial")) return "tutorial";
326 | if (lower.includes("how-to") || lower.includes("howto")) return "how-to";
327 | if (lower.includes("reference") || lower.includes("api")) return "reference";
328 | if (lower.includes("explanation") || lower.includes("concept"))
329 | return "explanation";
330 | if (lower.includes("index")) return "index";
331 |
332 | return "default";
333 | }
334 |
335 | /**
336 | * Get last modified date from git history
337 | */
338 | async function getGitLastModified(
339 | filePath: string,
340 | ): Promise<string | undefined> {
341 | try {
342 | const timestamp = execSync(`git log -1 --format=%cI "${filePath}"`, {
343 | encoding: "utf-8",
344 | stdio: ["pipe", "pipe", "ignore"],
345 | }).trim();
346 |
347 | if (timestamp) {
348 | // Format as YYYY-MM-DD (sitemap.xml standard)
349 | return timestamp.split("T")[0];
350 | }
351 | } catch (error) {
352 | // Git command failed, fall back to file system
353 | }
354 |
355 | return getFileLastModified(filePath);
356 | }
357 |
358 | /**
359 | * Get last modified date from file system
360 | */
361 | async function getFileLastModified(filePath: string): Promise<string> {
362 | try {
363 | const stats = await fs.stat(filePath);
364 | // Format as YYYY-MM-DD
365 | return stats.mtime.toISOString().split("T")[0];
366 | } catch (error) {
367 | // If file doesn't exist, use current date
368 | return new Date().toISOString().split("T")[0];
369 | }
370 | }
371 |
372 | /**
373 | * Extract title from markdown or HTML file
374 | */
375 | async function extractTitle(filePath: string): Promise<string | undefined> {
376 | try {
377 | const content = await fs.readFile(filePath, "utf-8");
378 |
379 | // Try to extract from frontmatter first (highest priority)
380 | const frontmatterMatch = content.match(/^---\s*\ntitle:\s*(.+?)\n/m);
381 | if (frontmatterMatch) {
382 | return frontmatterMatch[1].trim().replace(/['"]/g, "");
383 | }
384 |
385 | // Try to extract from HTML title tag
386 | const htmlMatch = content.match(/<title>(.+?)<\/title>/i);
387 | if (htmlMatch) {
388 | return htmlMatch[1].trim();
389 | }
390 |
391 | // Try to extract from markdown heading (fallback)
392 | const mdMatch = content.match(/^#\s+(.+)$/m);
393 | if (mdMatch) {
394 | return mdMatch[1].trim();
395 | }
396 | } catch (error) {
397 | // Could not read file
398 | }
399 |
400 | return undefined;
401 | }
402 |
403 | /**
404 | * Generate sitemap XML from URLs
405 | */
406 | function generateSitemapXML(urls: SitemapUrl[]): string {
407 | const urlElements = urls
408 | .map((url) => {
409 | const parts = [" <url>", ` <loc>${escapeXml(url.loc)}</loc>`];
410 |
411 | if (url.lastmod) {
412 | parts.push(` <lastmod>${url.lastmod}</lastmod>`);
413 | }
414 |
415 | if (url.changefreq) {
416 | parts.push(` <changefreq>${url.changefreq}</changefreq>`);
417 | }
418 |
419 | if (url.priority !== undefined) {
420 | parts.push(` <priority>${url.priority.toFixed(1)}</priority>`);
421 | }
422 |
423 | parts.push(" </url>");
424 | return parts.join("\n");
425 | })
426 | .join("\n");
427 |
428 | return `<?xml version="1.0" encoding="UTF-8"?>
429 | <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
430 | ${urlElements}
431 | </urlset>`;
432 | }
433 |
434 | /**
435 | * Escape XML special characters
436 | */
437 | function escapeXml(str: string): string {
438 | return str
439 | .replace(/&/g, "&")
440 | .replace(/</g, "<")
441 | .replace(/>/g, ">")
442 | .replace(/"/g, """)
443 | .replace(/'/g, "'");
444 | }
445 |
446 | /**
447 | * Calculate sitemap statistics
448 | */
449 | function calculateSitemapStats(urls: SitemapUrl[]): SitemapStats {
450 | const byCategory: Record<string, number> = {};
451 | const byChangeFreq: Record<string, number> = {};
452 |
453 | for (const url of urls) {
454 | // Count by category
455 | const category = url.category || "default";
456 | byCategory[category] = (byCategory[category] || 0) + 1;
457 |
458 | // Count by change frequency
459 | const changefreq = url.changefreq || "monthly";
460 | byChangeFreq[changefreq] = (byChangeFreq[changefreq] || 0) + 1;
461 | }
462 |
463 | return {
464 | totalUrls: urls.length,
465 | byCategory,
466 | byChangeFreq,
467 | lastGenerated: new Date().toISOString(),
468 | };
469 | }
470 |
471 | /**
472 | * Parse existing sitemap.xml file
473 | */
474 | export async function parseSitemap(sitemapPath: string): Promise<SitemapUrl[]> {
475 | try {
476 | const xml = await fs.readFile(sitemapPath, "utf-8");
477 | const urls: SitemapUrl[] = [];
478 |
479 | // Simple XML parsing (no external dependencies)
480 | const urlMatches = xml.matchAll(/<url>([\s\S]*?)<\/url>/g);
481 |
482 | for (const match of urlMatches) {
483 | const urlBlock = match[1];
484 |
485 | const loc = urlBlock.match(/<loc>(.*?)<\/loc>/)?.[1];
486 | const lastmod = urlBlock.match(/<lastmod>(.*?)<\/lastmod>/)?.[1];
487 | const changefreq = urlBlock.match(
488 | /<changefreq>(.*?)<\/changefreq>/,
489 | )?.[1] as SitemapUrl["changefreq"];
490 | const priority = parseFloat(
491 | urlBlock.match(/<priority>(.*?)<\/priority>/)?.[1] || "0.5",
492 | );
493 |
494 | // Include all URLs, even those missing <loc>, for validation
495 | urls.push({
496 | loc: loc ? unescapeXml(loc) : "",
497 | lastmod,
498 | changefreq,
499 | priority,
500 | });
501 | }
502 |
503 | return urls;
504 | } catch (error) {
505 | throw new Error(`Failed to parse sitemap: ${error}`);
506 | }
507 | }
508 |
509 | /**
510 | * Unescape XML special characters
511 | */
512 | function unescapeXml(str: string): string {
513 | return str
514 | .replace(/'/g, "'")
515 | .replace(/"/g, '"')
516 | .replace(/>/g, ">")
517 | .replace(/</g, "<")
518 | .replace(/&/g, "&");
519 | }
520 |
521 | /**
522 | * Validate sitemap.xml structure
523 | */
524 | export async function validateSitemap(sitemapPath: string): Promise<{
525 | valid: boolean;
526 | errors: string[];
527 | warnings: string[];
528 | urlCount: number;
529 | }> {
530 | const errors: string[] = [];
531 | const warnings: string[] = [];
532 |
533 | try {
534 | // Check if file exists
535 | try {
536 | await fs.access(sitemapPath);
537 | } catch {
538 | errors.push("Sitemap file does not exist");
539 | return { valid: false, errors, warnings, urlCount: 0 };
540 | }
541 |
542 | // Parse sitemap
543 | const urls = await parseSitemap(sitemapPath);
544 |
545 | // Validate URL count
546 | if (urls.length === 0) {
547 | warnings.push("Sitemap contains no URLs");
548 | }
549 |
550 | if (urls.length > 50000) {
551 | errors.push("Sitemap contains more than 50,000 URLs (protocol limit)");
552 | }
553 |
554 | // Validate each URL
555 | for (let i = 0; i < urls.length; i++) {
556 | const url = urls[i];
557 |
558 | // Validate loc
559 | if (!url.loc) {
560 | errors.push(`URL #${i + 1}: Missing <loc> element`);
561 | continue;
562 | }
563 |
564 | if (!url.loc.startsWith("http://") && !url.loc.startsWith("https://")) {
565 | errors.push(
566 | `URL #${i + 1}: Invalid protocol (must be http:// or https://)`,
567 | );
568 | }
569 |
570 | if (url.loc.length > 2048) {
571 | errors.push(`URL #${i + 1}: URL exceeds 2048 characters`);
572 | }
573 |
574 | // Validate priority
575 | if (
576 | url.priority !== undefined &&
577 | (url.priority < 0 || url.priority > 1)
578 | ) {
579 | errors.push(`URL #${i + 1}: Priority must be between 0.0 and 1.0`);
580 | }
581 |
582 | // Validate lastmod format
583 | if (url.lastmod && !isValidDateFormat(url.lastmod)) {
584 | warnings.push(
585 | `URL #${i + 1}: Invalid lastmod format (should be ISO 8601)`,
586 | );
587 | }
588 | }
589 |
590 | return {
591 | valid: errors.length === 0,
592 | errors,
593 | warnings,
594 | urlCount: urls.length,
595 | };
596 | } catch (error) {
597 | errors.push(`Failed to validate sitemap: ${error}`);
598 | return { valid: false, errors, warnings, urlCount: 0 };
599 | }
600 | }
601 |
602 | /**
603 | * Check if date string is valid ISO 8601 format
604 | */
605 | function isValidDateFormat(dateStr: string): boolean {
606 | // Accept YYYY-MM-DD or full ISO 8601
607 | const regex = /^\d{4}-\d{2}-\d{2}(T\d{2}:\d{2}:\d{2}([+-]\d{2}:\d{2}|Z)?)?$/;
608 | return regex.test(dateStr);
609 | }
610 |
611 | /**
612 | * Update existing sitemap with new URLs
613 | */
614 | export async function updateSitemap(
615 | sitemapPath: string,
616 | options: SitemapOptions,
617 | ): Promise<{
618 | added: number;
619 | removed: number;
620 | updated: number;
621 | total: number;
622 | }> {
623 | // Generate new sitemap
624 | const { urls: newUrls } = await generateSitemap(options);
625 |
626 | // Parse existing sitemap if it exists
627 | let existingUrls: SitemapUrl[] = [];
628 | try {
629 | existingUrls = await parseSitemap(sitemapPath);
630 | } catch {
631 | // Sitemap doesn't exist or is invalid, create new one
632 | }
633 |
634 | // Create URL maps for comparison
635 | const existingMap = new Map(existingUrls.map((url) => [url.loc, url]));
636 | const newMap = new Map(newUrls.map((url) => [url.loc, url]));
637 |
638 | // Calculate differences
639 | const added = newUrls.filter((url) => !existingMap.has(url.loc)).length;
640 | const removed = existingUrls.filter((url) => !newMap.has(url.loc)).length;
641 | const updated = newUrls.filter((url) => {
642 | const existing = existingMap.get(url.loc);
643 | return existing && existing.lastmod !== url.lastmod;
644 | }).length;
645 |
646 | // Write updated sitemap
647 | const xml = generateSitemapXML(newUrls);
648 | await fs.writeFile(sitemapPath, xml, "utf-8");
649 |
650 | return {
651 | added,
652 | removed,
653 | updated,
654 | total: newUrls.length,
655 | };
656 | }
657 |
658 | /**
659 | * Get all URLs from sitemap
660 | */
661 | export async function listSitemapUrls(
662 | sitemapPath: string,
663 | ): Promise<SitemapUrl[]> {
664 | return parseSitemap(sitemapPath);
665 | }
666 |
```
--------------------------------------------------------------------------------
/src/memory/kg-code-integration.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Knowledge Graph Code Integration Module
3 | * Implements Phase 1.2: Documentation Context in Knowledge Graph
4 | *
5 | * Populates the knowledge graph with code file entities, documentation section entities,
6 | * and relationships between code and documentation for drift detection and coverage analysis.
7 | */
8 |
9 | import { promises as fs } from "fs";
10 | import path from "path";
11 | import crypto from "crypto";
12 | import { GraphNode, GraphEdge } from "./knowledge-graph.js";
13 | import { ExtractedContent } from "../utils/content-extractor.js";
14 | import { getKnowledgeGraph } from "./kg-integration.js";
15 | import { validateAndStoreDocumentationLinks } from "./kg-link-validator.js";
16 | import { ASTAnalyzer } from "../utils/ast-analyzer.js";
17 |
18 | /**
19 | * Create code file entities from repository source code
20 | */
21 | export async function createCodeFileEntities(
22 | projectId: string,
23 | repoPath: string,
24 | ): Promise<GraphNode[]> {
25 | const kg = await getKnowledgeGraph();
26 | const codeFiles: GraphNode[] = [];
27 |
28 | // Directories to scan for code
29 | const sourceDirs = ["src", "lib", "app", "packages"];
30 |
31 | for (const dir of sourceDirs) {
32 | const dirPath = path.join(repoPath, dir);
33 | try {
34 | await fs.access(dirPath);
35 | const files = await walkSourceFiles(dirPath, repoPath);
36 |
37 | for (const filePath of files) {
38 | try {
39 | const codeFileNode = await createCodeFileEntity(
40 | projectId,
41 | filePath,
42 | repoPath,
43 | );
44 | if (codeFileNode) {
45 | kg.addNode(codeFileNode);
46 | codeFiles.push(codeFileNode);
47 |
48 | // Create relationship: project -> code_file
49 | kg.addEdge({
50 | source: projectId,
51 | target: codeFileNode.id,
52 | type: "depends_on",
53 | weight: 1.0,
54 | confidence: 1.0,
55 | properties: {
56 | dependencyType: "contains",
57 | },
58 | });
59 | }
60 | } catch (error) {
61 | console.warn(`Failed to process file ${filePath}:`, error);
62 | }
63 | }
64 | } catch {
65 | // Directory doesn't exist, skip
66 | }
67 | }
68 |
69 | return codeFiles;
70 | }
71 |
72 | /**
73 | * Create a single code file entity
74 | */
75 | async function createCodeFileEntity(
76 | projectId: string,
77 | filePath: string,
78 | repoPath: string,
79 | ): Promise<GraphNode | null> {
80 | const content = await fs.readFile(filePath, "utf-8");
81 | const stats = await fs.stat(filePath);
82 | const relativePath = path.relative(repoPath, filePath);
83 | const ext = path.extname(filePath);
84 | const language = getLanguageFromExtension(ext);
85 |
86 | if (!language) return null;
87 |
88 | // Calculate content hash for change detection
89 | const contentHash = crypto.createHash("sha256").update(content).digest("hex");
90 |
91 | // Extract functions and classes using AST parsing
92 | const { functions, classes, imports, exports } = await extractCodeStructure(
93 | filePath,
94 | content,
95 | language,
96 | );
97 |
98 | // Estimate complexity
99 | const linesOfCode = content.split("\n").length;
100 | const complexity = estimateComplexity(linesOfCode, functions.length);
101 |
102 | const nodeId = `code_file:${projectId}:${relativePath.replace(
103 | /[/\\]/g,
104 | ":",
105 | )}`;
106 |
107 | return {
108 | id: nodeId,
109 | type: "code_file",
110 | label: path.basename(filePath),
111 | properties: {
112 | path: relativePath,
113 | language,
114 | functions,
115 | classes,
116 | dependencies: imports, // Now extracted via AST
117 | imports,
118 | exports,
119 | lastModified: stats.mtime.toISOString(),
120 | linesOfCode,
121 | contentHash,
122 | complexity,
123 | },
124 | weight: 1.0,
125 | lastUpdated: new Date().toISOString(),
126 | };
127 | }
128 |
129 | /**
130 | * Create documentation section entities from extracted content
131 | */
132 | export async function createDocumentationEntities(
133 | projectId: string,
134 | extractedContent: ExtractedContent,
135 | ): Promise<GraphNode[]> {
136 | const kg = await getKnowledgeGraph();
137 | const docSections: GraphNode[] = [];
138 |
139 | // Process README sections
140 | if (extractedContent.readme) {
141 | for (const section of extractedContent.readme.sections) {
142 | const docNode = createDocSectionEntity(
143 | projectId,
144 | "README.md",
145 | section.title,
146 | extractedContent.readme.content.substring(0, 1000), // First 1000 chars
147 | "reference",
148 | );
149 | kg.addNode(docNode);
150 | docSections.push(docNode);
151 |
152 | // Create relationship: project -> documentation_section
153 | kg.addEdge({
154 | source: projectId,
155 | target: docNode.id,
156 | type: "depends_on",
157 | weight: 1.0,
158 | confidence: 1.0,
159 | properties: {
160 | dependencyType: "contains",
161 | },
162 | });
163 | }
164 | }
165 |
166 | // Process existing docs
167 | for (const doc of extractedContent.existingDocs) {
168 | const docNode = createDocSectionEntity(
169 | projectId,
170 | doc.path,
171 | doc.title,
172 | doc.content,
173 | doc.category,
174 | );
175 | kg.addNode(docNode);
176 | docSections.push(docNode);
177 |
178 | // Create relationship: project -> documentation_section
179 | kg.addEdge({
180 | source: projectId,
181 | target: docNode.id,
182 | type: "depends_on",
183 | weight: 1.0,
184 | confidence: 1.0,
185 | properties: {
186 | dependencyType: "contains",
187 | },
188 | });
189 |
190 | // Validate external links in documentation (async, non-blocking)
191 | validateAndStoreDocumentationLinks(docNode.id, doc.content).catch((error) =>
192 | console.warn(`Failed to validate links in ${doc.path}:`, error.message),
193 | );
194 | }
195 |
196 | // Process ADRs
197 | for (const adr of extractedContent.adrs) {
198 | const docNode = createDocSectionEntity(
199 | projectId,
200 | `docs/adrs/${adr.number}-${adr.title}.md`,
201 | adr.title,
202 | adr.content,
203 | "explanation",
204 | );
205 | kg.addNode(docNode);
206 | docSections.push(docNode);
207 |
208 | kg.addEdge({
209 | source: projectId,
210 | target: docNode.id,
211 | type: "depends_on",
212 | weight: 1.0,
213 | confidence: 1.0,
214 | properties: {
215 | dependencyType: "contains",
216 | },
217 | });
218 | }
219 |
220 | return docSections;
221 | }
222 |
223 | /**
224 | * Create a single documentation section entity
225 | */
226 | function createDocSectionEntity(
227 | projectId: string,
228 | filePath: string,
229 | sectionTitle: string,
230 | content: string,
231 | category?: "tutorial" | "how-to" | "reference" | "explanation",
232 | ): GraphNode {
233 | const contentHash = crypto.createHash("sha256").update(content).digest("hex");
234 | const wordCount = content.split(/\s+/).length;
235 | const hasCodeExamples = /```/.test(content);
236 |
237 | // Extract referenced code files/functions from content
238 | const referencedCodeFiles = extractCodeReferences(content);
239 | const referencedFunctions = extractFunctionReferences(content);
240 | const referencedClasses = extractClassReferences(content);
241 |
242 | const nodeId = `documentation_section:${projectId}:${filePath.replace(
243 | /[/\\]/g,
244 | ":",
245 | )}:${sectionTitle.replace(/\s+/g, "_")}`;
246 |
247 | return {
248 | id: nodeId,
249 | type: "documentation_section",
250 | label: sectionTitle,
251 | properties: {
252 | filePath,
253 | sectionTitle,
254 | contentHash,
255 | referencedCodeFiles,
256 | referencedFunctions,
257 | referencedClasses,
258 | lastUpdated: new Date().toISOString(),
259 | category,
260 | effectivenessScore: hasCodeExamples ? 0.8 : 0.5,
261 | wordCount,
262 | hasCodeExamples,
263 | },
264 | weight: 1.0,
265 | lastUpdated: new Date().toISOString(),
266 | };
267 | }
268 |
269 | /**
270 | * Link code files to documentation sections
271 | */
272 | export async function linkCodeToDocs(
273 | codeFiles: GraphNode[],
274 | docSections: GraphNode[],
275 | ): Promise<GraphEdge[]> {
276 | const kg = await getKnowledgeGraph();
277 | const edges: GraphEdge[] = [];
278 |
279 | for (const docSection of docSections) {
280 | const { referencedCodeFiles, referencedFunctions, referencedClasses } =
281 | docSection.properties;
282 |
283 | // Create "references" edges: documentation_section -> code_file
284 | for (const codeFile of codeFiles) {
285 | const codeFilePath = codeFile.properties.path;
286 |
287 | // Check if doc references this code file
288 | if (
289 | referencedCodeFiles.includes(codeFilePath) ||
290 | referencedFunctions.some((fn: string) =>
291 | codeFile.properties.functions.includes(fn),
292 | ) ||
293 | referencedClasses.some((cls: string) =>
294 | codeFile.properties.classes.includes(cls),
295 | )
296 | ) {
297 | const edge = kg.addEdge({
298 | source: docSection.id,
299 | target: codeFile.id,
300 | type: "references",
301 | weight: 1.0,
302 | confidence: 0.8,
303 | properties: {
304 | referenceType: determineReferenceType(
305 | docSection.properties.category,
306 | ),
307 | isAccurate: true, // Assume accurate until drift detected
308 | lastVerified: new Date().toISOString(),
309 | },
310 | });
311 | edges.push(edge);
312 |
313 | // Create reverse "documents" edge: code_file -> documentation_section
314 | const documentsEdge = kg.addEdge({
315 | source: codeFile.id,
316 | target: docSection.id,
317 | type: "documents",
318 | weight: 1.0,
319 | confidence: 0.8,
320 | properties: {
321 | coverage: determineCoverage(
322 | referencedFunctions.length,
323 | codeFile.properties.functions.length,
324 | ),
325 | lastVerified: new Date().toISOString(),
326 | quality: "medium",
327 | },
328 | });
329 | edges.push(documentsEdge);
330 | }
331 | }
332 | }
333 |
334 | // Detect outdated documentation
335 | for (const docSection of docSections) {
336 | for (const edge of edges) {
337 | if (edge.source === docSection.id && edge.type === "references") {
338 | const codeFile = codeFiles.find((cf) => cf.id === edge.target);
339 | if (codeFile) {
340 | // Check if code has changed since doc was last updated
341 | const docUpdated = new Date(docSection.properties.lastUpdated);
342 | const codeUpdated = new Date(codeFile.properties.lastModified);
343 |
344 | if (codeUpdated > docUpdated) {
345 | // Simple heuristic for change type - could be enhanced with drift detector
346 | const changeType = "modification"; // AST-based diff available via DriftDetector
347 |
348 | const outdatedEdge = kg.addEdge({
349 | source: docSection.id,
350 | target: codeFile.id,
351 | type: "outdated_for",
352 | weight: 0.5,
353 | confidence: 0.9,
354 | properties: {
355 | detectedAt: new Date().toISOString(),
356 | changeType, // Enhanced from "unknown" - can integrate DriftDetector for precise diff
357 | severity: "medium",
358 | autoFixable: false,
359 | },
360 | });
361 | edges.push(outdatedEdge);
362 | }
363 | }
364 | }
365 | }
366 | }
367 |
368 | return edges;
369 | }
370 |
371 | // ============================================================================
372 | // Helper Functions
373 | // ============================================================================
374 |
375 | async function walkSourceFiles(
376 | dir: string,
377 | baseDir: string,
378 | files: string[] = [],
379 | ): Promise<string[]> {
380 | try {
381 | const entries = await fs.readdir(dir, { withFileTypes: true });
382 |
383 | for (const entry of entries) {
384 | const fullPath = path.join(dir, entry.name);
385 |
386 | if (
387 | entry.isDirectory() &&
388 | !entry.name.startsWith(".") &&
389 | entry.name !== "node_modules" &&
390 | entry.name !== "dist" &&
391 | entry.name !== "build"
392 | ) {
393 | await walkSourceFiles(fullPath, baseDir, files);
394 | } else if (entry.isFile()) {
395 | const ext = path.extname(entry.name);
396 | if (
397 | [
398 | ".js",
399 | ".ts",
400 | ".jsx",
401 | ".tsx",
402 | ".py",
403 | ".rb",
404 | ".go",
405 | ".java",
406 | ".rs",
407 | ".c",
408 | ".cpp",
409 | ".cs",
410 | ].includes(ext)
411 | ) {
412 | files.push(fullPath);
413 | }
414 | }
415 | }
416 | } catch {
417 | // Directory doesn't exist or can't be read
418 | }
419 |
420 | return files;
421 | }
422 |
423 | function getLanguageFromExtension(ext: string): string | null {
424 | const languageMap: Record<string, string> = {
425 | ".js": "javascript",
426 | ".jsx": "javascript",
427 | ".ts": "typescript",
428 | ".tsx": "typescript",
429 | ".py": "python",
430 | ".rb": "ruby",
431 | ".go": "go",
432 | ".java": "java",
433 | ".rs": "rust",
434 | ".c": "c",
435 | ".cpp": "cpp",
436 | ".cs": "csharp",
437 | ".php": "php",
438 | ".swift": "swift",
439 | ".kt": "kotlin",
440 | ".scala": "scala",
441 | };
442 |
443 | return languageMap[ext] || null;
444 | }
445 |
/**
 * Extract code structure using AST parsing (replaces regex-based extraction)
 * Addresses TODO: Use proper AST parsing instead of basic regex
 *
 * For TypeScript/JavaScript, the ASTAnalyzer is tried first; on failure it
 * falls through to a best-effort regex scan. Other languages (currently
 * only Python has patterns) use regex directly. Results are name lists
 * only — no signatures, locations, or deduplication.
 *
 * @param filePath - Path handed to the AST analyzer (TS/JS only)
 * @param content - Full file contents, used by all regex extraction
 * @param language - Language name from getLanguageFromExtension()
 * @returns Function names, class names, imported module specifiers, and
 *          exported identifiers found in the file
 */
async function extractCodeStructure(
  filePath: string,
  content: string,
  language: string,
): Promise<{
  functions: string[];
  classes: string[];
  imports: string[];
  exports: string[];
}> {
  const functions: string[] = [];
  const classes: string[] = [];
  const imports: string[] = [];
  const exports: string[] = [];

  // Use AST analyzer for TypeScript/JavaScript files
  if (language === "typescript" || language === "javascript") {
    try {
      const analyzer = new ASTAnalyzer();
      await analyzer.initialize();

      const astResult = await analyzer.analyzeFile(filePath);

      // A null astResult (analyzer declined the file) falls through to the
      // regex fallback below rather than returning empty results.
      if (astResult) {
        // Extract function names
        functions.push(...astResult.functions.map((f) => f.name));

        // Extract class names
        classes.push(...astResult.classes.map((c) => c.name));

        // Note: AST analyzer doesn't currently track dependencies per function/class
        // We'll extract imports from the code using regex as fallback
        // (only matches `import X from "mod"` forms; bare `import "mod"`
        // side-effect imports are not captured).
        const importMatches = content.matchAll(
          /import\s+.*?\s+from\s+['"]([^'"]+)['"]/g,
        );
        for (const match of importMatches) {
          imports.push(match[1]);
        }

        // Extract exports (check isExported flag)
        const exportedFunctions = astResult.functions
          .filter((f) => f.isExported)
          .map((f) => f.name);
        const exportedClasses = astResult.classes
          .filter((c) => c.isExported)
          .map((c) => c.name);
        exports.push(...exportedFunctions, ...exportedClasses);

        return { functions, classes, imports, exports };
      }
    } catch (error) {
      console.warn(
        `AST parsing failed for ${filePath}, falling back to regex:`,
        error,
      );
      // Fall through to regex-based extraction
    }
  }

  // Fallback: regex-based extraction for non-TS/JS or if AST fails
  if (language === "typescript" || language === "javascript") {
    // Extract function declarations
    const functionMatches = content.matchAll(
      /(?:export\s+)?(?:async\s+)?function\s+(\w+)/g,
    );
    for (const match of functionMatches) {
      functions.push(match[1]);
    }

    // Extract arrow functions assigned to const/let
    // NOTE(review): requires parenthesized parameters, so single-param
    // arrows like `const f = x => x` are not matched — confirm acceptable.
    const arrowFunctionMatches = content.matchAll(
      /(?:export\s+)?const\s+(\w+)\s*=\s*(?:async\s*)?\([^)]*\)\s*=>/g,
    );
    for (const match of arrowFunctionMatches) {
      functions.push(match[1]);
    }

    // Extract class declarations
    const classMatches = content.matchAll(/(?:export\s+)?class\s+(\w+)/g);
    for (const match of classMatches) {
      classes.push(match[1]);
    }

    // Extract imports
    const importMatches = content.matchAll(
      /import\s+.*?\s+from\s+['"]([^'"]+)['"]/g,
    );
    for (const match of importMatches) {
      imports.push(match[1]);
    }

    // Extract exports
    const exportMatches = content.matchAll(
      /export\s+(?:function|class|const|let|var)\s+(\w+)/g,
    );
    for (const match of exportMatches) {
      exports.push(match[1]);
    }
  } else if (language === "python") {
    const functionMatches = content.matchAll(/def\s+(\w+)/g);
    for (const match of functionMatches) {
      functions.push(match[1]);
    }

    const classMatches = content.matchAll(/class\s+(\w+)/g);
    for (const match of classMatches) {
      classes.push(match[1]);
    }

    // Extract Python imports
    // For `from X import Y` the module X is recorded; for a bare
    // `import A, B` the whole comma-separated list is pushed as one entry.
    const importMatches = content.matchAll(
      /(?:from\s+(\S+)\s+)?import\s+([^\n]+)/g,
    );
    for (const match of importMatches) {
      imports.push(match[1] || match[2].trim());
    }
  }

  // Languages without patterns above (ruby, go, java, ...) return empty lists.
  return { functions, classes, imports, exports };
}
570 |
571 | function estimateComplexity(
572 | linesOfCode: number,
573 | functionCount: number,
574 | ): "low" | "medium" | "high" {
575 | const score = linesOfCode + functionCount * 10;
576 |
577 | if (score < 100) return "low";
578 | if (score < 300) return "medium";
579 | return "high";
580 | }
581 |
582 | function extractCodeReferences(content: string): string[] {
583 | const references: string[] = [];
584 |
585 | // Extract file paths from markdown links and code blocks
586 | const filePathMatches = content.matchAll(/`([^`]+\.(ts|js|py|rb|go|java))`/g);
587 | for (const match of filePathMatches) {
588 | references.push(match[1]);
589 | }
590 |
591 | return references;
592 | }
593 |
594 | function extractFunctionReferences(content: string): string[] {
595 | const functions: string[] = [];
596 |
597 | // Extract function names from code blocks and inline code
598 | const functionMatches = content.matchAll(/`(\w+)\(\)`/g);
599 | for (const match of functionMatches) {
600 | functions.push(match[1]);
601 | }
602 |
603 | return functions;
604 | }
605 |
606 | function extractClassReferences(content: string): string[] {
607 | const classes: string[] = [];
608 |
609 | // Extract class names from code blocks (usually PascalCase)
610 | const classMatches = content.matchAll(/`([A-Z][a-zA-Z0-9]+)`/g);
611 | for (const match of classMatches) {
612 | if (!/\(\)$/.test(match[1])) {
613 | // Not a function call
614 | classes.push(match[1]);
615 | }
616 | }
617 |
618 | return classes;
619 | }
620 |
621 | function determineReferenceType(
622 | category?: "tutorial" | "how-to" | "reference" | "explanation",
623 | ): "example" | "api-reference" | "tutorial" | "explanation" {
624 | switch (category) {
625 | case "tutorial":
626 | return "tutorial";
627 | case "reference":
628 | return "api-reference";
629 | case "how-to":
630 | return "example";
631 | case "explanation":
632 | return "explanation";
633 | default:
634 | return "api-reference";
635 | }
636 | }
637 |
638 | function determineCoverage(
639 | referencedCount: number,
640 | totalCount: number,
641 | ): "partial" | "complete" | "comprehensive" {
642 | if (totalCount === 0) return "partial";
643 |
644 | const ratio = referencedCount / totalCount;
645 |
646 | if (ratio >= 0.8) return "comprehensive";
647 | if (ratio >= 0.5) return "complete";
648 | return "partial";
649 | }
650 |
```
--------------------------------------------------------------------------------
/src/tools/generate-technical-writer-prompts.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from "zod";
2 | import { MCPContentWrapper, NextStep } from "../types/api.js";
3 | import { promises as fs } from "fs";
4 | import { join } from "path";
5 |
// Input validation schema
// Validates and applies defaults to the arguments of the
// generate_technical_writer_prompts tool.
const GeneratePromptsInputSchema = z.object({
  // Path to the project being documented (must be non-empty).
  project_path: z.string().min(1, "Project path is required"),
  // Which prior analysis outputs to draw context from when building prompts.
  context_sources: z
    .array(
      z.enum([
        "repository_analysis",
        "readme_health",
        "documentation_gaps",
        "best_practices",
        "content_validation",
        "deployment_context",
      ]),
    )
    .optional()
    .default(["repository_analysis", "readme_health"]),
  // Primary audience the generated prompts should target.
  audience: z
    .enum(["developer", "end_user", "contributor", "enterprise", "mixed"])
    .optional()
    .default("mixed"),
  // Kinds of writing prompts to generate.
  prompt_types: z
    .array(
      z.enum([
        "content_generation",
        "style_improvement",
        "structure_guidance",
        "gap_filling",
        "audience_adaptation",
        "deployment_optimization",
      ]),
    )
    .optional()
    .default(["content_generation", "gap_filling"]),
  // How deeply the prompts integrate insights across tools.
  integration_level: z
    .enum(["basic", "comprehensive", "advanced"])
    .optional()
    .default("comprehensive"),
});
44 |
// Fully-parsed, defaulted input type inferred from the schema above.
type GeneratePromptsInput = z.infer<typeof GeneratePromptsInputSchema>;

// Context interfaces for cross-tool integration

/** Facts about the project gathered from repository analysis. */
interface ProjectContext {
  projectType: string;
  languages: string[];
  frameworks: string[];
  packageManager?: string;
  hasTests: boolean;
  hasCI: boolean;
  deploymentTarget?: string;
}

/** Current state of the project's documentation and its known issues. */
interface DocumentationContext {
  readmeExists: boolean;
  // NOTE(review): presumably a numeric health score from the readme_health
  // source — confirm the scale (0-1 vs 0-100) against that tool's output.
  readmeHealth?: number;
  documentationGaps: string[];
  bestPracticesScore?: number;
  contentIssues: string[];
  linkIssues: string[];
}

/** One generated prompt for a technical writer, with guidance metadata. */
interface TechnicalWriterPrompt {
  id: string;
  title: string;
  category: string;
  audience: string;
  priority: "high" | "medium" | "low";
  // The prompt text itself; context/expectedOutput describe its framing.
  prompt: string;
  context: string;
  expectedOutput: string;
  integrationHints: string[];
  relatedTools: string[];
}

/** Aggregate result returned by generateTechnicalWriterPrompts. */
interface PromptGenerationResult {
  prompts: TechnicalWriterPrompt[];
  contextSummary: {
    projectContext: ProjectContext;
    documentationContext: DocumentationContext;
    integrationLevel: string;
  };
  recommendations: string[];
  nextSteps: NextStep[];
  metadata: {
    totalPrompts: number;
    promptsByCategory: Record<string, number>;
    confidenceScore: number;
    generatedAt: string;
  };
}
96 |
97 | /**
98 | * Generate intelligent technical writer prompts based on comprehensive project analysis
99 | */
100 | export async function generateTechnicalWriterPrompts(
101 | input: Partial<GeneratePromptsInput>,
102 | ): Promise<
103 | MCPContentWrapper & {
104 | generation: PromptGenerationResult;
105 | nextSteps: NextStep[];
106 | }
107 | > {
108 | try {
109 | // Validate input
110 | const validatedInput = GeneratePromptsInputSchema.parse(input);
111 | const {
112 | project_path,
113 | context_sources,
114 | audience,
115 | prompt_types,
116 | integration_level,
117 | } = validatedInput;
118 |
119 | // Build comprehensive context by integrating multiple tool outputs
120 | const projectContext = await buildProjectContext(project_path);
121 | const documentationContext = await buildDocumentationContext(
122 | project_path,
123 | context_sources,
124 | );
125 |
126 | // Generate contextual prompts based on integrated analysis
127 | const prompts = await generateContextualPrompts(
128 | projectContext,
129 | documentationContext,
130 | audience,
131 | prompt_types,
132 | integration_level,
133 | );
134 |
135 | // Create recommendations based on cross-tool insights
136 | const recommendations = generateIntegrationRecommendations(
137 | projectContext,
138 | documentationContext,
139 | prompts,
140 | );
141 |
142 | const nextSteps = generateNextSteps(prompts, integration_level);
143 |
144 | const result: PromptGenerationResult = {
145 | prompts,
146 | contextSummary: {
147 | projectContext,
148 | documentationContext,
149 | integrationLevel: integration_level,
150 | },
151 | recommendations,
152 | nextSteps,
153 | metadata: {
154 | totalPrompts: prompts.length,
155 | promptsByCategory: categorizePrompts(prompts),
156 | confidenceScore: calculateConfidenceScore(
157 | projectContext,
158 | documentationContext,
159 | ),
160 | generatedAt: new Date().toISOString(),
161 | },
162 | };
163 |
164 | return {
165 | content: [
166 | {
167 | type: "text",
168 | text: `Generated ${prompts.length} intelligent technical writer prompts with ${integration_level} integration level`,
169 | },
170 | ],
171 | generation: result,
172 | nextSteps,
173 | isError: false,
174 | };
175 | } catch (error) {
176 | const emptyResult: PromptGenerationResult = {
177 | prompts: [],
178 | contextSummary: {
179 | projectContext: {
180 | projectType: "unknown",
181 | languages: [],
182 | frameworks: [],
183 | hasTests: false,
184 | hasCI: false,
185 | },
186 | documentationContext: {
187 | readmeExists: false,
188 | documentationGaps: [],
189 | contentIssues: [],
190 | linkIssues: [],
191 | },
192 | integrationLevel: "basic",
193 | },
194 | recommendations: [],
195 | nextSteps: [],
196 | metadata: {
197 | totalPrompts: 0,
198 | promptsByCategory: {},
199 | confidenceScore: 0,
200 | generatedAt: new Date().toISOString(),
201 | },
202 | };
203 |
204 | return {
205 | content: [
206 | {
207 | type: "text",
208 | text: `Error generating technical writer prompts: ${
209 | error instanceof Error ? error.message : "Unknown error"
210 | }`,
211 | },
212 | ],
213 | generation: emptyResult,
214 | nextSteps: [],
215 | isError: true,
216 | };
217 | }
218 | }
219 |
220 | /**
221 | * Build project context by analyzing repository structure
222 | */
223 | async function buildProjectContext(
224 | projectPath: string,
225 | ): Promise<ProjectContext> {
226 | try {
227 | const packageJsonPath = join(projectPath, "package.json");
228 | let projectType = "unknown";
229 | const languages: string[] = [];
230 | const frameworks: string[] = [];
231 | let packageManager = undefined;
232 |
233 | // Analyze package.json if it exists
234 | try {
235 | const packageJson = JSON.parse(
236 | await fs.readFile(packageJsonPath, "utf-8"),
237 | );
238 |
239 | // Determine project type from dependencies
240 | const deps = {
241 | ...packageJson.dependencies,
242 | ...packageJson.devDependencies,
243 | };
244 |
245 | if (deps["react"]) frameworks.push("React");
246 | if (deps["vue"]) frameworks.push("Vue");
247 | if (deps["angular"]) frameworks.push("Angular");
248 | if (deps["next"]) frameworks.push("Next.js");
249 | if (deps["express"]) frameworks.push("Express");
250 | if (deps["typescript"]) languages.push("TypeScript");
251 |
252 | languages.push("JavaScript");
253 | projectType = frameworks.length > 0 ? "web_application" : "library";
254 |
255 | // Detect package manager
256 | if (await fileExists(join(projectPath, "yarn.lock")))
257 | packageManager = "yarn";
258 | else if (await fileExists(join(projectPath, "pnpm-lock.yaml")))
259 | packageManager = "pnpm";
260 | else packageManager = "npm";
261 | } catch {
262 | // Fallback analysis for non-Node.js projects
263 | const files = await fs.readdir(projectPath);
264 |
265 | if (files.some((f) => f.endsWith(".py"))) {
266 | languages.push("Python");
267 | projectType = "python_application";
268 | }
269 | if (files.some((f) => f.endsWith(".rs"))) {
270 | languages.push("Rust");
271 | projectType = "rust_application";
272 | }
273 | if (files.some((f) => f.endsWith(".go"))) {
274 | languages.push("Go");
275 | projectType = "go_application";
276 | }
277 | }
278 |
279 | const hasTests = await hasTestFiles(projectPath);
280 | const hasCI = await hasCIConfig(projectPath);
281 |
282 | return {
283 | projectType,
284 | languages,
285 | frameworks,
286 | packageManager,
287 | hasTests,
288 | hasCI,
289 | };
290 | } catch (error) {
291 | return {
292 | projectType: "unknown",
293 | languages: [],
294 | frameworks: [],
295 | hasTests: false,
296 | hasCI: false,
297 | };
298 | }
299 | }
300 |
301 | /**
302 | * Build documentation context by integrating multiple tool outputs
303 | */
304 | async function buildDocumentationContext(
305 | projectPath: string,
306 | contextSources: string[],
307 | ): Promise<DocumentationContext> {
308 | const readmeExists = await fileExists(join(projectPath, "README.md"));
309 |
310 | // This would integrate with actual tool outputs in production
311 | // For now, we'll simulate the integration points
312 | const context: DocumentationContext = {
313 | readmeExists,
314 | documentationGaps: [],
315 | contentIssues: [],
316 | linkIssues: [],
317 | };
318 |
319 | // Simulate integration with analyze_readme tool
320 | if (contextSources.includes("readme_health") && readmeExists) {
321 | context.readmeHealth = 75; // Would come from evaluate_readme_health
322 | }
323 |
324 | // Simulate integration with detect_documentation_gaps tool
325 | if (contextSources.includes("documentation_gaps")) {
326 | context.documentationGaps = [
327 | "installation_guide",
328 | "api_reference",
329 | "contributing_guidelines",
330 | ];
331 | }
332 |
333 | // Simulate integration with readme_best_practices tool
334 | if (contextSources.includes("best_practices")) {
335 | context.bestPracticesScore = 68; // Would come from readme_best_practices
336 | }
337 |
338 | return context;
339 | }
340 |
341 | /**
342 | * Generate contextual prompts based on integrated analysis
343 | */
344 | async function generateContextualPrompts(
345 | projectContext: ProjectContext,
346 | documentationContext: DocumentationContext,
347 | audience: string,
348 | promptTypes: string[],
349 | integrationLevel: string,
350 | ): Promise<TechnicalWriterPrompt[]> {
351 | const prompts: TechnicalWriterPrompt[] = [];
352 |
353 | // Content generation prompts based on project context
354 | if (promptTypes.includes("content_generation")) {
355 | prompts.push({
356 | id: "project-overview-prompt",
357 | title: "Project Overview Generation",
358 | category: "content_generation",
359 | audience,
360 | priority: "high",
361 | prompt: `Generate a compelling project overview for a ${
362 | projectContext.projectType
363 | } built with ${projectContext.frameworks.join(
364 | ", ",
365 | )} and ${projectContext.languages.join(
366 | ", ",
367 | )}. Focus on the problem it solves and key benefits for ${audience} users.`,
368 | context: `Project uses ${projectContext.languages.join(
369 | ", ",
370 | )} with ${projectContext.frameworks.join(", ")} frameworks`,
371 | expectedOutput:
372 | "A clear, engaging project description that explains purpose, benefits, and target audience",
373 | integrationHints: [
374 | "Use analyze_repository output for technical accuracy",
375 | "Reference detect_documentation_gaps for missing context",
376 | "Align with readme_best_practices recommendations",
377 | ],
378 | relatedTools: [
379 | "analyze_repository",
380 | "detect_documentation_gaps",
381 | "readme_best_practices",
382 | ],
383 | });
384 | }
385 |
386 | // Gap filling prompts based on documentation analysis
387 | if (
388 | promptTypes.includes("gap_filling") &&
389 | documentationContext.documentationGaps.length > 0
390 | ) {
391 | for (const gap of documentationContext.documentationGaps) {
392 | prompts.push({
393 | id: `gap-fill-${gap}`,
394 | title: `Fill ${gap.replace("_", " ")} Gap`,
395 | category: "gap_filling",
396 | audience,
397 | priority: "high",
398 | prompt: `Create comprehensive ${gap.replace("_", " ")} content for a ${
399 | projectContext.projectType
400 | } project. Include practical examples and ${audience}-focused guidance.`,
401 | context: `Missing ${gap} identified by documentation gap analysis`,
402 | expectedOutput: `Complete ${gap.replace(
403 | "_",
404 | " ",
405 | )} section with examples and clear instructions`,
406 | integrationHints: [
407 | "Use repository analysis for technical context",
408 | "Reference best practices for structure",
409 | "Validate against content standards",
410 | ],
411 | relatedTools: [
412 | "detect_documentation_gaps",
413 | "validate_content",
414 | "setup_structure",
415 | ],
416 | });
417 | }
418 | }
419 |
420 | // Style improvement prompts based on health scores
421 | if (
422 | promptTypes.includes("style_improvement") &&
423 | documentationContext.readmeHealth &&
424 | documentationContext.readmeHealth < 80
425 | ) {
426 | prompts.push({
427 | id: "style-improvement-prompt",
428 | title: "Documentation Style Enhancement",
429 | category: "style_improvement",
430 | audience,
431 | priority: "medium",
432 | prompt: `Improve the writing style and clarity of existing documentation. Focus on ${audience} readability, consistent tone, and professional presentation.`,
433 | context: `Current README health score: ${documentationContext.readmeHealth}/100`,
434 | expectedOutput:
435 | "Refined documentation with improved clarity, consistency, and professional tone",
436 | integrationHints: [
437 | "Use evaluate_readme_health metrics for focus areas",
438 | "Apply readme_best_practices guidelines",
439 | "Validate improvements with content validation",
440 | ],
441 | relatedTools: [
442 | "evaluate_readme_health",
443 | "readme_best_practices",
444 | "validate_content",
445 | ],
446 | });
447 | }
448 |
449 | // Advanced integration prompts for comprehensive level
450 | if (integrationLevel === "comprehensive" || integrationLevel === "advanced") {
451 | prompts.push({
452 | id: "deployment-docs-prompt",
453 | title: "Deployment Documentation",
454 | category: "deployment_optimization",
455 | audience,
456 | priority: "medium",
457 | prompt: `Create deployment documentation that integrates with the recommended static site generator and deployment workflow. Include environment setup, build process, and troubleshooting.`,
458 | context: `Project has CI: ${projectContext.hasCI}, Package manager: ${projectContext.packageManager}`,
459 | expectedOutput:
460 | "Complete deployment guide with step-by-step instructions and troubleshooting",
461 | integrationHints: [
462 | "Use recommend_ssg output for deployment strategy",
463 | "Reference deploy_pages workflow",
464 | "Include verify_deployment checklist",
465 | ],
466 | relatedTools: [
467 | "recommend_ssg",
468 | "deploy_pages",
469 | "verify_deployment",
470 | "test_local_deployment",
471 | ],
472 | });
473 | }
474 |
475 | return prompts;
476 | }
477 |
478 | /**
479 | * Generate integration recommendations based on cross-tool insights
480 | */
481 | function generateIntegrationRecommendations(
482 | projectContext: ProjectContext,
483 | documentationContext: DocumentationContext,
484 | _prompts: TechnicalWriterPrompt[],
485 | ): string[] {
486 | const recommendations: string[] = [];
487 |
488 | recommendations.push(
489 | "Run analyze_repository first to establish comprehensive project context",
490 | );
491 |
492 | if (!documentationContext.readmeExists) {
493 | recommendations.push(
494 | "Use generate_readme_template to create initial README structure",
495 | );
496 | }
497 |
498 | if (documentationContext.documentationGaps.length > 0) {
499 | recommendations.push(
500 | "Execute detect_documentation_gaps to identify all missing content areas",
501 | );
502 | }
503 |
504 | if (projectContext.hasTests) {
505 | recommendations.push(
506 | "Include testing documentation using repository analysis insights",
507 | );
508 | }
509 |
510 | if (projectContext.hasCI) {
511 | recommendations.push(
512 | "Document CI/CD workflow using deployment tool integration",
513 | );
514 | }
515 |
516 | recommendations.push(
517 | "Validate all generated content using validate_content tool",
518 | );
519 | recommendations.push(
520 | "Check documentation links with check_documentation_links after content creation",
521 | );
522 |
523 | return recommendations;
524 | }
525 |
526 | /**
527 | * Generate next steps based on prompts and integration level
528 | */
529 | function generateNextSteps(
530 | prompts: TechnicalWriterPrompt[],
531 | integrationLevel: string,
532 | ): NextStep[] {
533 | const steps: NextStep[] = [];
534 |
535 | steps.push({
536 | action:
537 | "Execute high-priority prompts first to address critical documentation gaps",
538 | toolRequired: "generate_technical_writer_prompts",
539 | priority: "high",
540 | });
541 |
542 | steps.push({
543 | action: "Use generated prompts with AI writing tools for content creation",
544 | toolRequired: "optimize_readme",
545 | priority: "high",
546 | });
547 |
548 | steps.push({
549 | action: "Validate generated content using DocuMCP validation tools",
550 | toolRequired: "validate_content",
551 | priority: "medium",
552 | });
553 |
554 | if (integrationLevel === "comprehensive" || integrationLevel === "advanced") {
555 | steps.push({
556 | action: "Run full documentation workflow using integrated tool chain",
557 | toolRequired: "analyze_repository",
558 | priority: "medium",
559 | });
560 |
561 | steps.push({
562 | action: "Test documentation with target audience using deployment tools",
563 | toolRequired: "test_local_deployment",
564 | priority: "low",
565 | });
566 | }
567 |
568 | steps.push({
569 | action:
570 | "Iterate on content based on validation feedback and best practices analysis",
571 | toolRequired: "readme_best_practices",
572 | priority: "low",
573 | });
574 |
575 | return steps;
576 | }
577 |
578 | /**
579 | * Helper functions
580 | */
581 | async function fileExists(path: string): Promise<boolean> {
582 | try {
583 | await fs.access(path);
584 | return true;
585 | } catch {
586 | return false;
587 | }
588 | }
589 |
590 | async function hasTestFiles(projectPath: string): Promise<boolean> {
591 | try {
592 | const files = await fs.readdir(projectPath, { recursive: true });
593 | return files.some(
594 | (file) =>
595 | typeof file === "string" &&
596 | (file.includes("test") ||
597 | file.includes("spec") ||
598 | file.endsWith(".test.js") ||
599 | file.endsWith(".spec.js")),
600 | );
601 | } catch {
602 | return false;
603 | }
604 | }
605 |
606 | async function hasCIConfig(projectPath: string): Promise<boolean> {
607 | const ciFiles = [
608 | ".github/workflows",
609 | ".gitlab-ci.yml",
610 | "circle.yml",
611 | ".travis.yml",
612 | ];
613 |
614 | for (const ciFile of ciFiles) {
615 | if (await fileExists(join(projectPath, ciFile))) {
616 | return true;
617 | }
618 | }
619 |
620 | return false;
621 | }
622 |
623 | function categorizePrompts(
624 | prompts: TechnicalWriterPrompt[],
625 | ): Record<string, number> {
626 | const categories: Record<string, number> = {};
627 |
628 | for (const prompt of prompts) {
629 | categories[prompt.category] = (categories[prompt.category] || 0) + 1;
630 | }
631 |
632 | return categories;
633 | }
634 |
635 | function calculateConfidenceScore(
636 | projectContext: ProjectContext,
637 | documentationContext: DocumentationContext,
638 | ): number {
639 | let score = 50; // Base score
640 |
641 | // Increase confidence based on available context
642 | if (projectContext.projectType !== "unknown") score += 20;
643 | if (projectContext.languages.length > 0) score += 15;
644 | if (projectContext.frameworks.length > 0) score += 10;
645 | if (documentationContext.readmeExists) score += 5;
646 |
647 | return Math.min(score, 100);
648 | }
649 |
```
--------------------------------------------------------------------------------
/tests/tools/optimize-readme.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
2 | import { promises as fs } from "fs";
3 | import { join } from "path";
4 | import { optimizeReadme } from "../../src/tools/optimize-readme.js";
5 | import { tmpdir } from "os";
6 |
7 | describe("optimize_readme", () => {
8 | let testDir: string;
9 | let readmePath: string;
10 | let docsDir: string;
11 |
12 | beforeEach(async () => {
13 | // Create temporary test directory
14 | testDir = join(tmpdir(), `test-optimize-${Date.now()}`);
15 | await fs.mkdir(testDir, { recursive: true });
16 | readmePath = join(testDir, "README.md");
17 | docsDir = join(testDir, "docs");
18 | });
19 |
20 | afterEach(async () => {
21 | // Cleanup test directory
22 | try {
23 | await fs.rm(testDir, { recursive: true, force: true });
24 | } catch {
25 | // Ignore cleanup errors
26 | }
27 | });
28 |
29 | describe("input validation", () => {
30 | it("should require readme_path parameter", async () => {
31 | const result = await optimizeReadme({});
32 |
33 | expect(result.success).toBe(false);
34 | expect(result.error?.code).toBe("OPTIMIZATION_FAILED");
35 | });
36 |
37 | it("should handle non-existent README file", async () => {
38 | const result = await optimizeReadme({
39 | readme_path: "/non/existent/path/README.md",
40 | });
41 |
42 | expect(result.success).toBe(false);
43 | expect(result.error?.code).toBe("OPTIMIZATION_FAILED");
44 | });
45 |
46 | it("should handle missing README file", async () => {
47 | const result = await optimizeReadme({
48 | readme_path: join(testDir, "README.md"),
49 | });
50 |
51 | expect(result.success).toBe(false);
52 | expect(result.error?.code).toBe("OPTIMIZATION_FAILED");
53 | });
54 | });
55 |
56 | describe("TL;DR generation", () => {
57 | it("should generate TL;DR for README without one", async () => {
58 | const readmeContent = `# Awesome Project
59 |
60 | This is a comprehensive project that does many things. It provides solutions for various problems and offers extensive functionality for users.
61 |
62 | ## Installation
63 |
64 | To install this project, you need to follow several steps:
65 |
66 | 1. Clone the repository
67 | 2. Install dependencies
68 | 3. Configure settings
69 | 4. Run the application
70 |
71 | ## Usage
72 |
73 | The project can be used in multiple ways:
74 |
75 | - Command line interface
76 | - Web interface
77 | - API integration
78 | - Library usage
79 |
80 | ## Features
81 |
82 | - Feature 1: Does something important
83 | - Feature 2: Handles complex operations
84 | - Feature 3: Provides excellent performance
85 | - Feature 4: Offers great user experience`;
86 |
87 | await fs.writeFile(readmePath, readmeContent);
88 |
89 | const result = await optimizeReadme({
90 | readme_path: readmePath,
91 | strategy: "developer_focused",
92 | });
93 |
94 | expect(result.success).toBe(true);
95 | expect(result.data?.optimization.optimizedContent).toContain("## TL;DR");
96 | // TL;DR is generated as content, not a boolean flag
97 | expect(typeof result.data?.optimization.tldrGenerated).toBe("string");
98 | });
99 |
100 | it("should preserve existing TL;DR section", async () => {
101 | const readmeWithTldr = `# Project
102 |
103 | ## TL;DR
104 |
105 | Quick overview of the project.
106 |
107 | ## Details
108 |
109 | More detailed information here.`;
110 |
111 | await fs.writeFile(readmePath, readmeWithTldr);
112 |
113 | const result = await optimizeReadme({
114 | readme_path: readmePath,
115 | });
116 |
117 | expect(result.success).toBe(true);
118 | expect(result.data?.optimization.optimizedContent).toContain(
119 | "Quick overview of the project",
120 | );
121 | // Tool may still generate TL;DR content even when existing TL;DR is present
122 | });
123 | });
124 |
125 | describe("content restructuring", () => {
126 | it("should restructure verbose content", async () => {
127 | const verboseReadme = `# Project Title
128 |
129 | This project is an incredibly comprehensive solution that addresses multiple complex challenges in the software development ecosystem. It has been designed with careful consideration of industry best practices and incorporates cutting-edge technologies to deliver exceptional performance and reliability.
130 |
131 | ## Installation Process
132 |
133 | The installation process involves several detailed steps that must be followed precisely to ensure proper setup and configuration of the system:
134 |
135 | ### Prerequisites
136 |
137 | Before beginning the installation, please ensure that your system meets all the following requirements:
138 |
139 | - Operating System: Linux, macOS, or Windows 10+
140 | - Memory: At least 8GB RAM recommended for optimal performance
141 | - Storage: Minimum 2GB free disk space
142 | - Network: Stable internet connection for downloading dependencies
143 |
144 | ### Step-by-Step Installation
145 |
146 | 1. First, clone the repository using Git
147 | 2. Navigate to the project directory
148 | 3. Install all required dependencies
149 | 4. Configure environment variables
150 | 5. Initialize the database
151 | 6. Run initial setup scripts
152 | 7. Verify installation success
153 |
154 | ## Detailed Usage Instructions
155 |
156 | This section provides comprehensive guidance on how to effectively utilize all features and capabilities of the project.`;
157 |
158 | await fs.writeFile(readmePath, verboseReadme);
159 |
160 | const result = await optimizeReadme({
161 | readme_path: readmePath,
162 | max_length: 200,
163 | });
164 |
165 | expect(result.success).toBe(true);
166 | expect(
167 | result.data?.optimization.restructuringChanges.length,
168 | ).toBeGreaterThan(0);
169 | // Optimization may add TL;DR which can increase length
170 | expect(result.data?.optimization.optimizedContent.length).toBeGreaterThan(
171 | 0,
172 | );
173 | });
174 |
175 | it("should extract detailed sections to docs directory", async () => {
176 | const readmeWithDetailedSections = `# Project
177 |
178 | Brief project description.
179 |
180 | ## Quick Start
181 |
182 | \`\`\`bash
183 | npm install && npm start
184 | \`\`\`
185 |
186 | ## Detailed Installation Guide
187 |
188 | This is a very long and detailed installation guide that covers every possible scenario and edge case. It includes troubleshooting steps, advanced configuration options, and platform-specific instructions that would make the main README too long and overwhelming for most users.
189 |
190 | ### System Requirements
191 |
192 | Detailed system requirements here...
193 |
194 | ### Advanced Configuration
195 |
196 | Complex configuration details...
197 |
198 | ## Comprehensive API Documentation
199 |
200 | This section contains extensive API documentation with detailed examples, parameter descriptions, response formats, error codes, and usage patterns. This level of detail is better suited for separate documentation.
201 |
202 | ### Authentication
203 |
204 | Detailed authentication process...
205 |
206 | ### Endpoints
207 |
208 | Complete endpoint documentation...
209 |
210 | ## Contributing Guidelines
211 |
212 | Extensive contributing guidelines with detailed processes, code style requirements, testing procedures, and review processes.`;
213 |
214 | await fs.writeFile(readmePath, readmeWithDetailedSections);
215 |
216 | const result = await optimizeReadme({
217 | readme_path: readmePath,
218 | create_docs_directory: true,
219 | });
220 |
221 | expect(result.success).toBe(true);
222 | // Section extraction depends on content structure and may not always occur
223 | expect(result.data?.optimization.extractedSections).toBeDefined();
224 |
225 | // Check that docs directory creation was attempted (may not always create based on content)
226 | const docsExists = await fs
227 | .access(docsDir)
228 | .then(() => true)
229 | .catch(() => false);
230 | // Directory creation depends on content structure and extraction rules
231 | expect(typeof docsExists).toBe("boolean");
232 |
233 | // Optimized content should be generated successfully
234 | expect(result.data?.optimization.optimizedContent).toBeDefined();
235 | });
236 | });
237 |
238 | describe("audience-specific optimization", () => {
239 | it("should optimize for community contributors", async () => {
240 | const readmeContent = `# Open Source Project
241 |
242 | A project for the community.
243 |
244 | ## Installation
245 |
246 | Complex installation steps...
247 |
248 | ## Usage
249 |
250 | Basic usage info.
251 |
252 | ## Development
253 |
254 | Development setup instructions.`;
255 |
256 | await fs.writeFile(readmePath, readmeContent);
257 |
258 | const result = await optimizeReadme({
259 | readme_path: readmePath,
260 | strategy: "community_focused",
261 | });
262 |
263 | expect(result.success).toBe(true);
264 | // Community optimization focuses on accessibility and contribution info
265 | expect(result.data?.optimization.optimizedContent).toContain("## TL;DR");
266 | });
267 |
268 | it("should optimize for enterprise users", async () => {
269 | const readmeContent = `# Enterprise Solution
270 |
271 | A business solution.
272 |
273 | ## Features
274 |
275 | List of features...
276 |
277 | ## Installation
278 |
279 | Installation steps...`;
280 |
281 | await fs.writeFile(readmePath, readmeContent);
282 |
283 | const result = await optimizeReadme({
284 | readme_path: readmePath,
285 | strategy: "enterprise_focused",
286 | });
287 |
288 | expect(result.success).toBe(true);
289 | // Should focus on enterprise concerns
290 | expect(result.data?.optimization).toBeDefined();
291 | });
292 |
293 | it("should optimize for developers", async () => {
294 | const readmeContent = `# Developer Tool
295 |
296 | A tool for developers.
297 |
298 | ## Overview
299 |
300 | What it does...
301 |
302 | ## Setup
303 |
304 | How to set up...`;
305 |
306 | await fs.writeFile(readmePath, readmeContent);
307 |
308 | const result = await optimizeReadme({
309 | readme_path: readmePath,
310 | strategy: "developer_focused",
311 | });
312 |
313 | expect(result.success).toBe(true);
314 | // Developer optimization includes quick start information
315 | expect(result.data?.optimization.optimizedContent).toContain(
316 | "Quick start",
317 | );
318 | });
319 | });
320 |
321 | describe("optimization levels", () => {
322 | it("should apply conservative optimization", async () => {
323 | const readmeContent = `# Project
324 |
325 | This is a moderately long description that could be shortened but isn't extremely verbose.
326 |
327 | ## Installation
328 |
329 | Standard installation steps here.
330 |
331 | ## Usage
332 |
333 | Usage information with reasonable detail.`;
334 |
335 | await fs.writeFile(readmePath, readmeContent);
336 |
337 | const result = await optimizeReadme({
338 | readme_path: readmePath,
339 | max_length: 500,
340 | });
341 |
342 | expect(result.success).toBe(true);
343 | // Conservative should make minimal changes
344 | expect(
345 | result.data?.optimization.restructuringChanges.length,
346 | ).toBeLessThanOrEqual(2);
347 | });
348 |
349 | it("should apply aggressive optimization", async () => {
350 | const verboseReadme = Array(50)
351 | .fill(
352 | "# Section\n\nVery long content that repeats and could be significantly shortened.\n",
353 | )
354 | .join("\n");
355 | await fs.writeFile(readmePath, verboseReadme);
356 |
357 | const result = await optimizeReadme({
358 | readme_path: readmePath,
359 | max_length: 100,
360 | });
361 |
362 | expect(result.success).toBe(true);
363 | expect(
364 | result.data?.optimization.restructuringChanges.length,
365 | ).toBeGreaterThan(0);
366 | // Optimization may add TL;DR which can increase length
367 | expect(result.data?.optimization.optimizedContent.length).toBeGreaterThan(
368 | 0,
369 | );
370 | });
371 | });
372 |
373 | describe("file output", () => {
374 | it("should write optimized README to file", async () => {
375 | const readmeContent = `# Project\n\nOriginal content that will be optimized.`;
376 | await fs.writeFile(readmePath, readmeContent);
377 |
378 | const result = await optimizeReadme({
379 | readme_path: readmePath,
380 | output_path: readmePath,
381 | });
382 |
383 | expect(result.success).toBe(true);
384 |
385 | // Check that README was updated
386 | const updatedContent = await fs.readFile(readmePath, "utf-8");
387 | expect(updatedContent).not.toBe(readmeContent);
388 | expect(updatedContent).toContain("## TL;DR");
389 | });
390 |
391 | it("should create backup of original README", async () => {
392 | const originalContent = `# Original Project\n\nOriginal content.`;
393 | await fs.writeFile(readmePath, originalContent);
394 |
395 | const result = await optimizeReadme({
396 | readme_path: readmePath,
397 | output_path: readmePath,
398 | });
399 |
400 | expect(result.success).toBe(true);
401 |
402 | // Verify output was written successfully
403 | const outputContent = await fs.readFile(readmePath, "utf-8");
404 | expect(outputContent).toContain("## TL;DR");
405 | expect(outputContent.length).toBeGreaterThan(
406 | originalContent.length * 0.5,
407 | );
408 | });
409 |
410 | it("should create docs index when extracting sections", async () => {
411 | const readmeWithSections = `# Project
412 |
413 | Brief description.
414 |
415 | ## Detailed Installation
416 |
417 | Very detailed installation instructions that should be extracted.
418 |
419 | ## Advanced Configuration
420 |
421 | Complex configuration details that belong in docs.`;
422 |
423 | await fs.writeFile(readmePath, readmeWithSections);
424 |
425 | const result = await optimizeReadme({
426 | readme_path: readmePath,
427 | create_docs_directory: true,
428 | output_path: readmePath,
429 | });
430 |
431 | expect(result.success).toBe(true);
432 |
433 | if (
434 | result.data?.optimization.extractedSections &&
435 | result.data.optimization.extractedSections.length > 0
436 | ) {
437 | // Check that docs index was created
438 | const indexPath = join(docsDir, "index.md");
439 | const indexExists = await fs
440 | .access(indexPath)
441 | .then(() => true)
442 | .catch(() => false);
443 | expect(indexExists).toBe(true);
444 | }
445 | });
446 | });
447 |
448 | describe("recommendations and next steps", () => {
449 | it("should provide relevant recommendations", async () => {
450 | const basicReadme = `# Project\n\nBasic description without much structure.`;
451 | await fs.writeFile(readmePath, basicReadme);
452 |
453 | const result = await optimizeReadme({
454 | readme_path: readmePath,
455 | });
456 |
457 | expect(result.success).toBe(true);
458 | expect(result.data?.optimization.recommendations.length).toBeGreaterThan(
459 | 0,
460 | );
461 | expect(result.data?.nextSteps.length).toBeGreaterThan(0);
462 | });
463 |
464 | it("should prioritize recommendations by impact", async () => {
465 | const poorReadme = `ProjectWithoutProperStructure\nNo headings or organization.`;
466 | await fs.writeFile(readmePath, poorReadme);
467 |
468 | const result = await optimizeReadme({
469 | readme_path: readmePath,
470 | max_length: 50,
471 | });
472 |
473 | expect(result.success).toBe(true);
474 | expect(result.data?.optimization.recommendations.length).toBeGreaterThan(
475 | 0,
476 | );
477 | // Recommendations are provided based on content analysis
478 | });
479 | });
480 |
481 | describe("metadata and tracking", () => {
482 | it("should include optimization metadata", async () => {
483 | const readmeContent = `# Project\n\nContent to optimize.`;
484 | await fs.writeFile(readmePath, readmeContent);
485 |
486 | const result = await optimizeReadme({
487 | readme_path: readmePath,
488 | });
489 |
490 | expect(result.success).toBe(true);
491 | expect(result.metadata?.toolVersion).toBe("1.0.0");
492 | // Execution time may be 0 for very fast operations
493 | expect(result.metadata?.executionTime).toBeGreaterThanOrEqual(0);
494 | expect(result.metadata?.timestamp).toBeDefined();
495 | });
496 |
497 | it("should track optimization statistics", async () => {
498 | const longReadme = Array(20)
499 | .fill("# Section\n\nContent here.\n")
500 | .join("\n");
501 | await fs.writeFile(readmePath, longReadme);
502 |
503 | const result = await optimizeReadme({
504 | readme_path: readmePath,
505 | max_length: 400,
506 | });
507 |
508 | expect(result.success).toBe(true);
509 | expect(result.data?.optimization.originalLength).toBeGreaterThan(0);
510 | expect(result.data?.optimization.optimizedLength).toBeGreaterThan(0);
511 | // Reduction percentage can be negative when content is added (like TL;DR)
512 | expect(typeof result.data?.optimization.reductionPercentage).toBe(
513 | "number",
514 | );
515 | });
516 | });
517 |
518 | describe("error handling", () => {
519 | it("should handle file permission errors gracefully", async () => {
520 | const readmeContent = `# Project\n\nContent.`;
521 | await fs.writeFile(readmePath, readmeContent);
522 |
523 | // Make directory read-only to simulate permission error
524 | await fs.chmod(testDir, 0o444);
525 |
526 | const result = await optimizeReadme({
527 | readme_path: readmePath,
528 | output_path: readmePath,
529 | });
530 |
531 | // Restore permissions for cleanup
532 | await fs.chmod(testDir, 0o755);
533 |
534 | expect(result.success).toBe(false);
535 | expect(result.error?.code).toBe("OPTIMIZATION_FAILED");
536 | });
537 |
538 | it("should handle malformed README content", async () => {
539 | // Create README with unusual content
540 | const malformedContent = "\x00\x01\x02Invalid binary content\xFF\xFE";
541 | await fs.writeFile(readmePath, malformedContent, "binary");
542 |
543 | const result = await optimizeReadme({
544 | readme_path: readmePath,
545 | });
546 |
547 | // Tool handles malformed content gracefully
548 | expect(result.success).toBe(true);
549 | expect(result.data?.optimization.optimizedContent).toBeDefined();
550 | });
551 | });
552 |
  describe("integration scenarios", () => {
    it("should work with real-world README structure", async () => {
      // Representative open-source README: badges, table of contents,
      // install/quick-start snippets, API reference, contributing, license.
      const realWorldReadme = `# MyAwesome Project

[](https://travis-ci.org/user/project)
[](https://badge.fury.io/js/myproject)

> A comprehensive solution for modern web development challenges

## Table of Contents

- [Installation](#installation)
- [Quick Start](#quick-start)
- [API Reference](#api-reference)
- [Contributing](#contributing)
- [License](#license)

## Installation

\`\`\`bash
npm install myawesome-project
\`\`\`

## Quick Start

\`\`\`javascript
const project = require('myawesome-project');

project.init({
  apiKey: 'your-api-key',
  environment: 'production'
});
\`\`\`

## API Reference

### Methods

#### \`project.init(options)\`

Initialize the project with configuration options.

**Parameters:**
- \`options\` (Object): Configuration object
  - \`apiKey\` (String): Your API key
  - \`environment\` (String): Environment setting

## Contributing

Please read [CONTRIBUTING.md](CONTRIBUTING.md) for details on our code of conduct and the process for submitting pull requests.

## License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.`;

      await fs.writeFile(readmePath, realWorldReadme);

      const result = await optimizeReadme({
        readme_path: readmePath,
        strategy: "developer_focused",
        max_length: 400,
      });

      expect(result.success).toBe(true);
      // The developer_focused strategy is expected to add a TL;DR summary
      // while keeping the Quick Start section in the trimmed output.
      expect(result.data?.optimization.optimizedContent).toContain("TL;DR");
      expect(result.data?.optimization.optimizedContent).toContain(
        "Quick Start",
      );
    });
  });
623 | });
624 |
```