This is page 3 of 33. Use http://codebase.md/tosin2013/documcp?lines=true&page={x} to view the full context.
# Directory Structure
```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│ ├── agents
│ │ ├── documcp-ast.md
│ │ ├── documcp-deploy.md
│ │ ├── documcp-memory.md
│ │ ├── documcp-test.md
│ │ └── documcp-tool.md
│ ├── copilot-instructions.md
│ ├── dependabot.yml
│ ├── ISSUE_TEMPLATE
│ │ ├── automated-changelog.md
│ │ ├── bug_report.md
│ │ ├── bug_report.yml
│ │ ├── documentation_issue.md
│ │ ├── feature_request.md
│ │ ├── feature_request.yml
│ │ ├── npm-publishing-fix.md
│ │ └── release_improvements.md
│ ├── PULL_REQUEST_TEMPLATE.md
│ ├── release-drafter.yml
│ └── workflows
│ ├── auto-merge.yml
│ ├── ci.yml
│ ├── codeql.yml
│ ├── dependency-review.yml
│ ├── deploy-docs.yml
│ ├── README.md
│ ├── release-drafter.yml
│ └── release.yml
├── .gitignore
├── .husky
│ ├── commit-msg
│ └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── ARCHITECTURAL_CHANGES_SUMMARY.md
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│ ├── .docusaurus
│ │ ├── docusaurus-plugin-content-docs
│ │ │ └── default
│ │ │ └── __mdx-loader-dependency.json
│ │ └── docusaurus-plugin-content-pages
│ │ └── default
│ │ └── __plugin.json
│ ├── adrs
│ │ ├── adr-0001-mcp-server-architecture.md
│ │ ├── adr-0002-repository-analysis-engine.md
│ │ ├── adr-0003-static-site-generator-recommendation-engine.md
│ │ ├── adr-0004-diataxis-framework-integration.md
│ │ ├── adr-0005-github-pages-deployment-automation.md
│ │ ├── adr-0006-mcp-tools-api-design.md
│ │ ├── adr-0007-mcp-prompts-and-resources-integration.md
│ │ ├── adr-0008-intelligent-content-population-engine.md
│ │ ├── adr-0009-content-accuracy-validation-framework.md
│ │ ├── adr-0010-mcp-resource-pattern-redesign.md
│ │ ├── adr-0011-ce-mcp-compatibility.md
│ │ ├── adr-0012-priority-scoring-system-for-documentation-drift.md
│ │ ├── adr-0013-release-pipeline-and-package-distribution.md
│ │ └── README.md
│ ├── api
│ │ ├── .nojekyll
│ │ ├── assets
│ │ │ ├── hierarchy.js
│ │ │ ├── highlight.css
│ │ │ ├── icons.js
│ │ │ ├── icons.svg
│ │ │ ├── main.js
│ │ │ ├── navigation.js
│ │ │ ├── search.js
│ │ │ └── style.css
│ │ ├── hierarchy.html
│ │ ├── index.html
│ │ ├── modules.html
│ │ └── variables
│ │ └── TOOLS.html
│ ├── assets
│ │ └── logo.svg
│ ├── CE-MCP-FINDINGS.md
│ ├── development
│ │ └── MCP_INSPECTOR_TESTING.md
│ ├── docusaurus.config.js
│ ├── explanation
│ │ ├── architecture.md
│ │ └── index.md
│ ├── guides
│ │ ├── link-validation.md
│ │ ├── playwright-integration.md
│ │ └── playwright-testing-workflow.md
│ ├── how-to
│ │ ├── analytics-setup.md
│ │ ├── change-watcher.md
│ │ ├── custom-domains.md
│ │ ├── documentation-freshness-tracking.md
│ │ ├── drift-priority-scoring.md
│ │ ├── github-pages-deployment.md
│ │ ├── index.md
│ │ ├── llm-integration.md
│ │ ├── local-testing.md
│ │ ├── performance-optimization.md
│ │ ├── prompting-guide.md
│ │ ├── repository-analysis.md
│ │ ├── seo-optimization.md
│ │ ├── site-monitoring.md
│ │ ├── troubleshooting.md
│ │ └── usage-examples.md
│ ├── index.md
│ ├── knowledge-graph.md
│ ├── package-lock.json
│ ├── package.json
│ ├── phase-2-intelligence.md
│ ├── reference
│ │ ├── api-overview.md
│ │ ├── cli.md
│ │ ├── configuration.md
│ │ ├── deploy-pages.md
│ │ ├── index.md
│ │ ├── mcp-tools.md
│ │ └── prompt-templates.md
│ ├── research
│ │ ├── cross-domain-integration
│ │ │ └── README.md
│ │ ├── domain-1-mcp-architecture
│ │ │ ├── index.md
│ │ │ └── mcp-performance-research.md
│ │ ├── domain-2-repository-analysis
│ │ │ └── README.md
│ │ ├── domain-3-ssg-recommendation
│ │ │ ├── index.md
│ │ │ └── ssg-performance-analysis.md
│ │ ├── domain-4-diataxis-integration
│ │ │ └── README.md
│ │ ├── domain-5-github-deployment
│ │ │ ├── github-pages-security-analysis.md
│ │ │ └── index.md
│ │ ├── domain-6-api-design
│ │ │ └── README.md
│ │ ├── README.md
│ │ ├── research-integration-summary-2025-01-14.md
│ │ ├── research-progress-template.md
│ │ └── research-questions-2025-01-14.md
│ ├── robots.txt
│ ├── sidebars.js
│ ├── sitemap.xml
│ ├── src
│ │ └── css
│ │ └── custom.css
│ └── tutorials
│ ├── development-setup.md
│ ├── environment-setup.md
│ ├── first-deployment.md
│ ├── getting-started.md
│ ├── index.md
│ ├── memory-workflows.md
│ └── user-onboarding.md
├── ISSUE_IMPLEMENTATION_SUMMARY.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│ └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│ ├── benchmarks
│ │ └── performance.ts
│ ├── index.ts
│ ├── memory
│ │ ├── contextual-retrieval.ts
│ │ ├── deployment-analytics.ts
│ │ ├── enhanced-manager.ts
│ │ ├── export-import.ts
│ │ ├── freshness-kg-integration.ts
│ │ ├── index.ts
│ │ ├── integration.ts
│ │ ├── kg-code-integration.ts
│ │ ├── kg-health.ts
│ │ ├── kg-integration.ts
│ │ ├── kg-link-validator.ts
│ │ ├── kg-storage.ts
│ │ ├── knowledge-graph.ts
│ │ ├── learning.ts
│ │ ├── manager.ts
│ │ ├── multi-agent-sharing.ts
│ │ ├── pruning.ts
│ │ ├── schemas.ts
│ │ ├── storage.ts
│ │ ├── temporal-analysis.ts
│ │ ├── user-preferences.ts
│ │ └── visualization.ts
│ ├── prompts
│ │ └── technical-writer-prompts.ts
│ ├── scripts
│ │ └── benchmark.ts
│ ├── templates
│ │ └── playwright
│ │ ├── accessibility.spec.template.ts
│ │ ├── Dockerfile.template
│ │ ├── docs-e2e.workflow.template.yml
│ │ ├── link-validation.spec.template.ts
│ │ └── playwright.config.template.ts
│ ├── tools
│ │ ├── analyze-deployments.ts
│ │ ├── analyze-readme.ts
│ │ ├── analyze-repository.ts
│ │ ├── change-watcher.ts
│ │ ├── check-documentation-links.ts
│ │ ├── cleanup-agent-artifacts.ts
│ │ ├── deploy-pages.ts
│ │ ├── detect-gaps.ts
│ │ ├── evaluate-readme-health.ts
│ │ ├── generate-config.ts
│ │ ├── generate-contextual-content.ts
│ │ ├── generate-llm-context.ts
│ │ ├── generate-readme-template.ts
│ │ ├── generate-technical-writer-prompts.ts
│ │ ├── kg-health-check.ts
│ │ ├── manage-preferences.ts
│ │ ├── manage-sitemap.ts
│ │ ├── optimize-readme.ts
│ │ ├── populate-content.ts
│ │ ├── readme-best-practices.ts
│ │ ├── recommend-ssg.ts
│ │ ├── setup-playwright-tests.ts
│ │ ├── setup-structure.ts
│ │ ├── simulate-execution.ts
│ │ ├── sync-code-to-docs.ts
│ │ ├── test-local-deployment.ts
│ │ ├── track-documentation-freshness.ts
│ │ ├── update-existing-documentation.ts
│ │ ├── validate-content.ts
│ │ ├── validate-documentation-freshness.ts
│ │ ├── validate-readme-checklist.ts
│ │ └── verify-deployment.ts
│ ├── types
│ │ └── api.ts
│ ├── utils
│ │ ├── artifact-detector.ts
│ │ ├── ast-analyzer.ts
│ │ ├── change-watcher.ts
│ │ ├── code-scanner.ts
│ │ ├── content-extractor.ts
│ │ ├── drift-detector.ts
│ │ ├── execution-simulator.ts
│ │ ├── freshness-tracker.ts
│ │ ├── language-parsers-simple.ts
│ │ ├── llm-client.ts
│ │ ├── permission-checker.ts
│ │ ├── semantic-analyzer.ts
│ │ ├── sitemap-generator.ts
│ │ ├── usage-metadata.ts
│ │ └── user-feedback-integration.ts
│ └── workflows
│ └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│ ├── api
│ │ └── mcp-responses.test.ts
│ ├── benchmarks
│ │ └── performance.test.ts
│ ├── call-graph-builder.test.ts
│ ├── change-watcher-priority.integration.test.ts
│ ├── change-watcher.test.ts
│ ├── edge-cases
│ │ └── error-handling.test.ts
│ ├── execution-simulator.test.ts
│ ├── functional
│ │ └── tools.test.ts
│ ├── integration
│ │ ├── kg-documentation-workflow.test.ts
│ │ ├── knowledge-graph-workflow.test.ts
│ │ ├── mcp-readme-tools.test.ts
│ │ ├── memory-mcp-tools.test.ts
│ │ ├── readme-technical-writer.test.ts
│ │ └── workflow.test.ts
│ ├── memory
│ │ ├── contextual-retrieval.test.ts
│ │ ├── enhanced-manager.test.ts
│ │ ├── export-import.test.ts
│ │ ├── freshness-kg-integration.test.ts
│ │ ├── kg-code-integration.test.ts
│ │ ├── kg-health.test.ts
│ │ ├── kg-link-validator.test.ts
│ │ ├── kg-storage-validation.test.ts
│ │ ├── kg-storage.test.ts
│ │ ├── knowledge-graph-documentation-examples.test.ts
│ │ ├── knowledge-graph-enhanced.test.ts
│ │ ├── knowledge-graph.test.ts
│ │ ├── learning.test.ts
│ │ ├── manager-advanced.test.ts
│ │ ├── manager.test.ts
│ │ ├── mcp-resource-integration.test.ts
│ │ ├── mcp-tool-persistence.test.ts
│ │ ├── schemas-documentation-examples.test.ts
│ │ ├── schemas.test.ts
│ │ ├── storage.test.ts
│ │ ├── temporal-analysis.test.ts
│ │ └── user-preferences.test.ts
│ ├── performance
│ │ ├── memory-load-testing.test.ts
│ │ └── memory-stress-testing.test.ts
│ ├── prompts
│ │ ├── guided-workflow-prompts.test.ts
│ │ └── technical-writer-prompts.test.ts
│ ├── server.test.ts
│ ├── setup.ts
│ ├── tools
│ │ ├── all-tools.test.ts
│ │ ├── analyze-coverage.test.ts
│ │ ├── analyze-deployments.test.ts
│ │ ├── analyze-readme.test.ts
│ │ ├── analyze-repository.test.ts
│ │ ├── check-documentation-links.test.ts
│ │ ├── cleanup-agent-artifacts.test.ts
│ │ ├── deploy-pages-kg-retrieval.test.ts
│ │ ├── deploy-pages-tracking.test.ts
│ │ ├── deploy-pages.test.ts
│ │ ├── detect-gaps.test.ts
│ │ ├── evaluate-readme-health.test.ts
│ │ ├── generate-contextual-content.test.ts
│ │ ├── generate-llm-context.test.ts
│ │ ├── generate-readme-template.test.ts
│ │ ├── generate-technical-writer-prompts.test.ts
│ │ ├── kg-health-check.test.ts
│ │ ├── manage-sitemap.test.ts
│ │ ├── optimize-readme.test.ts
│ │ ├── readme-best-practices.test.ts
│ │ ├── recommend-ssg-historical.test.ts
│ │ ├── recommend-ssg-preferences.test.ts
│ │ ├── recommend-ssg.test.ts
│ │ ├── simple-coverage.test.ts
│ │ ├── sync-code-to-docs.test.ts
│ │ ├── test-local-deployment.test.ts
│ │ ├── tool-error-handling.test.ts
│ │ ├── track-documentation-freshness.test.ts
│ │ ├── validate-content.test.ts
│ │ ├── validate-documentation-freshness.test.ts
│ │ └── validate-readme-checklist.test.ts
│ ├── types
│ │ └── type-safety.test.ts
│ └── utils
│ ├── artifact-detector.test.ts
│ ├── ast-analyzer.test.ts
│ ├── content-extractor.test.ts
│ ├── drift-detector-diataxis.test.ts
│ ├── drift-detector-priority.test.ts
│ ├── drift-detector.test.ts
│ ├── freshness-tracker.test.ts
│ ├── llm-client.test.ts
│ ├── semantic-analyzer.test.ts
│ ├── sitemap-generator.test.ts
│ ├── usage-metadata.test.ts
│ └── user-feedback-integration.test.ts
├── tsconfig.json
└── typedoc.json
```
# Files
--------------------------------------------------------------------------------
/src/scripts/benchmark.ts:
--------------------------------------------------------------------------------
```typescript
1 | #!/usr/bin/env node
2 | // Performance benchmark CLI script per PERF-001 rules
3 | import { promises as fs } from "fs";
4 | import path from "path";
5 | import { createBenchmarker } from "../benchmarks/performance.js";
6 |
/**
 * Shape of the JSON configuration file consumed by `runBenchmarks`
 * (default location: ./benchmark-config.json, see `createDefaultConfig`).
 */
interface BenchmarkConfig {
  // Repositories to benchmark in one suite run.
  testRepos: Array<{
    path: string; // filesystem path to the repository root
    name: string; // human-readable label used in the printed report
    expectedSize?: "small" | "medium" | "large"; // PERF-001 size bucket hint
  }>;
  outputDir?: string; // when set, results are exported here as timestamped JSON
  verbose?: boolean; // NOTE(review): defined in the config shape but not read anywhere in this script — confirm intended use
}
16 |
17 | async function main() {
18 | const args = process.argv.slice(2);
19 | const command = args[0] || "help";
20 |
21 | switch (command) {
22 | case "run":
23 | await runBenchmarks(args.slice(1));
24 | break;
25 | case "current":
26 | await benchmarkCurrentRepo();
27 | break;
28 | case "create-config":
29 | await createDefaultConfig();
30 | break;
31 | case "help":
32 | default:
33 | printHelp();
34 | break;
35 | }
36 | }
37 |
38 | async function runBenchmarks(args: string[]) {
39 | const configPath = args[0] || "./benchmark-config.json";
40 |
41 | try {
42 | const configContent = await fs.readFile(configPath, "utf-8");
43 | const config: BenchmarkConfig = JSON.parse(configContent);
44 |
45 | console.log("🎯 Performance Benchmarking System (PERF-001 Compliance)");
46 | console.log("Target Performance:");
47 | console.log(" • Small repos (<100 files): <1 second");
48 | console.log(" • Medium repos (100-1000 files): <10 seconds");
49 | console.log(" • Large repos (1000+ files): <60 seconds\\n");
50 |
51 | const benchmarker = createBenchmarker();
52 | const suite = await benchmarker.runBenchmarkSuite(config.testRepos);
53 |
54 | // Print detailed report
55 | benchmarker.printDetailedReport(suite);
56 |
57 | // Export results if output directory specified
58 | if (config.outputDir) {
59 | await fs.mkdir(config.outputDir, { recursive: true });
60 | const timestamp = new Date().toISOString().replace(/[:.]/g, "-");
61 | const outputPath = path.join(
62 | config.outputDir,
63 | `benchmark-${timestamp}.json`,
64 | );
65 |
66 | await benchmarker.exportResults(suite, outputPath);
67 | console.log(`\\n📄 Results exported to: ${outputPath}`);
68 | }
69 |
70 | // Exit with appropriate code
71 | process.exit(suite.overallPassed ? 0 : 1);
72 | } catch (error) {
73 | console.error("❌ Benchmark failed:", error);
74 | console.error(
75 | '\\nTry running "npm run benchmark:create-config" to create a default configuration.',
76 | );
77 | process.exit(1);
78 | }
79 | }
80 |
81 | async function benchmarkCurrentRepo() {
82 | console.log("🎯 Benchmarking Current Repository");
83 | console.log("=".repeat(40));
84 |
85 | const currentRepo = process.cwd();
86 | const repoName = path.basename(currentRepo);
87 |
88 | const benchmarker = createBenchmarker();
89 |
90 | try {
91 | console.log(`📊 Analyzing: ${repoName} at ${currentRepo}\\n`);
92 |
93 | const result = await benchmarker.benchmarkRepository(
94 | currentRepo,
95 | "standard",
96 | );
97 |
98 | // Generate single-repo suite
99 | const suite = benchmarker.generateSuite(`Current Repository: ${repoName}`, [
100 | result,
101 | ]);
102 |
103 | // Print results
104 | benchmarker.printDetailedReport(suite);
105 |
106 | // Export to current directory
107 | const timestamp = new Date().toISOString().replace(/[:.]/g, "-");
108 | const outputPath = `./benchmark-current-${timestamp}.json`;
109 | await benchmarker.exportResults(suite, outputPath);
110 |
111 | console.log(`\\n📄 Results saved to: ${outputPath}`);
112 |
113 | process.exit(suite.overallPassed ? 0 : 1);
114 | } catch (error) {
115 | console.error("❌ Benchmark failed:", error);
116 | process.exit(1);
117 | }
118 | }
119 |
120 | async function createDefaultConfig() {
121 | const defaultConfig: BenchmarkConfig = {
122 | testRepos: [
123 | {
124 | path: ".",
125 | name: "Current Repository",
126 | expectedSize: "small",
127 | },
128 | // Add more test repositories here
129 | // {
130 | // path: "/path/to/medium/repo",
131 | // name: "Medium Test Repo",
132 | // expectedSize: "medium"
133 | // },
134 | // {
135 | // path: "/path/to/large/repo",
136 | // name: "Large Test Repo",
137 | // expectedSize: "large"
138 | // }
139 | ],
140 | outputDir: "./benchmark-results",
141 | verbose: true,
142 | };
143 |
144 | const configPath = "./benchmark-config.json";
145 | await fs.writeFile(configPath, JSON.stringify(defaultConfig, null, 2));
146 |
147 | console.log("✅ Created default benchmark configuration:");
148 | console.log(` ${configPath}`);
149 | console.log("");
150 | console.log("📝 Edit this file to add your test repositories, then run:");
151 | console.log(" npm run benchmark:run");
152 | }
153 |
154 | function printHelp() {
155 | console.log("🎯 DocuMCP Performance Benchmarking Tool");
156 | console.log("");
157 | console.log("USAGE:");
158 | console.log(
159 | " npm run benchmark:run [config-file] Run full benchmark suite",
160 | );
161 | console.log(
162 | " npm run benchmark:current Benchmark current repository only",
163 | );
164 | console.log(
165 | " npm run benchmark:create-config Create default configuration",
166 | );
167 | console.log(" npm run benchmark:help Show this help");
168 | console.log("");
169 | console.log("PERFORMANCE TARGETS (PERF-001):");
170 | console.log(" • Small repositories (<100 files): <1 second");
171 | console.log(" • Medium repositories (100-1000 files): <10 seconds");
172 | console.log(" • Large repositories (1000+ files): <60 seconds");
173 | console.log("");
174 | console.log("EXAMPLES:");
175 | console.log(" npm run benchmark:current");
176 | console.log(" npm run benchmark:create-config");
177 | console.log(" npm run benchmark:run ./my-config.json");
178 | }
179 |
// Handle unhandled promise rejections
// Safety net: a rejection that escapes every handler above terminates the
// process with a non-zero exit code instead of being silently swallowed.
process.on("unhandledRejection", (error) => {
  console.error("❌ Unhandled rejection:", error);
  process.exit(1);
});

// Script entry point: any error thrown from the command dispatch is reported
// and mapped to exit code 1 so CI callers see the failure.
main().catch((error) => {
  console.error("❌ Script failed:", error);
  process.exit(1);
});
190 |
```
--------------------------------------------------------------------------------
/docs/research/domain-3-ssg-recommendation/ssg-performance-analysis.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.966Z"
4 | last_validated: "2025-12-09T19:41:38.597Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
8 | ---
9 |
10 | # Static Site Generator Performance Analysis
11 |
12 | **Research Date**: 2025-01-14
13 | **Domain**: SSG Recommendation Engine
14 | **Status**: Completed
15 |
16 | ## Research Overview
17 |
18 | Comprehensive analysis of static site generator performance characteristics, build times, and deployment considerations for DocuMCP recommendation engine.
19 |
20 | ## Key Research Findings
21 |
22 | ### Build Performance Comparison
23 |
24 | Based on CSS-Tricks comprehensive benchmarking study:
25 |
26 | | SSG | Language | Small Sites (1-1024 files) | Large Sites (1K-64K files) | Key Characteristics |
27 | | -------------- | -------- | --------------------------------- | ------------------------------- | ------------------------------- |
28 | | **Hugo** | Go | ~250x faster than Gatsby | ~40x faster than Gatsby | Fastest across all scales |
29 | | **Jekyll** | Ruby | Competitive with Eleventy | Slower scaling, Ruby bottleneck | Good for small-medium sites |
30 | | **Eleventy** | Node.js | Fast, lightweight | Good scaling | Excellent developer experience |
31 | | **Gatsby** | React | Slower startup (webpack overhead) | Improves relatively at scale | Rich features, plugin ecosystem |
32 | | **Next.js** | React | Framework overhead | Good with optimization | Hybrid capabilities |
33 | | **Docusaurus** | React | Moderate performance | Documentation optimized | Purpose-built for docs |
34 |
35 | ### Performance Characteristics Analysis
36 |
37 | #### **Tier 1: Speed Champions (Hugo)**
38 |
39 | - **Build Time**: Sub-second for small sites, seconds for large sites
40 | - **Scaling**: Linear performance, excellent for content-heavy sites
41 | - **Trade-offs**: Limited plugin ecosystem, steeper learning curve
42 |
43 | #### **Tier 2: Balanced Performance (Jekyll, Eleventy)**
44 |
45 | - **Build Time**: Fast for small sites, moderate scaling
46 | - **Scaling**: Jekyll hits Ruby performance ceiling, Eleventy scales better
47 | - **Trade-offs**: Good balance of features and performance
48 |
49 | #### **Tier 3: Feature-Rich (Gatsby, Next.js, Docusaurus)**
50 |
51 | - **Build Time**: Significant webpack/framework overhead
52 | - **Scaling**: Performance gap narrows at scale due to optimizations
53 | - **Trade-offs**: Rich ecosystems, modern features, slower builds
54 |
55 | ### Real-World Performance Implications
56 |
57 | #### **For DocuMCP Recommendation Logic:**
58 |
59 | 1. **Small Projects** (< 100 pages):
60 |
61 | - All SSGs perform adequately
62 | - Developer experience becomes primary factor
63 | - Hugo still 250x faster than Gatsby for simple sites
64 |
65 | 2. **Medium Projects** (100-1000 pages):
66 |
67 | - Performance differences become noticeable
68 | - Hugo maintains significant advantage
69 | - Jekyll starts showing Ruby limitations
70 |
71 | 3. **Large Projects** (1000+ pages):
72 | - Hugo remains fastest but gap narrows
73 | - Framework-based SSGs benefit from optimizations
74 | - Build time becomes CI/CD bottleneck consideration
75 |
76 | ### Deployment and CI/CD Considerations
77 |
78 | #### **GitHub Actions Build Time Impact**
79 |
80 | - **Free Plan Limitations**: 2000 minutes/month
81 | - **Cost Implications**: Slow builds consume more CI time
82 | - **Real Example**: Gatsby site taking 15 minutes vs Hugo taking 30 seconds
83 |
84 | #### **Content Editor Experience**
85 |
86 | - **Preview Generation**: Fast builds enable quick content previews
87 | - **Development Workflow**: Build speed affects local development experience
88 | - **Incremental Builds**: Framework support varies significantly
89 |
90 | ### Recommendation Engine Criteria
91 |
92 | Based on research findings, DocuMCP should weight these factors:
93 |
94 | 1. **Project Scale Weight**:
95 |
96 | - Small projects: 40% performance, 60% features/DX
97 | - Medium projects: 60% performance, 40% features/DX
98 | - Large projects: 80% performance, 20% features/DX
99 |
100 | 2. **Team Context Multipliers**:
101 |
102 | - Technical team: Favor performance (Hugo/Eleventy)
103 | - Non-technical content creators: Favor ease-of-use (Jekyll/Docusaurus)
104 | - Mixed teams: Balanced approach (Next.js/Gatsby)
105 |
106 | 3. **Use Case Optimization**:
107 | - **Documentation**: Docusaurus > MkDocs > Hugo
108 | - **Marketing Sites**: Next.js > Gatsby > Hugo
109 | - **Blogs**: Jekyll > Eleventy > Hugo
110 | - **Large Content Sites**: Hugo > Eleventy > Others
111 |
112 | ## Implementation Recommendations for DocuMCP
113 |
114 | ### Algorithm Design
115 |
116 | ```typescript
117 | // Performance scoring algorithm
118 | const calculatePerformanceScore = (projectMetrics: ProjectMetrics) => {
119 | const { pageCount, teamSize, techLevel, updateFrequency } = projectMetrics;
120 |
121 | // Scale-based performance weighting
122 | const performanceWeight =
123 | pageCount > 1000 ? 0.8 : pageCount > 100 ? 0.6 : 0.4;
124 |
125 | // SSG-specific performance scores (0-100)
126 | const performanceScores = {
127 | hugo: 100,
128 | eleventy: 85,
129 | jekyll: pageCount > 500 ? 60 : 80,
130 | nextjs: 70,
131 | gatsby: pageCount > 1000 ? 65 : 45,
132 | docusaurus: 75,
133 | };
134 |
135 |   return performanceScores; // NOTE(review): performanceWeight is computed above but not applied here — presumably combined with feature scores downstream; confirm before implementing
136 | };
137 | ```
138 |
139 | ### Research Validation
140 |
141 | - ✅ Performance benchmarks analyzed from multiple sources
142 | - ✅ Real-world implications documented
143 | - ✅ Recommendation criteria established
144 | - ⚠️ Needs validation: Edge case performance scenarios
145 | - ⚠️ Needs testing: Algorithm implementation with real project data
146 |
147 | ## Sources & References
148 |
149 | 1. CSS-Tricks Comprehensive SSG Build Time Analysis
150 | 2. Jamstack.org Performance Surveys
151 | 3. GitHub Actions CI/CD Cost Analysis
152 | 4. Community Performance Reports (Hugo, Gatsby, Next.js)
153 |
```
--------------------------------------------------------------------------------
/ARCHITECTURAL_CHANGES_SUMMARY.md:
--------------------------------------------------------------------------------
```markdown
1 | # Architectural Changes Summary
2 |
3 | ## January 14, 2025
4 |
5 | This document summarizes all architectural changes, ADR updates, and implementations completed in this session.
6 |
7 | ## ✅ Implementations Completed
8 |
9 | ### 1. Release Pipeline Improvements (Issues #1, #2, #3)
10 |
11 | **Status**: ✅ Fully Implemented
12 |
13 | **Files Changed**:
14 |
15 | - `.github/workflows/release.yml` - Enhanced with verification and automation
16 |
17 | **Features Implemented**:
18 |
19 | - ✅ npm publishing verification with retry mechanism (3 attempts)
20 | - ✅ Package installation test after publication
21 | - ✅ Automated changelog generation using standard-version
22 | - ✅ Commit message validation before release
23 | - ✅ Coverage threshold updated from 80% to 85% (currently at 91.65%)
24 | - ✅ Enhanced changelog extraction for GitHub Releases
25 |
26 | **Verification**:
27 |
28 | - ✅ `npm run release:dry-run` tested and working
29 | - ✅ All quality gates in place
30 | - ✅ Error handling implemented throughout
31 |
32 | ### 2. ADR Documentation Updates
33 |
34 | **Status**: ✅ Completed
35 |
36 | **New ADRs Created**:
37 |
38 | - **ADR-012**: Priority Scoring System for Documentation Drift Detection
39 | - **ADR-013**: Release Pipeline and Package Distribution Architecture
40 |
41 | **ADRs Updated**:
42 |
43 | - **ADR-002**: Added GitHub issue references (#77, #78)
44 | - **ADR-004**: Added Diataxis type tracking documentation (#81)
45 | - **ADR-005**: Added release pipeline reference
46 | - **ADR-006**: Added agent artifact cleanup tool reference (#80)
47 | - **ADR-009**: Added LLM integration documentation (#82)
48 | - **ADR-012**: Added GitHub issue reference (#83)
49 | - **ADR-013**: Updated status to Accepted with implementation details
50 |
51 | **ADR README**: Updated with all new ADRs and status changes
52 |
53 | ## 📋 Code Implementation Verification
54 |
55 | ### Priority Scoring System (Issue #83)
56 |
57 | **Implementation**: ✅ Found in `src/utils/drift-detector.ts`
58 |
59 | - `DriftPriorityScore` interface (lines 91-103)
60 | - `calculatePriorityScore()` method (line 1307)
61 | - Integration with drift detection results
62 |
63 | ### LLM Integration Layer (Issue #82)
64 |
65 | **Implementation**: ✅ Found in multiple files
66 |
67 | - `src/utils/llm-client.ts` - LLM client implementation
68 | - `src/utils/semantic-analyzer.ts` - Semantic analysis integration
69 | - Supports DeepSeek, OpenAI, Anthropic, Ollama providers
70 | - Hybrid analysis (LLM + AST fallback)
71 |
72 | ### Agent Artifact Cleanup (Issue #80)
73 |
74 | **Implementation**: ✅ Found in multiple files
75 |
76 | - `src/tools/cleanup-agent-artifacts.ts` - MCP tool implementation
77 | - `src/utils/artifact-detector.ts` - Detection logic
78 | - Integrated into main MCP server (`src/index.ts`)
79 |
80 | ### Diataxis Type Tracking (Issue #81)
81 |
82 | **Implementation**: ✅ Found in multiple files
83 |
84 | - `src/utils/drift-detector.ts` - Diataxis type detection (lines 699-984)
85 | - `src/memory/schemas.ts` - Schema definition (line 266)
86 | - CodeExample interface extended with diataxisType field
87 |
88 | ### Knowledge Graph Extensions (Issues #77, #78)
89 |
90 | **Implementation**: ✅ Found in `src/memory/schemas.ts`
91 |
92 | - DocumentationExampleEntitySchema (line 262)
93 | - ExampleValidationEntitySchema (line 284)
94 | - CallGraphEntitySchema (referenced in commit)
95 |
96 | ## 📊 Project Statistics
97 |
98 | - **Total TypeScript Files**: 72
99 | - **ADRs**: 13 (11 Accepted, 2 Proposed)
100 | - **Test Coverage**: 91.65% (exceeds 85% target)
101 | - **Recent Commits**: 10+ in last 2 days
102 |
103 | ## 🔗 GitHub Issues Status
104 |
105 | | Issue # | Title | Status | ADR Reference |
106 | | ------- | ------------------------------ | -------------------- | ------------- |
107 | | #1 | Fix npm Package Publishing | ✅ Fixed | ADR-013 |
108 | | #2 | Automated Changelog Generation | ✅ Implemented | ADR-013 |
109 | | #3 | Test Coverage to 85% | ✅ Exceeded (91.65%) | ADR-013 |
110 | | #77 | Knowledge Graph Extensions | ✅ Implemented | ADR-002 |
111 | | #78 | Documentation Example Entities | ✅ Implemented | ADR-002 |
112 | | #80 | Agent Artifact Cleanup | ✅ Implemented | ADR-006 |
113 | | #81 | Diataxis Type Tracking | ✅ Implemented | ADR-004 |
114 | | #82 | LLM Integration Layer | ✅ Implemented | ADR-009 |
115 | | #83 | Priority Scoring System | ✅ Implemented | ADR-012 |
116 |
117 | ## 📝 Commits Made
118 |
119 | 1. **dbef13f** - `feat(release): implement npm publishing verification and automated changelog (#1, #2)`
120 |
121 | - Release pipeline improvements
122 | - New ADRs (012, 013)
123 | - ADR updates with issue references
124 |
125 | 2. **ef03918** - `docs(adrs): update ADR-013 status to Accepted with implementation details`
126 | - ADR-013 status update
127 | - Implementation details added
128 |
129 | ## 🎯 Next Steps
130 |
131 | ### Ready for Implementation
132 |
133 | - **Issue #74**: Change Watcher for Real-time Documentation Drift Monitoring
134 | - Dependencies: ✅ Drift detection system exists
135 | - Dependencies: ✅ LLM integration available (optional)
136 | - Status: Ready to implement
137 |
138 | ### Future Enhancements (From ADR-013)
139 |
140 | - Issue #7: AI-enhanced release notes
141 | - Issue #8: Release health dashboard
142 | - Issue #6: Smart Dependabot auto-merge
143 |
144 | ## 📚 Documentation Created
145 |
146 | 1. **ISSUE_IMPLEMENTATION_SUMMARY.md** - Detailed implementation summary
147 | 2. **ARCHITECTURAL_CHANGES_SUMMARY.md** - This document
148 | 3. **ADR-012** - Priority Scoring System documentation
149 | 4. **ADR-013** - Release Pipeline Architecture documentation
150 |
151 | ## ✅ Quality Assurance
152 |
153 | - ✅ All implementations verified in codebase
154 | - ✅ ADRs updated with implementation status
155 | - ✅ GitHub issues referenced in ADRs
156 | - ✅ Commit messages follow conventional format
157 | - ✅ Test coverage exceeds targets
158 | - ✅ Release pipeline tested and working
159 |
160 | ---
161 |
162 | **Last Updated**: 2025-01-14
163 | **Status**: All changes committed and pushed to GitHub
164 | **Ready for**: Issue #74 implementation
165 |
```
--------------------------------------------------------------------------------
/tests/utils/usage-metadata.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { UsageMetadataCollector } from "../../src/utils/usage-metadata.js";
2 | import {
3 | DriftSnapshot,
4 | DocumentationSnapshot,
5 | DriftDetectionResult,
6 | } from "../../src/utils/drift-detector.js";
7 | import { ASTAnalysisResult } from "../../src/utils/ast-analyzer.js";
8 | import { DriftDetector } from "../../src/utils/drift-detector.js";
9 |
10 | describe("UsageMetadataCollector", () => {
11 | const collector = new UsageMetadataCollector();
12 |
13 | const makeSnapshot = (): DriftSnapshot => {
14 | const producerFile: ASTAnalysisResult = {
15 | filePath: "/repo/src/producer.ts",
16 | language: "typescript",
17 | functions: [
18 | {
19 | name: "produce",
20 | parameters: [],
21 | returnType: null,
22 | isAsync: false,
23 | isExported: true,
24 | isPublic: true,
25 | docComment: null,
26 | startLine: 1,
27 | endLine: 1,
28 | complexity: 1,
29 | dependencies: [],
30 | },
31 | ],
32 | classes: [
33 | {
34 | name: "Widget",
35 | isExported: true,
36 | extends: null,
37 | implements: [],
38 | methods: [],
39 | properties: [],
40 | docComment: null,
41 | startLine: 1,
42 | endLine: 1,
43 | },
44 | ],
45 | interfaces: [],
46 | types: [],
47 | imports: [],
48 | exports: ["produce", "Widget"],
49 | contentHash: "abc",
50 | lastModified: new Date().toISOString(),
51 | linesOfCode: 10,
52 | complexity: 1,
53 | };
54 |
55 | const consumerFile: ASTAnalysisResult = {
56 | filePath: "/repo/src/consumer.ts",
57 | language: "typescript",
58 | functions: [],
59 | classes: [],
60 | interfaces: [],
61 | types: [],
62 | imports: [
63 | {
64 | source: "./producer",
65 | imports: [{ name: "produce" }, { name: "Widget" }],
66 | isDefault: false,
67 | startLine: 1,
68 | },
69 | ],
70 | exports: [],
71 | contentHash: "def",
72 | lastModified: new Date().toISOString(),
73 | linesOfCode: 10,
74 | complexity: 1,
75 | };
76 |
77 | const docSnapshot: DocumentationSnapshot = {
78 | filePath: "/repo/docs/api.md",
79 | contentHash: "ghi",
80 | referencedCode: ["/repo/src/producer.ts"],
81 | lastUpdated: new Date().toISOString(),
82 | sections: [
83 | {
84 | title: "Widget",
85 | content: "Widget docs",
86 | referencedFunctions: [],
87 | referencedClasses: ["Widget"],
88 | referencedTypes: [],
89 | codeExamples: [],
90 | startLine: 1,
91 | endLine: 5,
92 | },
93 | ],
94 | };
95 |
96 | return {
97 | projectPath: "/repo",
98 | timestamp: new Date().toISOString(),
99 | files: new Map([
100 | [producerFile.filePath, producerFile],
101 | [consumerFile.filePath, consumerFile],
102 | ]),
103 | documentation: new Map([[docSnapshot.filePath, docSnapshot]]),
104 | };
105 | };
106 |
107 | it("counts imports and class/function references (sync fallback)", () => {
108 | const snapshot = makeSnapshot();
109 | const metadata = collector.collectSync(snapshot);
110 |
111 | expect(metadata.imports.get("produce")).toBe(1);
112 | expect(metadata.imports.get("Widget")).toBe(1);
113 | expect(metadata.functionCalls.get("produce")).toBe(1);
114 | // Widget is identified as a class and should increment class instantiations
115 | // once from docs and once from imports.
116 | expect(metadata.classInstantiations.get("Widget")).toBe(2);
117 | });
118 |
119 | it("collects usage metadata asynchronously with call graph analysis", async () => {
120 | const snapshot = makeSnapshot();
121 | const metadata = await collector.collect(snapshot);
122 |
123 | expect(metadata.imports.get("produce")).toBeGreaterThanOrEqual(1);
124 | expect(metadata.imports.get("Widget")).toBeGreaterThanOrEqual(1);
125 | // Function calls may be counted from call graph or imports
126 | expect(metadata.functionCalls.get("produce")).toBeGreaterThanOrEqual(0);
127 | expect(metadata.classInstantiations.get("Widget")).toBeGreaterThanOrEqual(
128 | 1,
129 | );
130 | });
131 |
132 | it("integrates with DriftDetector scoring when usage metadata is supplied", async () => {
133 | const snapshot = makeSnapshot();
134 | const metadata = await collector
135 | .collect(snapshot)
136 | .catch(() => collector.collectSync(snapshot));
137 | const detector = new DriftDetector("/repo");
138 |
139 | const result: DriftDetectionResult = {
140 | filePath: "/repo/src/producer.ts",
141 | hasDrift: true,
142 | severity: "medium" as const,
143 | drifts: [
144 | {
145 | type: "outdated" as const,
146 | affectedDocs: ["/repo/docs/api.md"],
147 | codeChanges: [
148 | {
149 | type: "modified" as const,
150 | category: "function" as const,
151 | name: "produce",
152 | details: "signature update",
153 | impactLevel: "minor" as const,
154 | },
155 | ],
156 | description: "function changed",
157 | detectedAt: new Date().toISOString(),
158 | severity: "medium" as const,
159 | },
160 | ],
161 | suggestions: [],
162 | impactAnalysis: {
163 | breakingChanges: 0,
164 | majorChanges: 0,
165 | minorChanges: 1,
166 | affectedDocFiles: ["/repo/docs/api.md"],
167 | estimatedUpdateEffort: "low" as const,
168 | requiresManualReview: false,
169 | },
170 | };
171 |
172 | const scoreWithoutUsage = detector.calculatePriorityScore(result, snapshot);
173 | const scoreWithUsage = detector.calculatePriorityScore(
174 | result,
175 | snapshot,
176 | metadata,
177 | );
178 |
179 | // Usage frequency should align with observed usage (imports + calls).
180 | const expectedUsage =
181 | (metadata.functionCalls.get("produce") ?? 0) +
182 | (metadata.imports.get("produce") ?? 0);
183 | expect(scoreWithUsage.factors.usageFrequency).toBe(expectedUsage);
184 | expect(scoreWithUsage.factors.usageFrequency).toBeGreaterThan(0);
185 | });
186 | });
187 |
```
--------------------------------------------------------------------------------
/tests/utils/user-feedback-integration.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * User Feedback Integration Tests (ADR-012 Phase 3)
3 | */
4 |
5 | import { UserFeedbackIntegration } from "../../src/utils/user-feedback-integration.js";
6 | import { DriftDetectionResult } from "../../src/utils/drift-detector.js";
7 |
8 | // Mock fetch globally
9 | global.fetch = jest.fn();
10 |
11 | describe("UserFeedbackIntegration", () => {
12 | let integration: UserFeedbackIntegration;
13 |
14 | beforeEach(() => {
15 | integration = new UserFeedbackIntegration();
16 | (global.fetch as jest.Mock).mockClear();
17 | });
18 |
19 | afterEach(() => {
20 | jest.clearAllMocks();
21 | });
22 |
23 | describe("Configuration", () => {
24 | test("should configure GitHub integration", () => {
25 | integration.configure({
26 | provider: "github",
27 | apiToken: "test-token",
28 | owner: "test-owner",
29 | repo: "test-repo",
30 | });
31 |
32 | expect(integration).toBeDefined();
33 | });
34 |
35 | test("should clear cache on configuration change", () => {
36 | integration.configure({
37 | provider: "github",
38 | owner: "test",
39 | repo: "test",
40 | });
41 |
42 | integration.clearCache();
43 | expect(integration).toBeDefined();
44 | });
45 | });
46 |
47 | describe("Feedback Score Calculation", () => {
48 | test("should return 0 when no integration configured", async () => {
49 | const result: DriftDetectionResult = {
50 | filePath: "/test/file.ts",
51 | hasDrift: true,
52 | severity: "medium",
53 | drifts: [],
54 | suggestions: [],
55 | impactAnalysis: {
56 | breakingChanges: 0,
57 | majorChanges: 1,
58 | minorChanges: 0,
59 | affectedDocFiles: [],
60 | estimatedUpdateEffort: "medium",
61 | requiresManualReview: false,
62 | },
63 | };
64 |
65 | const score = await integration.calculateFeedbackScore(result);
66 | expect(score).toBe(0);
67 | });
68 |
69 | test("should handle API errors gracefully", async () => {
70 | integration.configure({
71 | provider: "github",
72 | apiToken: "invalid-token",
73 | owner: "nonexistent",
74 | repo: "nonexistent",
75 | });
76 |
77 | (global.fetch as jest.Mock).mockResolvedValueOnce({
78 | ok: false,
79 | status: 401,
80 | });
81 |
82 | const result: DriftDetectionResult = {
83 | filePath: "/test/file.ts",
84 | hasDrift: true,
85 | severity: "medium",
86 | drifts: [],
87 | suggestions: [],
88 | impactAnalysis: {
89 | breakingChanges: 0,
90 | majorChanges: 1,
91 | minorChanges: 0,
92 | affectedDocFiles: [],
93 | estimatedUpdateEffort: "medium",
94 | requiresManualReview: false,
95 | },
96 | };
97 |
98 | const score = await integration.calculateFeedbackScore(result);
99 | expect(score).toBe(0);
100 | });
101 |
102 | test("should calculate feedback score from GitHub issues", async () => {
103 | integration.configure({
104 | provider: "github",
105 | apiToken: "test-token",
106 | owner: "test-owner",
107 | repo: "test-repo",
108 | });
109 |
110 | const mockIssues = [
111 | {
112 | number: 1,
113 | title: "Documentation issue",
114 | body: "The file `src/utils/test.ts` has outdated docs",
115 | state: "open",
116 | labels: [{ name: "documentation" }, { name: "critical" }],
117 | created_at: new Date().toISOString(),
118 | updated_at: new Date().toISOString(),
119 | },
120 | {
121 | number: 2,
122 | title: "Another docs issue",
123 | body: "Function `testFunction()` needs documentation",
124 | state: "open",
125 | labels: [{ name: "docs" }],
126 | created_at: new Date().toISOString(),
127 | updated_at: new Date(
128 | Date.now() - 10 * 24 * 60 * 60 * 1000,
129 | ).toISOString(), // 10 days ago
130 | },
131 | ];
132 |
133 | (global.fetch as jest.Mock).mockResolvedValueOnce({
134 | ok: true,
135 | json: async () => mockIssues,
136 | });
137 |
138 | const result: DriftDetectionResult = {
139 | filePath: "src/utils/test.ts",
140 | hasDrift: true,
141 | severity: "medium",
142 | drifts: [
143 | {
144 | type: "missing",
145 | affectedDocs: [],
146 | codeChanges: [
147 | {
148 | name: "testFunction",
149 | type: "added",
150 | category: "function",
151 | details: "New function added",
152 | impactLevel: "minor",
153 | },
154 | ],
155 | description: "Function testFunction is missing documentation",
156 | detectedAt: new Date().toISOString(),
157 | severity: "medium",
158 | },
159 | ],
160 | suggestions: [],
161 | impactAnalysis: {
162 | breakingChanges: 0,
163 | majorChanges: 1,
164 | minorChanges: 0,
165 | affectedDocFiles: [],
166 | estimatedUpdateEffort: "medium",
167 | requiresManualReview: false,
168 | },
169 | };
170 |
171 | const score = await integration.calculateFeedbackScore(result);
172 | // Should have score > 0 due to open issues
173 | expect(score).toBeGreaterThan(0);
174 | });
175 |
176 | test("should use cache for repeated requests", async () => {
177 | integration.configure({
178 | provider: "github",
179 | apiToken: "test-token",
180 | owner: "test-owner",
181 | repo: "test-repo",
182 | });
183 |
184 | (global.fetch as jest.Mock).mockResolvedValue({
185 | ok: true,
186 | json: async () => [],
187 | });
188 |
189 | const result: DriftDetectionResult = {
190 | filePath: "/test/file.ts",
191 | hasDrift: true,
192 | severity: "medium",
193 | drifts: [],
194 | suggestions: [],
195 | impactAnalysis: {
196 | breakingChanges: 0,
197 | majorChanges: 1,
198 | minorChanges: 0,
199 | affectedDocFiles: [],
200 | estimatedUpdateEffort: "medium",
201 | requiresManualReview: false,
202 | },
203 | };
204 |
205 | // First call
206 | await integration.calculateFeedbackScore(result);
207 | // Second call should use cache
208 | await integration.calculateFeedbackScore(result);
209 |
210 | // Fetch should only be called once due to caching
211 | expect(global.fetch).toHaveBeenCalledTimes(1);
212 | });
213 | });
214 | });
215 |
```
--------------------------------------------------------------------------------
/ISSUE_IMPLEMENTATION_SUMMARY.md:
--------------------------------------------------------------------------------
```markdown
1 | # GitHub Issues Implementation Summary
2 |
3 | This document summarizes the implementation of GitHub issues related to the release pipeline and package distribution (ADR-013).
4 |
5 | ## ✅ Completed Implementations
6 |
7 | ### Issue #1: Fix npm Package Publishing ✅
8 |
9 | **Status**: Implemented
10 |
11 | **Changes Made**:
12 | 1. **Enhanced npm Publishing Step** (`.github/workflows/release.yml`):
13 | - Added npm authentication verification before publishing
14 | - Implemented retry mechanism (3 attempts with 5-second delays)
15 | - Added error handling with clear failure messages
16 | - Captured package version for verification
17 |
18 | 2. **Added npm Publication Verification**:
19 | - New step to verify package exists on npm registry after publication
20 | - 10-second wait for registry propagation
21 | - Clear success/failure indicators
22 | - Automatic failure if package not found
23 |
24 | 3. **Added Package Installation Test**:
25 | - Tests that published package can be installed globally
26 | - Verifies `documcp` command is available after installation
27 | - Ensures end-to-end package functionality
28 |
29 | **Key Features**:
30 | - Retry mechanism for transient failures
31 | - Comprehensive error messages
32 | - Verification steps prevent false success
33 | - Installation test ensures package works correctly
34 |
35 | ### Issue #2: Automated Changelog Generation ✅
36 |
37 | **Status**: Already Configured, Enhanced Integration
38 |
39 | **Existing Configuration**:
40 | - ✅ `standard-version` package installed
41 | - ✅ `.versionrc.json` configured with proper formatting
42 | - ✅ Release scripts in `package.json`
43 |
44 | **Enhancements Made**:
45 | 1. **Improved Changelog Extraction**:
46 | - Better parsing of CHANGELOG.md sections
47 | - Handles version format correctly
48 | - Improved error handling if changelog missing
49 |
50 | 2. **Added Commit Message Validation**:
51 | - Validates commits follow conventional format before release
52 | - Prevents releases with invalid commit messages
53 | - Clear error messages for developers
54 |
55 | 3. **Enhanced Release Workflow**:
56 | - Better integration with standard-version
57 | - Improved changelog content extraction for GitHub Releases
58 | - Proper error handling throughout
59 |
60 | **Verification**:
61 | - ✅ `npm run release:dry-run` works correctly
62 | - ✅ Generates properly formatted changelog entries
63 | - ✅ Links commits and issues correctly
64 |
65 | ### Issue #3: Improve Test Coverage to 85% ✅
66 |
67 | **Status**: Already Exceeded Target
68 |
69 | **Current Status**:
70 | - **Statement Coverage**: 91.65% ✅ (Target: 85%)
71 | - **Branch Coverage**: 81.44%
72 | - **Function Coverage**: 93.97%
73 | - **Line Coverage**: 92.39%
74 |
75 | **Changes Made**:
76 | 1. **Updated Coverage Threshold**:
77 | - Changed from 80% to 85% in release workflow
78 | - Updated threshold check to use correct output parsing
79 | - Added clear success message with actual coverage percentage
80 |
81 | **Note**: Coverage already exceeds target, but threshold updated to reflect new standard.
82 |
83 | ## 📋 Implementation Details
84 |
85 | ### Release Workflow Improvements
86 |
87 | The release workflow (`.github/workflows/release.yml`) now includes:
88 |
89 | 1. **Pre-Release Quality Gates**:
90 | - Test coverage verification (85% threshold)
91 | - Commit message validation
92 | - Full test suite execution
93 | - Build verification
94 |
95 | 2. **Automated Changelog Generation**:
96 | - Uses `standard-version` for version bumping
97 | - Generates formatted changelog entries
98 | - Extracts changelog content for GitHub Releases
99 | - Handles both manual and tag-based releases
100 |
101 | 3. **npm Publishing with Verification**:
102 | - Authentication verification
103 | - Retry mechanism (3 attempts)
104 | - Publication verification
105 | - Installation test
106 |
107 | ### Configuration Files
108 |
109 | **commitlint.config.js**:
110 | - ✅ Already configured with conventional commit rules
111 | - ✅ Enforces commit message format
112 | - ✅ Integrated with Husky hooks
113 |
114 | **.versionrc.json**:
115 | - ✅ Configured with proper changelog formatting
116 | - ✅ Includes emoji sections
117 | - ✅ Proper URL formats for GitHub
118 |
119 | **.husky/commit-msg**:
120 | - ✅ Pre-commit hook validates commit messages
121 | - ✅ Prevents invalid commits from being created
122 |
123 | ## 🎯 Acceptance Criteria Status
124 |
125 | ### Issue #1: npm Package Publishing
126 | - [x] npm package "documcp" verification step added
127 | - [x] Release workflow includes publication verification
128 | - [x] Publication failures are properly logged and handled
129 | - [x] Retry mechanism implemented
130 | - [x] Installation test added
131 |
132 | ### Issue #2: Automated Changelog Generation
133 | - [x] Changelog automatically updated on release
134 | - [x] Commit messages follow conventional format (enforced)
135 | - [x] Release notes include all relevant changes
136 | - [x] Consistent formatting across all releases
137 | - [x] Automated categorization of changes
138 |
139 | ### Issue #3: Test Coverage
140 | - [x] Overall statement coverage ≥85% (currently 91.65%)
141 | - [x] Coverage threshold updated in workflow
142 | - [x] Coverage check integrated into release pipeline
143 |
144 | ## 🚀 Next Steps
145 |
146 | ### Recommended Actions
147 |
148 | 1. **Test Release Pipeline**:
149 | - Run a test release to verify all steps work correctly
150 | - Verify npm publication succeeds
151 | - Confirm changelog generation works
152 |
153 | 2. **Monitor First Release**:
154 | - Watch for any issues in the enhanced workflow
155 | - Verify package appears on npm registry
156 | - Confirm installation works for users
157 |
158 | 3. **Documentation Updates**:
159 | - Update CONTRIBUTING.md with commit message guidelines
160 | - Add release process documentation
161 | - Document npm publishing process
162 |
163 | ### Future Enhancements (From ADR-013)
164 |
165 | - [ ] AI-enhanced release notes (Issue #7)
166 | - [ ] Release health dashboard (Issue #8)
167 | - [ ] Smart Dependabot auto-merge (Issue #6)
168 | - [ ] Enhanced release notes with performance metrics
169 |
170 | ## 📝 Related ADRs
171 |
172 | - **ADR-013**: Release Pipeline and Package Distribution Architecture
173 | - **ADR-005**: GitHub Pages Deployment Automation (related workflow)
174 |
175 | ## 🔗 References
176 |
177 | - GitHub Issue: #1 - Fix npm Package Publishing
178 | - GitHub Issue: #2 - Implement Automated Changelog Generation
179 | - GitHub Issue: #3 - Improve Test Coverage to 85%
180 | - [Conventional Commits](https://www.conventionalcommits.org/)
181 | - [standard-version](https://github.com/conventional-changelog/standard-version)
182 | - [commitlint](https://commitlint.js.org/)
183 |
184 | ---
185 |
186 | **Last Updated**: 2025-01-14
187 | **Implementation Status**: ✅ Complete
188 | **Ready for Testing**: Yes
189 |
```
--------------------------------------------------------------------------------
/tests/change-watcher-priority.integration.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { ChangeWatcher } from "../src/utils/change-watcher.js";
2 | import {
3 | DriftSnapshot,
4 | PrioritizedDriftResult,
5 | } from "../src/utils/drift-detector.js";
6 | import { ASTAnalysisResult } from "../src/utils/ast-analyzer.js";
7 |
8 | describe("ChangeWatcher priority integration", () => {
9 | it("passes collected usage metadata into prioritized drift results", async () => {
10 | // Baseline snapshot (pre-change)
11 | const producerFile: ASTAnalysisResult = {
12 | filePath: "/repo/src/producer.ts",
13 | language: "typescript",
14 | functions: [
15 | {
16 | name: "produce",
17 | parameters: [],
18 | returnType: null,
19 | isAsync: false,
20 | isExported: true,
21 | isPublic: true,
22 | docComment: null,
23 | startLine: 1,
24 | endLine: 1,
25 | complexity: 1,
26 | dependencies: [],
27 | },
28 | ],
29 | classes: [],
30 | interfaces: [],
31 | types: [],
32 | imports: [],
33 | exports: ["produce"],
34 | contentHash: "abc",
35 | lastModified: new Date().toISOString(),
36 | linesOfCode: 10,
37 | complexity: 1,
38 | };
39 |
40 | const baselineSnapshot: DriftSnapshot = {
41 | projectPath: "/repo",
42 | timestamp: new Date().toISOString(),
43 | files: new Map([[producerFile.filePath, producerFile]]),
44 | documentation: new Map(),
45 | };
46 |
47 | // Current snapshot adds an importing consumer and doc references
48 | const consumerFile: ASTAnalysisResult = {
49 | filePath: "/repo/src/consumer.ts",
50 | language: "typescript",
51 | functions: [],
52 | classes: [],
53 | interfaces: [],
54 | types: [],
55 | imports: [
56 | {
57 | source: "./producer",
58 | imports: [{ name: "produce" }],
59 | isDefault: false,
60 | startLine: 1,
61 | },
62 | ],
63 | exports: [],
64 | contentHash: "def",
65 | lastModified: new Date().toISOString(),
66 | linesOfCode: 5,
67 | complexity: 1,
68 | };
69 |
70 | const docSnapshot = {
71 | filePath: "/repo/docs/api.md",
72 | contentHash: "ghi",
73 | referencedCode: [producerFile.filePath],
74 | lastUpdated: new Date().toISOString(),
75 | sections: [
76 | {
77 | title: "produce",
78 | content: "Description",
79 | referencedFunctions: ["produce"],
80 | referencedClasses: [],
81 | referencedTypes: [],
82 | codeExamples: [],
83 | startLine: 1,
84 | endLine: 5,
85 | },
86 | ],
87 | };
88 |
89 | const currentSnapshot: DriftSnapshot = {
90 | projectPath: "/repo",
91 | timestamp: new Date().toISOString(),
92 | files: new Map([
93 | [producerFile.filePath, producerFile],
94 | [consumerFile.filePath, consumerFile],
95 | ]),
96 | documentation: new Map([[docSnapshot.filePath, docSnapshot]]),
97 | };
98 |
99 | const driftResults: PrioritizedDriftResult[] = [
100 | {
101 | filePath: producerFile.filePath,
102 | hasDrift: true,
103 | severity: "medium",
104 | drifts: [
105 | {
106 | type: "outdated",
107 | affectedDocs: [docSnapshot.filePath],
108 | codeChanges: [
109 | {
110 | type: "modified",
111 | category: "function",
112 | name: "produce",
113 | details: "signature update",
114 | impactLevel: "minor",
115 | },
116 | ],
117 | description: "function changed",
118 | detectedAt: new Date().toISOString(),
119 | severity: "medium",
120 | },
121 | ],
122 | suggestions: [],
123 | impactAnalysis: {
124 | breakingChanges: 0,
125 | majorChanges: 0,
126 | minorChanges: 1,
127 | affectedDocFiles: [docSnapshot.filePath],
128 | estimatedUpdateEffort: "low",
129 | requiresManualReview: false,
130 | },
131 | priorityScore: {
132 | overall: 0,
133 | factors: {
134 | codeComplexity: 0,
135 | usageFrequency: 0,
136 | changeMagnitude: 0,
137 | documentationCoverage: 0,
138 | staleness: 0,
139 | userFeedback: 0,
140 | },
141 | recommendation: "low",
142 | suggestedAction: "",
143 | },
144 | },
145 | ];
146 |
147 | let capturedUsage: any = null;
148 |
149 | const detectorStub = {
150 | initialize: jest.fn().mockResolvedValue(undefined),
151 | loadLatestSnapshot: jest.fn().mockResolvedValue(baselineSnapshot),
152 | createSnapshot: jest.fn().mockResolvedValue(currentSnapshot),
153 | getPrioritizedDriftResults: jest
154 | .fn()
155 | .mockImplementation(
156 | async (
157 | _oldSnapshot: DriftSnapshot,
158 | _newSnapshot: DriftSnapshot,
159 | usageMetadata: any,
160 | ) => {
161 | capturedUsage = usageMetadata;
162 | // Encode usage frequency into the priority score for assertion
163 | const usageFreq = usageMetadata?.imports?.get("produce") ?? 0;
164 | return driftResults.map((dr) => ({
165 | ...dr,
166 | priorityScore: {
167 | ...dr.priorityScore!,
168 | factors: {
169 | ...dr.priorityScore!.factors,
170 | usageFrequency: usageFreq,
171 | },
172 | overall: usageFreq,
173 | },
174 | }));
175 | },
176 | ),
177 | };
178 |
179 | const watcher = new ChangeWatcher(
180 | {
181 | projectPath: "/repo",
182 | docsPath: "/repo/docs",
183 | watchPaths: [], // disable FS watcher side effects
184 | },
185 | {
186 | createDetector: () => detectorStub as any,
187 | },
188 | );
189 |
190 | await watcher.start();
191 | const result = await watcher.triggerManual("test-run");
192 |
193 | expect(detectorStub.initialize).toHaveBeenCalled();
194 | expect(detectorStub.getPrioritizedDriftResults).toHaveBeenCalled();
195 |
196 | // Usage metadata should reflect imports and doc references
197 | expect(capturedUsage).toBeTruthy();
198 | expect(capturedUsage.imports.get("produce")).toBe(1);
199 | // produce is exported as a function; collector should count it in functionCalls
200 | expect(capturedUsage.functionCalls.get("produce")).toBeGreaterThanOrEqual(
201 | 1,
202 | );
203 |
204 | // Drift results returned to the caller should carry the usage-influenced score
205 | expect(result.driftResults[0].priorityScore?.overall).toBe(1);
206 | expect(result.driftResults[0].priorityScore?.factors.usageFrequency).toBe(
207 | 1,
208 | );
209 | });
210 | });
211 |
```
--------------------------------------------------------------------------------
/docs/CE-MCP-FINDINGS.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-12-09T19:18:14.152Z"
4 | last_validated: "2025-12-09T19:41:38.564Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
8 | ---
9 |
10 | # Code Execution with MCP (CE-MCP) Research Findings
11 |
12 | **Date**: 2025-12-09
13 | **Status**: Validated - documcp is CE-MCP Compatible ✅
14 |
15 | ## Executive Summary
16 |
17 | After comprehensive research into the Code Execution with MCP (CE-MCP) paradigm, we've confirmed that **documcp's existing architecture is fully compatible with Code Mode clients** without requiring architectural changes.
18 |
19 | ## Key Discoveries
20 |
21 | ### 1. CE-MCP is Client-Side, Not Server-Side
22 |
23 | The CE-MCP paradigm described in the architectural guide is implemented by **MCP clients** (Claude Code, Cloudflare Workers AI, pctx), not servers:
24 |
25 | | Responsibility | Implementation | Status for documcp |
26 | | ------------------------------ | ------------------------------------------- | -------------------------- |
27 | | Code generation | MCP Client | ✅ Client handles |
28 | | Tool discovery | MCP Client (generates filesystem structure) | ✅ Compatible |
29 | | Sandboxed execution | MCP Client (isolates, Docker, etc.) | ✅ Client handles |
30 | | Security (AgentBound-style) | MCP Client (MCP Guardian, etc.) | ✅ Client handles |
31 | | Summary filtering | MCP Client | ✅ Compatible |
32 | | **Tool definitions & schemas** | **MCP Server (documcp)** | ✅ **Already implemented** |
33 | | **Tool execution** | **MCP Server (documcp)** | ✅ **Already implemented** |
34 |
35 | ### 2. What MCP Servers Provide
36 |
37 | According to Anthropic and Cloudflare's documentation:
38 |
39 | > "MCP is designed for tool-calling, but it doesn't actually _have to_ be used that way. The 'tools' that an MCP server exposes are really just an RPC interface with attached documentation."
40 |
41 | **MCP servers (like documcp) provide:**
42 |
43 | - Standard MCP protocol tools ✅ (documcp has 25+ tools)
44 | - Tool schemas and documentation ✅ (Zod-validated)
45 | - JSON-RPC interface ✅ (MCP SDK handles this)
46 |
47 | **That's it!** The client SDK handles everything else.
48 |
49 | ### 3. How Code Mode Works
50 |
51 | **Client-Side Transformation:**
52 |
53 | 1. Client connects to MCP server and receives tool definitions
54 | 2. Client converts tool definitions → TypeScript/Python code APIs
55 | 3. Client creates filesystem structure for tool discovery (e.g., `./servers/google-drive/getDocument.ts`)
56 | 4. LLM navigates filesystem and reads only needed tool definitions
57 | 5. LLM generates orchestration code using the tool APIs
58 | 6. Client executes code in secure sandbox (isolate, Docker, etc.)
59 | 7. Only final summary returned to LLM context
60 |
61 | **Result**: 98.7% token reduction, 75x cost reduction, 60% faster execution
62 |
63 | ### 4. MCP SDK 1.24.0 New Features
64 |
65 | PR #69 upgrades us from v0.6.0 → v1.24.0, bringing:
66 |
67 | - **SEP-1686: Tasks API** - New MCP primitive for long-running agent operations
68 | - Better SSE (Server-Sent Events) handling
69 | - OAuth enhancements (client credentials flow)
70 | - Improved type safety and Zod V4 compatibility
71 |
72 | ## Validation Results
73 |
74 | ### ✅ SDK Upgrade Successful
75 |
76 | - All tests pass: 91.67% coverage
77 | - No breaking changes detected
78 | - TypeScript compilation successful
79 | - Build successful
80 |
81 | ### ✅ documcp Architecture Validates
82 |
83 | **Why documcp is already Code Mode compatible:**
84 |
85 | 1. **Stateless Design** (ADR-001): Perfect for Code Mode workflows
86 | 2. **Modular Tools** (ADR-006): Each tool is independent and composable
87 | 3. **Zod Validation**: Provides excellent schema docs for code generation
88 | 4. **JSON-RPC**: Standard MCP protocol, works with all clients
89 | 5. **MCP Resources** (ADR-007): Perfect for summary-only result filtering
90 |
91 | ## Architectural Implications
92 |
93 | ### What documcp Does NOT Need
94 |
95 | ❌ Filesystem-based tool discovery system (client does this)
96 | ❌ Sandbox execution environment (client does this)
97 | ❌ AgentBound security framework (client does this)
98 | ❌ Code generation layer (client does this)
99 | ❌ Tool wrappers (client generates these)
100 | ❌ Major architectural changes
101 |
102 | ### What documcp COULD Optimize (Optional)
103 |
104 | These are **optional enhancements** for better Code Mode UX, not requirements:
105 |
106 | 1. **Tool Categorization** - Add metadata tags for easier discovery
107 | 2. **Concise Descriptions** - Optimize tool descriptions for token efficiency
108 | 3. **Result Summarization** - Return more concise results where appropriate
109 | 4. **MCP Tasks Integration** - Use new Tasks API for long-running operations
110 | 5. **Resource Optimization** - Better use of MCP resources for intermediate results
111 |
112 | ## Recommended Actions
113 |
114 | ### Immediate (Completed ✅)
115 |
116 | - [x] Merge PR #69 (SDK upgrade to 1.24.0)
117 | - [x] Run tests to validate compatibility
118 | - [x] Document CE-MCP findings
119 |
120 | ### Short-Term (This Sprint)
121 |
122 | - [ ] Create ADR-011: CE-MCP Compatibility and Code Mode Support
123 | - [ ] Update ADR-001: Add Code Mode compatibility note
124 | - [ ] Update ADR-006: Add tool organization recommendations
125 | - [ ] Update ADR-007: Add resource optimization for Code Mode
126 | - [ ] Test with Code Mode client (Claude Code, pctx)
127 | - [ ] Create CE-MCP usage documentation
128 |
129 | ### Medium-Term (Optional Optimizations)
130 |
131 | - [ ] Research which tools benefit from MCP Tasks API
132 | - [ ] Add tool categorization metadata
133 | - [ ] Optimize tool descriptions for token efficiency
134 | - [ ] Implement result summarization for large outputs
135 | - [ ] Create example Code Mode workflows
136 |
137 | ## References
138 |
139 | - [Anthropic: Code Execution with MCP](https://www.anthropic.com/engineering/code-execution-with-mcp)
140 | - [Cloudflare: Code Mode](https://blog.cloudflare.com/code-mode/)
141 | - [MCP Specification 2025-06-18](https://modelcontextprotocol.io/specification/2025-06-18)
142 | - [MCP SDK 1.24.0 Release Notes](https://github.com/modelcontextprotocol/typescript-sdk/releases/tag/1.24.0)
143 |
144 | ## Conclusion
145 |
146 | **documcp's existing architecture is fully Code Mode compatible.** The stateless, tool-based design aligns perfectly with the CE-MCP paradigm. No architectural changes are required—only optional optimizations to enhance the user experience with Code Mode clients.
147 |
148 | The CE-MCP paradigm validates our architectural decisions in ADR-001, ADR-006, and ADR-007. The focus should now shift to testing with Code Mode clients and documenting best practices for developers using documcp in Code Mode workflows.
149 |
```
--------------------------------------------------------------------------------
/docs/tutorials/getting-started.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | id: getting-started
3 | title: Getting Started with DocuMCP
4 | sidebar_label: Getting Started
5 | documcp:
6 | last_updated: "2025-11-20T00:46:21.972Z"
7 | last_validated: "2025-12-09T19:41:38.603Z"
8 | auto_updated: false
9 | update_frequency: monthly
10 | validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
11 | ---
12 |
13 | # Getting Started with DocuMCP
14 |
15 | This tutorial will guide you through setting up and using DocuMCP's intelligent documentation deployment system with memory-enhanced capabilities.
16 |
17 | ## Prerequisites
18 |
19 | Before you begin, ensure you have:
20 |
21 | - Node.js 20.0.0 or higher installed
22 | - Access to a GitHub repository
23 | - Claude Desktop, Cursor, or another MCP-compatible client
24 | - Basic familiarity with documentation workflows
25 |
26 | ## 🎯 Pro Tip: Reference LLM_CONTEXT.md
27 |
28 | When using DocuMCP through an AI assistant, **always reference the LLM_CONTEXT.md file** for comprehensive tool context:
29 |
30 | ```
31 | @LLM_CONTEXT.md help me set up documentation for my TypeScript project
32 | ```
33 |
34 | The `LLM_CONTEXT.md` file is auto-generated and contains:
35 |
36 | - All 45 tool descriptions and parameters
37 | - Usage examples and workflows
38 | - Memory system documentation
39 | - Phase 3 code-to-docs sync features
40 |
41 | **Location**: `/LLM_CONTEXT.md` (in project root)
42 |
43 | This ensures your AI assistant has complete context and can provide optimal recommendations.
44 |
45 | ## Step 1: Initial Repository Analysis
46 |
47 | Start by analyzing your repository to understand its characteristics and documentation needs:
48 |
49 | ```json
50 | {
51 | "path": "/path/to/your/project",
52 | "depth": "standard"
53 | }
54 | ```
55 |
56 | This will analyze your project and return:
57 |
58 | - **Project structure**: File counts, languages used, and organization
59 | - **Dependencies**: Production and development packages detected
60 | - **Documentation status**: Existing docs, README, contributing guidelines
61 | - **Smart recommendations**: Primary language, project type, team size estimates
62 | - **Unique analysis ID**: For use in subsequent steps
63 |
64 | Example response snippet:
65 |
66 | ```json
67 | {
68 | "id": "analysis_abc123xyz",
69 | "structure": {
70 | "totalFiles": 150,
71 | "languages": { ".ts": 45, ".js": 12, ".md": 8 },
72 | "hasTests": true,
73 | "hasCI": true
74 | },
75 | "dependencies": {
76 | "ecosystem": "javascript",
77 | "packages": ["react", "typescript"]
78 | },
79 | "recommendations": {
80 | "primaryLanguage": "typescript",
81 | "projectType": "library"
82 | }
83 | }
84 | ```
85 |
86 | ## Step 2: Memory-Enhanced SSG Recommendation
87 |
88 | Next, get intelligent recommendations powered by DocuMCP's memory system:
89 |
90 | ```json
91 | {
92 | "analysisId": "analysis_abc123xyz",
93 | "preferences": {
94 | "ecosystem": "javascript",
95 | "priority": "features"
96 | }
97 | }
98 | ```
99 |
100 | The memory system leverages patterns from 130+ previous projects to provide:
101 |
102 | - **Confidence-scored recommendations** (e.g., Docusaurus with 85% confidence)
103 | - **Historical success data** (69% deployment success rate insights)
104 | - **Pattern-based insights** (Hugo most common with 98 projects, but Docusaurus optimal for TypeScript)
105 | - **Similar project examples** to learn from successful configurations
106 |
107 | Example recommendation response:
108 |
109 | ```json
110 | {
111 | "recommended": "docusaurus",
112 | "confidence": 0.85,
113 | "reasoning": [
114 | "JavaScript/TypeScript ecosystem detected",
115 | "Modern React-based framework aligns with project stack",
116 | "Strong community support and active development"
117 | ],
118 | "alternatives": [
119 | {
120 | "name": "MkDocs",
121 | "score": 0.75,
122 | "pros": ["Simple setup", "Great themes"],
123 | "cons": ["Limited React component support"]
124 | }
125 | ]
126 | }
127 | ```
128 |
129 | ## Step 3: Configuration Generation
130 |
131 | Generate optimized configuration files for your chosen SSG:
132 |
133 | ```javascript
134 | // Generate Docusaurus configuration
135 | {
136 | "ssg": "docusaurus",
137 | "projectName": "Your Project",
138 | "projectDescription": "Your project description",
139 | "outputPath": "/path/to/your/repository"
140 | }
141 | ```
142 |
143 | ## Step 4: Diataxis Structure Setup
144 |
145 | Create a professional documentation structure following the Diataxis framework:
146 |
147 | ```javascript
148 | // Setup documentation structure
149 | {
150 | "path": "/path/to/your/repository/docs",
151 | "ssg": "docusaurus",
152 | "includeExamples": true
153 | }
154 | ```
155 |
156 | This creates four sections, each optimized for a distinct user context:
157 |
158 | - **Tutorials**: Learning-oriented guides for skill acquisition (study context)
159 | - **How-to Guides**: Problem-solving guides for specific tasks (work context)
160 | - **Reference**: Information-oriented content for lookup and verification (information context)
161 | - **Explanation**: Understanding-oriented content for context and background (understanding context)
162 |
163 | ## Step 5: GitHub Pages Deployment
164 |
165 | Set up automated deployment with security best practices:
166 |
167 | ```javascript
168 | // Deploy to GitHub Pages
169 | {
170 | "repository": "/path/to/your/repository",
171 | "ssg": "docusaurus",
172 | "branch": "gh-pages"
173 | }
174 | ```
175 |
176 | This generates:
177 |
178 | - GitHub Actions workflow with OIDC authentication
179 | - Minimal security permissions (pages:write, id-token:write only)
180 | - Automated build and deployment pipeline
181 |
182 | ## Step 6: Memory System Exploration
183 |
184 | Explore DocuMCP's advanced memory capabilities:
185 |
186 | ```javascript
187 | // Get learning statistics
188 | {
189 | "includeDetails": true
190 | }
191 |
192 | // Recall similar projects
193 | {
194 | "query": "typescript documentation",
195 | "type": "recommendation",
196 | "limit": 5
197 | }
198 | ```
199 |
200 | The memory system provides:
201 |
202 | - **Pattern Recognition**: Most successful SSG choices for your project type
203 | - **Historical Insights**: Success rates and common issues
204 | - **Smart Recommendations**: Enhanced suggestions based on similar projects
205 |
206 | ## Verification
207 |
208 | Verify your setup with these checks:
209 |
210 | 1. **Documentation Structure**: Confirm all Diataxis directories are created
211 | 2. **Configuration Files**: Check generated config files are valid
212 | 3. **GitHub Actions**: Verify workflow file in `.github/workflows/`
213 | 4. **Memory Insights**: Review recommendations and confidence scores
214 |
215 | ## Summary
216 |
217 | In this tutorial, you learned how to:
218 |
219 | - **Analyze repositories** with comprehensive project profiling
220 | - **Get intelligent SSG recommendations** using memory-enhanced insights
221 | - **Generate optimized configurations** for your chosen static site generator
222 | - **Create Diataxis-compliant structures** for professional documentation
223 | - **Set up automated GitHub Pages deployment** with security best practices
224 | - **Leverage the memory system** for enhanced recommendations and insights
225 |
226 | ## Next Steps
227 |
228 | - Explore [Memory-Enhanced Workflows](./memory-workflows.md)
229 | - Read [How-To Guides](../how-to/) for specific tasks
230 | - Check the [API Reference](../reference/) for complete tool documentation
231 | - Learn about [Diataxis Framework](../explanation/) principles
232 |
```
--------------------------------------------------------------------------------
/tests/memory/contextual-retrieval.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Basic unit tests for Contextual Memory Retrieval System
3 | * Tests basic context-aware memory retrieval capabilities
4 | * Part of Issue #55 - Advanced Memory Components Unit Tests
5 | */
6 |
7 | import { promises as fs } from "fs";
8 | import path from "path";
9 | import os from "os";
10 | import { MemoryManager } from "../../src/memory/manager.js";
11 | import ContextualRetrievalSystem, {
12 | RetrievalContext,
13 | } from "../../src/memory/contextual-retrieval.js";
14 |
15 | describe("ContextualRetrievalSystem", () => {
16 | let tempDir: string;
17 | let memoryManager: MemoryManager;
18 | let knowledgeGraph: any;
19 | let contextualRetrieval: ContextualRetrievalSystem;
20 |
21 | beforeEach(async () => {
22 | // Create unique temp directory for each test
23 | tempDir = path.join(
24 | os.tmpdir(),
25 | `contextual-retrieval-test-${Date.now()}-${Math.random()
26 | .toString(36)
27 | .substr(2, 9)}`,
28 | );
29 | await fs.mkdir(tempDir, { recursive: true });
30 |
31 | memoryManager = new MemoryManager(tempDir);
32 | await memoryManager.initialize();
33 |
34 | // Create a mock knowledge graph for testing
35 | knowledgeGraph = {
36 | findRelatedNodes: jest.fn().mockResolvedValue([]),
37 | getConnectionStrength: jest.fn().mockResolvedValue(0.5),
38 | query: jest.fn().mockReturnValue({ nodes: [], edges: [] }),
39 | };
40 |
41 | contextualRetrieval = new ContextualRetrievalSystem(
42 | memoryManager,
43 | knowledgeGraph,
44 | );
45 | });
46 |
47 | afterEach(async () => {
48 | // Cleanup temp directory
49 | try {
50 | await fs.rm(tempDir, { recursive: true, force: true });
51 | } catch (error) {
52 | // Ignore cleanup errors
53 | }
54 | });
55 |
56 | describe("Initialization and Configuration", () => {
57 | test("should create ContextualRetrievalSystem instance", () => {
58 | expect(contextualRetrieval).toBeInstanceOf(ContextualRetrievalSystem);
59 | });
60 |
61 | test("should have memory manager and knowledge graph dependencies", () => {
62 | expect(contextualRetrieval).toBeDefined();
63 | // Basic integration test - system should be created with dependencies
64 | });
65 | });
66 |
67 | describe("Basic Contextual Retrieval", () => {
68 | beforeEach(async () => {
69 | // Set up test memories for retrieval tests
70 | await memoryManager.remember("analysis", {
71 | projectPath: "/test/typescript-project",
72 | language: "typescript",
73 | framework: "react",
74 | outcome: "success",
75 | recommendation: "Use TypeScript for better type safety",
76 | });
77 |
78 | await memoryManager.remember("deployment", {
79 | projectPath: "/test/node-project",
80 | language: "javascript",
81 | framework: "express",
82 | outcome: "success",
83 | recommendation: "Deploy with Docker for consistency",
84 | });
85 |
86 | await memoryManager.remember("recommendation", {
87 | projectPath: "/test/python-project",
88 | language: "python",
89 | framework: "django",
90 | outcome: "failure",
91 | recommendation: "Check Python version compatibility",
92 | });
93 | });
94 |
95 | test("should retrieve contextual matches based on project context", async () => {
96 | const retrievalContext: RetrievalContext = {
97 | currentProject: {
98 | path: "/test/typescript-project",
99 | language: "typescript",
100 | framework: "react",
101 | },
102 | userIntent: {
103 | action: "analyze",
104 | urgency: "medium",
105 | experience: "intermediate",
106 | },
107 | temporalContext: {
108 | recency: "recent",
109 | },
110 | };
111 |
112 | const result = await contextualRetrieval.retrieve(
113 | "typescript react documentation",
114 | retrievalContext,
115 | );
116 |
117 | expect(result).toBeDefined();
118 | expect(result.matches).toBeDefined();
119 | expect(Array.isArray(result.matches)).toBe(true);
120 |
121 | // Basic structure validation
122 | if (result.matches.length > 0) {
123 | const match = result.matches[0];
124 | expect(match).toHaveProperty("memory");
125 | expect(match).toHaveProperty("relevanceScore");
126 | expect(typeof match.relevanceScore).toBe("number");
127 | }
128 | });
129 |
130 | test("should handle different user intents", async () => {
131 | const troubleshootContext: RetrievalContext = {
132 | userIntent: {
133 | action: "troubleshoot",
134 | urgency: "high",
135 | experience: "novice",
136 | },
137 | };
138 |
139 | const recommendContext: RetrievalContext = {
140 | userIntent: {
141 | action: "recommend",
142 | urgency: "low",
143 | experience: "expert",
144 | },
145 | };
146 |
147 | const troubleshootResult = await contextualRetrieval.retrieve(
148 | "deployment failed",
149 | troubleshootContext,
150 | );
151 | const recommendResult = await contextualRetrieval.retrieve(
152 | "best practices",
153 | recommendContext,
154 | );
155 |
156 | expect(troubleshootResult).toBeDefined();
157 | expect(recommendResult).toBeDefined();
158 | expect(Array.isArray(troubleshootResult.matches)).toBe(true);
159 | expect(Array.isArray(recommendResult.matches)).toBe(true);
160 | });
161 |
162 | test("should consider temporal context for relevance", async () => {
163 | const recentContext: RetrievalContext = {
164 | temporalContext: {
165 | recency: "recent",
166 | },
167 | };
168 |
169 | const historicalContext: RetrievalContext = {
170 | temporalContext: {
171 | recency: "historical",
172 | },
173 | };
174 |
175 | const recentResult = await contextualRetrieval.retrieve(
176 | "recent activity",
177 | recentContext,
178 | );
179 | const historicalResult = await contextualRetrieval.retrieve(
180 | "historical data",
181 | historicalContext,
182 | );
183 |
184 | expect(recentResult).toBeDefined();
185 | expect(historicalResult).toBeDefined();
186 | expect(Array.isArray(recentResult.matches)).toBe(true);
187 | expect(Array.isArray(historicalResult.matches)).toBe(true);
188 | });
189 | });
190 |
191 | describe("Error Handling and Edge Cases", () => {
192 | test("should handle empty query gracefully", async () => {
193 | const context: RetrievalContext = {
194 | userIntent: {
195 | action: "analyze",
196 | urgency: "medium",
197 | experience: "intermediate",
198 | },
199 | };
200 |
201 | const result = await contextualRetrieval.retrieve("", context);
202 |
203 | expect(result).toBeDefined();
204 | expect(result.matches).toBeDefined();
205 | expect(Array.isArray(result.matches)).toBe(true);
206 | });
207 |
208 | test("should handle minimal context", async () => {
209 | const minimalContext: RetrievalContext = {};
210 |
211 | const result = await contextualRetrieval.retrieve(
212 | "test query",
213 | minimalContext,
214 | );
215 |
216 | expect(result).toBeDefined();
217 | expect(result.matches).toBeDefined();
218 | expect(Array.isArray(result.matches)).toBe(true);
219 | });
220 | });
221 | });
222 |
```
--------------------------------------------------------------------------------
/tests/integration/mcp-readme-tools.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { promises as fs } from "fs";
2 | import { join } from "path";
3 |
4 | describe("MCP Integration Tests", () => {
5 | let tempDir: string;
6 |
7 | beforeEach(async () => {
8 | tempDir = join(process.cwd(), "test-mcp-integration-temp");
9 | await fs.mkdir(tempDir, { recursive: true });
10 | });
11 |
12 | afterEach(async () => {
13 | try {
14 | await fs.rm(tempDir, { recursive: true, force: true });
15 | } catch (error) {
16 | // Ignore cleanup errors
17 | }
18 | });
19 |
20 | describe("Tool Registration", () => {
21 | test("should include evaluate_readme_health in tools list", async () => {
22 | // This test verifies that the README health tool is properly registered
23 | // Since we can't directly access the server instance, we'll test the tool functions directly
24 | // but verify they match the expected MCP interface
25 |
26 | const { evaluateReadmeHealth } = await import(
27 | "../../src/tools/evaluate-readme-health.js"
28 | );
29 |
30 | // Test with valid parameters that match the MCP schema
31 | const readmePath = join(tempDir, "README.md");
32 | await fs.writeFile(readmePath, "# Test Project\n\nBasic README content.");
33 |
34 | const result = await evaluateReadmeHealth({
35 | readme_path: readmePath,
36 | project_type: "community_library", // Valid enum value from schema
37 | });
38 |
39 | expect(result.content).toBeDefined();
40 | expect(result.isError).toBe(false);
41 | });
42 |
43 | test("should include readme_best_practices in tools list", async () => {
44 | const { readmeBestPractices } = await import(
45 | "../../src/tools/readme-best-practices.js"
46 | );
47 |
48 | const readmePath = join(tempDir, "README.md");
49 | await fs.writeFile(
50 | readmePath,
51 | "# Test Library\n\nLibrary documentation.",
52 | );
53 |
54 | const result = await readmeBestPractices({
55 | readme_path: readmePath,
56 | project_type: "library", // Valid enum value from schema
57 | });
58 |
59 | expect(result.success).toBe(true);
60 | expect(result.data).toBeDefined();
61 | });
62 | });
63 |
64 | describe("Parameter Validation", () => {
65 | test("evaluate_readme_health should handle invalid project_type", async () => {
66 | const { evaluateReadmeHealth } = await import(
67 | "../../src/tools/evaluate-readme-health.js"
68 | );
69 |
70 | const readmePath = join(tempDir, "README.md");
71 | await fs.writeFile(readmePath, "# Test");
72 |
73 | const result = await evaluateReadmeHealth({
74 | readme_path: readmePath,
75 | project_type: "invalid_type" as any,
76 | });
77 |
78 | expect(result.isError).toBe(true);
79 | });
80 |
81 | test("readme_best_practices should handle invalid project_type", async () => {
82 | const { readmeBestPractices } = await import(
83 | "../../src/tools/readme-best-practices.js"
84 | );
85 |
86 | const readmePath = join(tempDir, "README.md");
87 | await fs.writeFile(readmePath, "# Test");
88 |
89 | const result = await readmeBestPractices({
90 | readme_path: readmePath,
91 | project_type: "invalid_type" as any,
92 | });
93 |
94 | expect(result.success).toBe(false);
95 | expect(result.error).toBeDefined();
96 | });
97 |
98 | test("evaluate_readme_health should handle missing file", async () => {
99 | const { evaluateReadmeHealth } = await import(
100 | "../../src/tools/evaluate-readme-health.js"
101 | );
102 |
103 | const result = await evaluateReadmeHealth({
104 | readme_path: join(tempDir, "nonexistent.md"),
105 | });
106 |
107 | expect(result.isError).toBe(true);
108 | });
109 |
110 | test("readme_best_practices should handle missing file without template", async () => {
111 | const { readmeBestPractices } = await import(
112 | "../../src/tools/readme-best-practices.js"
113 | );
114 |
115 | const result = await readmeBestPractices({
116 | readme_path: join(tempDir, "nonexistent.md"),
117 | generate_template: false,
118 | });
119 |
120 | expect(result.success).toBe(false);
121 | expect(result.error?.code).toBe("README_NOT_FOUND");
122 | });
123 | });
124 |
125 | describe("Response Format Consistency", () => {
126 | test("evaluate_readme_health should return MCP-formatted response", async () => {
127 | const { evaluateReadmeHealth } = await import(
128 | "../../src/tools/evaluate-readme-health.js"
129 | );
130 |
131 | const readmePath = join(tempDir, "README.md");
132 | await fs.writeFile(
133 | readmePath,
134 | "# Complete Project\n\n## Description\nDetailed description.",
135 | );
136 |
137 | const result = await evaluateReadmeHealth({
138 | readme_path: readmePath,
139 | });
140 |
141 | // Should be already formatted for MCP
142 | expect(result.content).toBeDefined();
143 | expect(Array.isArray(result.content)).toBe(true);
144 | expect(result.isError).toBeDefined();
145 |
146 | // Should have execution metadata
147 | const metadataContent = result.content.find((c) =>
148 | c.text.includes("Execution completed"),
149 | );
150 | expect(metadataContent).toBeDefined();
151 | });
152 |
153 | test("readme_best_practices should return MCPToolResponse that can be formatted", async () => {
154 | const { readmeBestPractices } = await import(
155 | "../../src/tools/readme-best-practices.js"
156 | );
157 | const { formatMCPResponse } = await import("../../src/types/api.js");
158 |
159 | const readmePath = join(tempDir, "README.md");
160 | await fs.writeFile(
161 | readmePath,
162 | "# Library Project\n\n## Installation\nnpm install",
163 | );
164 |
165 | const result = await readmeBestPractices({
166 | readme_path: readmePath,
167 | });
168 |
169 | // Should be raw MCPToolResponse
170 | expect(result.success).toBeDefined();
171 | expect(result.metadata).toBeDefined();
172 |
173 | // Should be formattable
174 | const formatted = formatMCPResponse(result);
175 | expect(formatted.content).toBeDefined();
176 | expect(Array.isArray(formatted.content)).toBe(true);
177 | expect(formatted.isError).toBe(false);
178 | });
179 | });
180 |
181 | describe("Cross-tool Consistency", () => {
182 | test("both tools should handle the same README file", async () => {
183 | const { evaluateReadmeHealth } = await import(
184 | "../../src/tools/evaluate-readme-health.js"
185 | );
186 | const { readmeBestPractices } = await import(
187 | "../../src/tools/readme-best-practices.js"
188 | );
189 |
190 | const readmePath = join(tempDir, "README.md");
191 | await fs.writeFile(
192 | readmePath,
193 | `# Test Project
194 |
195 | ## Description
196 | This is a comprehensive test project.
197 |
198 | ## Installation
199 | \`\`\`bash
200 | npm install test-project
201 | \`\`\`
202 |
203 | ## Usage
204 | \`\`\`javascript
205 | const test = require('test-project');
206 | test.run();
207 | \`\`\`
208 |
209 | ## Contributing
210 | Please read our contributing guidelines.
211 |
212 | ## License
213 | MIT License
214 | `,
215 | );
216 |
217 | // Both tools should work on the same file
218 | const healthResult = await evaluateReadmeHealth({
219 | readme_path: readmePath,
220 | project_type: "community_library",
221 | });
222 |
223 | const practicesResult = await readmeBestPractices({
224 | readme_path: readmePath,
225 | project_type: "library",
226 | });
227 |
228 | expect(healthResult.isError).toBe(false);
229 | expect(practicesResult.success).toBe(true);
230 | });
231 | });
232 | });
233 |
```
--------------------------------------------------------------------------------
/docs/guides/playwright-integration.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.948Z"
4 | last_validated: "2025-12-09T19:41:38.579Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
8 | ---
9 |
10 | # Playwright Integration Guide
11 |
12 | ## Overview
13 |
14 | DocuMCP can generate a complete Playwright E2E testing setup for your documentation site, including:
15 |
16 | - Playwright configuration
17 | - Link validation tests
18 | - Accessibility tests (WCAG 2.1 AA)
19 | - Docker/Podman containerization
20 | - GitHub Actions CI/CD workflow
21 |
22 | **Important**: Playwright is NOT a dependency of DocuMCP itself. Instead, DocuMCP **generates** the Playwright setup for your documentation site.
23 |
24 | ## Quick Start
25 |
26 | ### Generate Playwright Setup
27 |
28 | Use the `setup_playwright_tests` tool to generate all necessary files:
29 |
30 | ```typescript
31 | {
32 | tool: "setup_playwright_tests",
33 | arguments: {
34 | repositoryPath: "./my-docs-site",
35 | ssg: "docusaurus",
36 | projectName: "My Documentation",
37 | mainBranch: "main",
38 | includeAccessibilityTests: true,
39 | includeDockerfile: true,
40 | includeGitHubActions: true
41 | }
42 | }
43 | ```
44 |
45 | ### What Gets Generated
46 |
47 | ```
48 | my-docs-site/
49 | ├── playwright.config.ts # Playwright configuration
50 | ├── Dockerfile.playwright # Multi-stage Docker build
51 | ├── .github/workflows/
52 | │ └── docs-e2e-tests.yml # CI/CD workflow
53 | ├── tests/e2e/
54 | │ ├── link-validation.spec.ts # Link tests
55 | │ └── accessibility.spec.ts # A11y tests
56 | ├── package.json # Updated with Playwright deps
57 | └── .gitignore # Updated with test artifacts
58 | ```
59 |
60 | ## Generated Files Explained
61 |
62 | ### 1. Playwright Config (`playwright.config.ts`)
63 |
64 | ```typescript
65 | export default defineConfig({
66 | testDir: "./tests/e2e",
67 | timeout: 30 * 1000,
68 | use: {
69 | baseURL: process.env.BASE_URL || "http://localhost:3000",
70 | },
71 | projects: [{ name: "chromium" }, { name: "firefox" }, { name: "webkit" }],
72 | });
73 | ```
74 |
75 | ### 2. Link Validation Tests
76 |
77 | - ✅ Internal navigation links
78 | - ✅ External link HTTP status
79 | - ✅ Anchor/hash links
80 | - ✅ 404 detection
81 |
82 | ### 3. Accessibility Tests
83 |
84 | - ✅ WCAG 2.1 AA compliance (axe-core)
85 | - ✅ Keyboard navigation
86 | - ✅ Image alt text
87 | - ✅ Color contrast
88 |
89 | ### 4. Docker Multi-Stage Build
90 |
91 | ```dockerfile
92 | # Build docs
93 | FROM node:20-alpine AS builder
94 | RUN npm run build
95 |
96 | # Run tests
97 | FROM mcr.microsoft.com/playwright:v1.55.1 AS tester
98 | RUN npx playwright test
99 |
100 | # Serve production
101 | FROM nginx:alpine AS server
102 | COPY --from=builder build /usr/share/nginx/html
103 | ```
104 |
105 | ### 5. GitHub Actions Workflow
106 |
107 | Automated testing on every push/PR:
108 |
109 | 1. **Build** → Compile documentation
110 | 2. **Test** → Run Playwright in container (chromium, firefox, webkit)
111 | 3. **Deploy** → Push to GitHub Pages (if tests pass)
112 | 4. **Verify** → Test live production site
113 |
114 | ## Usage After Generation
115 |
116 | ### Local Testing
117 |
118 | ```bash
119 | # Install dependencies (in YOUR docs site, not DocuMCP)
120 | cd my-docs-site
121 | npm install
122 |
123 | # Install Playwright browsers
124 | npx playwright install
125 |
126 | # Run tests
127 | npm run test:e2e
128 |
129 | # Run tests in UI mode
130 | npm run test:e2e:ui
131 |
132 | # View test report
133 | npm run test:e2e:report
134 | ```
135 |
136 | ### Docker Testing
137 |
138 | ```bash
139 | # Build test container
140 | docker build -t my-docs-test -f Dockerfile.playwright .
141 |
142 | # Run tests in container
143 | docker run --rm my-docs-test
144 |
145 | # Or with Podman
146 | podman build -t my-docs-test -f Dockerfile.playwright .
147 | podman run --rm my-docs-test
148 | ```
149 |
150 | ### CI/CD Integration
151 |
152 | Push to trigger GitHub Actions:
153 |
154 | ```bash
155 | git add .
156 | git commit -m "Add Playwright E2E tests"
157 | git push origin main
158 | ```
159 |
160 | Workflow will automatically:
161 |
162 | - Build docs
163 | - Run E2E tests across browsers
164 | - Deploy to GitHub Pages (if all tests pass)
165 | - Test production site after deployment
166 |
167 | ## Customization
168 |
169 | ### Add More Tests
170 |
171 | Create new test files in `tests/e2e/`:
172 |
173 | ```typescript
174 | // tests/e2e/navigation.spec.ts
175 | import { test, expect } from "@playwright/test";
176 |
177 | test("breadcrumbs should work", async ({ page }) => {
178 | await page.goto("/docs/some-page");
179 | const breadcrumbs = page.locator('[aria-label="breadcrumb"]');
180 | await expect(breadcrumbs).toBeVisible();
181 | });
182 | ```
183 |
184 | ### Modify Configuration
185 |
186 | Edit `playwright.config.ts`:
187 |
188 | ```typescript
189 | export default defineConfig({
190 | // Increase timeout for slow networks
191 | timeout: 60 * 1000,
192 |
193 | // Add mobile viewports
194 | projects: [
195 | { name: "chromium" },
196 | { name: "Mobile Chrome", use: devices["Pixel 5"] },
197 | ],
198 | });
199 | ```
200 |
201 | ## SSG-Specific Configuration
202 |
203 | DocuMCP automatically configures for your SSG:
204 |
205 | | SSG | Build Command | Build Dir | Port |
206 | | ---------- | -------------------------- | --------- | ---- |
207 | | Jekyll | `bundle exec jekyll build` | `_site` | 4000 |
208 | | Hugo | `hugo` | `public` | 1313 |
209 | | Docusaurus | `npm run build` | `build` | 3000 |
210 | | MkDocs | `mkdocs build` | `site` | 8000 |
211 | | Eleventy | `npx @11ty/eleventy` | `_site` | 8080 |
212 |
213 | ## Knowledge Graph Integration
214 |
215 | Test results are tracked in DocuMCP's Knowledge Graph:
216 |
217 | ```typescript
218 | {
219 | type: "deployment_validation",
220 | properties: {
221 | playwrightResults: {
222 | totalTests: 25,
223 | passed: 24,
224 | failed: 1,
225 | browsers: ["chromium", "firefox", "webkit"],
226 | linksChecked: 127,
227 | brokenLinks: 0,
228 | accessibilityScore: 98,
229 | }
230 | }
231 | }
232 | ```
233 |
234 | ## Troubleshooting
235 |
236 | ### Tests Fail on External Links
237 |
238 | External link validation can fail due to:
239 |
240 | - Network timeouts
241 | - Rate limiting
242 | - CORS issues
243 |
244 | **Solution**: The generated tests only check the first 10 external links to limit flakiness; if failures persist, increase the request timeout in `playwright.config.ts`.
245 |
246 | ### Container Build Fails
247 |
248 | **Issue**: Docker build fails on dependency installation
249 |
250 | **Solution**: Check SSG-specific dependencies in package.json
251 |
252 | ### CI/CD Workflow Times Out
253 |
254 | **Issue**: GitHub Actions workflow exceeds time limit
255 |
256 | **Solution**: Run only chromium in CI, full matrix locally:
257 |
258 | ```yaml
259 | # .github/workflows/docs-e2e-tests.yml
260 | strategy:
261 | matrix:
262 | browser: [chromium] # Only chromium in CI
263 | ```
264 |
265 | ## Best Practices
266 |
267 | 1. **Run tests before pushing** - `npm run test:e2e`
268 | 2. **Use Docker locally** - Same environment as CI
269 | 3. **Update baselines** - When changing UI intentionally
270 | 4. **Monitor CI reports** - Check artifacts for failures
271 | 5. **Test production** - Workflow tests live site automatically
272 |
273 | ## Example Workflow
274 |
275 | ```bash
276 | # 1. User analyzes their documentation repo with DocuMCP
277 | documcp analyze_repository --path ./my-docs
278 |
279 | # 2. User generates Playwright setup
280 | documcp setup_playwright_tests \
281 | --repositoryPath ./my-docs \
282 | --ssg docusaurus \
283 | --projectName "My Docs"
284 |
285 | # 3. User installs dependencies (in THEIR repo)
286 | cd my-docs
287 | npm install
288 | npx playwright install
289 |
290 | # 4. User runs tests locally
291 | npm run test:e2e
292 |
293 | # 5. User pushes to GitHub
294 | git push origin main
295 |
296 | # 6. GitHub Actions runs tests automatically
297 | # 7. If tests pass, deploys to GitHub Pages
298 | # 8. Tests production site
299 | ```
300 |
301 | ## Resources
302 |
303 | - [Playwright Documentation](https://playwright.dev/)
304 | - [Complete Workflow Guide](./playwright-testing-workflow.md)
305 | - [Link Validation Integration](./link-validation.md)
306 | - [Axe Accessibility Testing](https://github.com/dequelabs/axe-core)
307 |
```
--------------------------------------------------------------------------------
/docs/how-to/repository-analysis.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.954Z"
4 | last_validated: "2025-12-09T19:41:38.585Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
8 | ---
9 |
10 | # How to Analyze Your Repository with DocuMCP
11 |
12 | This guide walks you through using DocuMCP's repository analysis capabilities to understand your project's documentation needs.
13 |
14 | ## What Repository Analysis Provides
15 |
16 | DocuMCP's analysis examines your project from multiple perspectives:
17 |
18 | - **Project Structure**: File organization, language distribution, directory structure
19 | - **Dependencies**: Package ecosystems, frameworks, and libraries in use
20 | - **Documentation Status**: Existing documentation files, README quality, coverage gaps
21 | - **Complexity Assessment**: Project size, team size estimates, maintenance requirements
22 | - **Recommendations**: Tailored suggestions based on your project characteristics
23 |
24 | ## Basic Analysis
25 |
26 | ### Simple Analysis Request
27 |
28 | ```
29 | analyze my repository
30 | ```
31 |
32 | This performs a standard-depth analysis covering all key aspects of your project.
33 |
34 | ### Specify Analysis Depth
35 |
36 | ```
37 | analyze my repository with deep analysis
38 | ```
39 |
40 | Available depth levels:
41 |
42 | - **quick**: Fast overview focusing on basic structure and languages
43 | - **standard**: Comprehensive analysis including dependencies and documentation (recommended)
44 | - **deep**: Detailed analysis with advanced insights and recommendations
45 |
46 | ## Understanding Analysis Results
47 |
48 | ### Project Structure Section
49 |
50 | ```json
51 | {
52 | "structure": {
53 | "totalFiles": 2034,
54 | "totalDirectories": 87,
55 | "languages": {
56 | ".ts": 86,
57 | ".js": 13,
58 | ".css": 3,
59 | ".html": 37
60 | },
61 | "hasTests": true,
62 | "hasCI": true,
63 | "hasDocs": true
64 | }
65 | }
66 | ```
67 |
68 | This tells you:
69 |
70 | - Scale of your project (file/directory count)
71 | - Primary programming languages
72 | - Presence of tests, CI/CD, and existing documentation
73 |
74 | ### Dependencies Analysis
75 |
76 | ```json
77 | {
78 | "dependencies": {
79 | "ecosystem": "javascript",
80 | "packages": ["@modelcontextprotocol/sdk", "zod", "typescript"],
81 | "devPackages": ["jest", "@types/node", "eslint"]
82 | }
83 | }
84 | ```
85 |
86 | This reveals:
87 |
88 | - Primary package ecosystem (npm, pip, cargo, etc.)
89 | - Key runtime dependencies
90 | - Development and tooling dependencies
91 |
92 | ### Documentation Assessment
93 |
94 | ```json
95 | {
96 | "documentation": {
97 | "hasReadme": true,
98 | "hasContributing": true,
99 | "hasLicense": true,
100 | "existingDocs": ["README.md", "docs/api.md"],
101 | "estimatedComplexity": "complex"
102 | }
103 | }
104 | ```
105 |
106 | This shows:
107 |
108 | - Presence of essential documentation files
109 | - Existing documentation structure
110 | - Complexity level for documentation planning
111 |
112 | ## Advanced Analysis Techniques
113 |
114 | ### Target Specific Directories
115 |
116 | ```
117 | analyze the src directory for API documentation needs
118 | ```
119 |
120 | ### Focus on Documentation Gaps
121 |
122 | ```
123 | what documentation is missing from my project?
124 | ```
125 |
126 | ### Analyze for Specific Use Cases
127 |
128 | ```
129 | analyze my repository to determine if it needs user guides or developer documentation
130 | ```
131 |
132 | ## Using Analysis Results
133 |
134 | ### For SSG Selection
135 |
136 | After analysis, use the results to get targeted recommendations:
137 |
138 | ```
139 | based on the analysis, what static site generator works best for my TypeScript project?
140 | ```
141 |
142 | ### For Documentation Planning
143 |
144 | Use analysis insights to plan your documentation structure:
145 |
146 | ```
147 | given my project complexity, how should I organize my documentation?
148 | ```
149 |
150 | ### For Deployment Strategy
151 |
152 | Let analysis guide your deployment approach:
153 |
154 | ```
155 | considering my project setup, what's the best way to deploy documentation?
156 | ```
157 |
158 | ## Analysis-Driven Workflows
159 |
160 | ### Complete Documentation Setup
161 |
162 | 1. **Analyze**: `analyze my repository for documentation needs`
163 | 2. **Plan**: Use analysis results to understand project characteristics
164 | 3. **Recommend**: `recommend documentation tools based on the analysis`
165 | 4. **Implement**: `set up documentation based on the recommendations`
166 |
167 | ### Documentation Audit
168 |
169 | 1. **Current State**: `analyze my existing documentation structure`
170 | 2. **Gap Analysis**: `what documentation gaps exist in my project?`
171 | 3. **Improvement Plan**: `how can I improve my current documentation?`
172 |
173 | ### Migration Planning
174 |
175 | 1. **Legacy Analysis**: `analyze my project's current documentation approach`
176 | 2. **Modern Approach**: `what modern documentation tools would work better?`
177 | 3. **Migration Strategy**: `how should I migrate from my current setup?`
178 |
179 | ## Interpreting Recommendations
180 |
181 | ### Project Type Classification
182 |
183 | Analysis categorizes your project as:
184 |
185 | - **library**: Reusable code packages requiring API documentation
186 | - **application**: End-user software needing user guides and tutorials
187 | - **tool**: Command-line or developer tools requiring usage documentation
188 |
189 | ### Team Size Estimation
190 |
191 | - **small**: 1-3 developers, favor simple solutions
192 | - **medium**: 4-10 developers, need collaborative features
193 | - **large**: 10+ developers, require enterprise-grade solutions
194 |
195 | ### Complexity Assessment
196 |
197 | - **simple**: Basic projects with minimal documentation needs
198 | - **moderate**: Standard projects requiring structured documentation
199 | - **complex**: Large projects needing comprehensive documentation strategies
200 |
201 | ## Common Analysis Patterns
202 |
203 | ### JavaScript/TypeScript Projects
204 |
205 | Analysis typically reveals:
206 |
207 | - npm ecosystem with extensive dev dependencies
208 | - Need for API documentation (if library)
209 | - Integration with existing build tools
210 | - Recommendation: Often Docusaurus or VuePress
211 |
212 | ### Python Projects
213 |
214 | Analysis usually shows:
215 |
216 | - pip/poetry ecosystem
217 | - Sphinx-compatible documentation needs
218 | - Strong preference for MkDocs
219 | - Integration with Python documentation standards
220 |
221 | ### Multi-Language Projects
222 |
223 | Analysis identifies:
224 |
225 | - Mixed ecosystems and dependencies
226 | - Need for language-agnostic solutions
227 | - Recommendation: Usually Hugo or Jekyll for flexibility
228 |
229 | ## Troubleshooting Analysis
230 |
231 | ### Incomplete Results
232 |
233 | If analysis seems incomplete:
234 |
235 | ```
236 | run deep analysis on my repository to get more detailed insights
237 | ```
238 |
239 | ### Focus on Specific Areas
240 |
241 | If you need more details about certain aspects:
242 |
243 | ```
244 | analyze my project's dependencies in detail
245 | ```
246 |
247 | ### Re-analyze After Changes
248 |
249 | After making significant changes:
250 |
251 | ```
252 | re-analyze my repository to see updated recommendations
253 | ```
254 |
255 | ## Analysis Memory and Caching
256 |
257 | DocuMCP stores analysis results for reference in future operations:
258 |
259 | - Analysis IDs are provided for referencing specific analyses
260 | - Results remain accessible throughout your session
261 | - Memory system learns from successful documentation deployments
262 |
263 | Use analysis IDs in follow-up requests:
264 |
265 | ```
266 | using analysis analysis_abc123, set up the recommended documentation structure
267 | ```
268 |
269 | ## Best Practices
270 |
271 | 1. **Start Fresh**: Begin new documentation projects with analysis
272 | 2. **Regular Reviews**: Re-analyze periodically as projects evolve
273 | 3. **Deep Dive When Needed**: Use deep analysis for complex projects
274 | 4. **Combine with Expertise**: Use analysis as a starting point, not final decision
275 | 5. **Iterate**: Refine based on analysis feedback and results
276 |
277 | Analysis is the foundation of effective documentation planning with DocuMCP. Use it to make informed decisions about tools, structure, and deployment strategies.
278 |
```
--------------------------------------------------------------------------------
/docs/research/domain-5-github-deployment/github-pages-security-analysis.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.967Z"
4 | last_validated: "2025-12-09T19:41:38.598Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
8 | ---
9 |
10 | # GitHub Pages Deployment Security and Limitations Analysis
11 |
12 | **Research Date**: 2025-01-14
13 | **Domain**: GitHub Pages Deployment Automation
14 | **Status**: Completed
15 |
16 | ## Research Overview
17 |
18 | Comprehensive analysis of GitHub Pages deployment security considerations, limitations, and automation best practices for DocuMCP implementation.
19 |
20 | ## GitHub Pages Security Model Analysis
21 |
22 | ### Deployment Methods & Security Implications
23 |
24 | #### **1. GitHub Actions (Official Method)**
25 |
26 | **Security Profile**:
27 |
28 | - ✅ **OIDC Token-based Authentication**: Uses JWT tokens with branch validation
29 | - ✅ **Permissions Model**: Requires explicit `pages: write` and `id-token: write`
30 | - ✅ **Environment Protection**: Supports environment rules and approvals
31 | - ⚠️ **First Deploy Challenge**: Manual branch selection required initially
32 |
33 | **Implementation Pattern**:
34 |
35 | ```yaml
36 | permissions:
37 | pages: write # Deploy to Pages
38 | id-token: write # Verify deployment origin
39 | contents: read # Checkout repository
40 |
41 | environment:
42 | name: github-pages
43 | url: ${{ steps.deployment.outputs.page_url }}
44 | ```
45 |
46 | #### **2. Deploy Keys (SSH Method)**
47 |
48 | **Security Profile**:
49 |
50 | - ✅ **Repository-specific**: Keys scoped to individual repositories
51 | - ✅ **Write Access Control**: Can be limited to deployment-only access
52 | - ⚠️ **Key Management**: Requires secure key generation and storage
53 | - ⚠️ **Cross-repo Complexity**: Each external repo needs separate key setup
54 |
55 | #### **3. Personal Access Tokens**
56 |
57 | **Security Profile**:
58 |
59 | - ⚠️ **Broad Permissions**: Often have wider access than needed
60 | - ⚠️ **Expiration Management**: Tokens expire and need rotation
61 | - ⚠️ **Account-wide Risk**: Compromise affects all accessible repositories
62 |
63 | ### GitHub Pages Deployment Limitations
64 |
65 | #### **Technical Constraints**
66 |
67 | 1. **Site Size Limits**:
68 |
69 | - Maximum 1GB per repository
70 | - Impacts large documentation sites with assets
71 | - No compression before size calculation
72 |
73 | 2. **Build Frequency Limits**:
74 |
75 | - 10 builds per hour soft limit
76 | - Additional builds queued for next hour
77 | - Can impact rapid deployment cycles
78 |
79 | 3. **Static Content Only**:
80 | - No server-side processing
81 | - No dynamic content generation
82 | - Limited to client-side JavaScript
83 |
84 | #### **Security Constraints**
85 |
86 | 1. **Content Security Policy**:
87 |
88 | - Default CSP may block certain resources
89 | - Limited ability to customize security headers
90 | - No server-side security controls
91 |
92 | 2. **HTTPS Enforcement**:
93 | - Custom domains require manual HTTPS setup
94 | - Certificate management through GitHub
95 | - No control over TLS configuration
96 |
97 | ### CI/CD Workflow Security Best Practices
98 |
99 | #### **Recommended Security Architecture**
100 |
101 | ```yaml
102 | name: Deploy Documentation
103 |
104 | on:
105 | push:
106 | branches: [main]
107 | pull_request:
108 | branches: [main]
109 |
110 | jobs:
111 | security-scan:
112 | runs-on: ubuntu-latest
113 | steps:
114 | - uses: actions/checkout@v4
115 | - name: Security scan
116 | run: |
117 | # Scan for secrets, vulnerabilities
118 | npm audit --audit-level high
119 |
120 | build:
121 | needs: security-scan
122 | runs-on: ubuntu-latest
123 | steps:
124 | - uses: actions/checkout@v4
125 | - name: Build site
126 | run: npm run build
127 | - name: Upload artifact
128 | uses: actions/upload-pages-artifact@v3
129 | with:
130 | path: ./dist
131 |
132 | deploy:
133 | if: github.ref == 'refs/heads/main'
134 | needs: build
135 | runs-on: ubuntu-latest
136 | permissions:
137 | pages: write
138 | id-token: write
139 | environment:
140 | name: github-pages
141 | url: ${{ steps.deployment.outputs.page_url }}
142 | steps:
143 | - name: Deploy to GitHub Pages
144 | id: deployment
145 | uses: actions/deploy-pages@v4
146 | ```
147 |
148 | #### **Security Validation Steps**
149 |
150 | 1. **Pre-deployment Checks**:
151 |
152 | - Secret scanning
153 | - Dependency vulnerability assessment
154 | - Content validation
155 |
156 | 2. **Deployment Security**:
157 |
158 | - Environment protection rules
159 | - Required reviewers for production
160 | - Branch protection enforcement
161 |
162 | 3. **Post-deployment Verification**:
163 | - Site accessibility validation
164 | - Security header verification
165 | - Content integrity checks
166 |
167 | ### DocuMCP Security Implementation Recommendations
168 |
169 | #### **Multi-layered Security Approach**
170 |
171 | 1. **Tool-level Security**:
172 |
173 | ```typescript
174 | // Example security validation in MCP tool
175 | const validateDeploymentSecurity = (config: DeploymentConfig) => {
176 | const securityChecks = {
177 | hasSecretScanning: checkSecretScanning(config),
178 | hasEnvironmentProtection: checkEnvironmentRules(config),
179 | hasProperPermissions: validatePermissions(config),
180 | hasSecurityHeaders: validateSecurityHeaders(config),
181 | };
182 |
183 | return securityChecks;
184 | };
185 | ```
186 |
187 | 2. **Configuration Template Security**:
188 |
189 | - Generate workflows with minimal required permissions
190 | - Include security scanning by default
191 | - Enforce environment protection for production
192 |
193 | 3. **User Education Components**:
194 | - Security best practices documentation
195 | - Common vulnerability warnings
196 | - Regular security updates guidance
197 |
198 | ### Risk Assessment & Mitigation
199 |
200 | #### **High-Risk Scenarios**
201 |
202 | 1. **Secret Exposure in Repositories**:
203 |
204 | - **Risk**: API keys, tokens in code
205 | - **Mitigation**: Mandatory secret scanning, education
206 |
207 | 2. **Malicious Pull Request Deployments**:
208 |
209 | - **Risk**: Untrusted code in preview deployments
210 | - **Mitigation**: Environment protection, review requirements
211 |
212 | 3. **Supply Chain Attacks**:
213 | - **Risk**: Compromised dependencies
214 | - **Mitigation**: Dependency scanning, lock files
215 |
216 | #### **Medium-Risk Scenarios**
217 |
218 | 1. **Excessive Permissions**:
219 |
220 | - **Risk**: Overprivileged deployment workflows
221 | - **Mitigation**: Principle of least privilege templates
222 |
223 | 2. **Unprotected Environments**:
224 | - **Risk**: Direct production deployments
225 | - **Mitigation**: Default environment protection
226 |
227 | ### Implementation Priorities for DocuMCP
228 |
229 | #### **Critical Security Features**
230 |
231 | 1. **Automated Security Scanning**: Integrate secret and vulnerability scanning
232 | 2. **Permission Minimization**: Generate workflows with minimal required permissions
233 | 3. **Environment Protection**: Default protection rules for production environments
234 | 4. **Security Documentation**: Clear guidance on security best practices
235 |
236 | #### **Enhanced Security Features**
237 |
238 | 1. **Custom Security Checks**: Advanced validation for specific project types
239 | 2. **Security Reporting**: Automated security posture assessment
240 | 3. **Incident Response**: Guidance for security issue handling
241 |
242 | ## Research Validation Status
243 |
244 | - ✅ GitHub Pages security model analyzed
245 | - ✅ Deployment methods evaluated
246 | - ✅ Security best practices documented
247 | - ✅ Risk assessment completed
248 | - ⚠️ Needs validation: Security template effectiveness testing
249 | - ⚠️ Needs implementation: DocuMCP security feature integration
250 |
251 | ## Sources & References
252 |
253 | 1. GitHub Pages Official Documentation - Security Guidelines
254 | 2. GitHub Actions Security Best Practices
255 | 3. OWASP Static Site Security Guide
256 | 4. GitHub Security Advisory Database
257 | 5. Community Security Analysis Reports
258 |
```
--------------------------------------------------------------------------------
/docs/research/research-integration-summary-2025-01-14.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.968Z"
4 | last_validated: "2025-12-09T19:41:38.599Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
8 | ---
9 |
10 | # Research Integration Summary
11 |
12 | **Date**: 2025-01-14
13 | **Status**: Completed
14 | **Integration Method**: Direct ADR Updates + Implementation Recommendations
15 |
16 | ## Research Integration Overview
17 |
18 | This document summarizes how research findings from systematic web research using Firecrawl MCP server have been incorporated into DocuMCP's architectural decisions and implementation planning.
19 |
20 | ## Research Areas Integrated
21 |
22 | ### ✅ **1. MCP Server Architecture (ADR-001)**
23 |
24 | **Research Source**: `domain-1-mcp-architecture/mcp-performance-research.md`
25 |
26 | **Key Integrations**:
27 |
28 | - **Performance Validation**: Confirmed TypeScript MCP SDK provides minimal overhead with JSON-RPC 2.0
29 | - **Memory Optimization**: Integrated streaming patterns (10x memory reduction) and worker threads (3-4x performance)
30 | - **Implementation Strategy**: Added concrete code patterns for repository analysis with performance benchmarks
31 |
32 | **ADR Updates Applied**:
33 |
34 | - Added "Research Integration" section with validated performance characteristics
35 | - Integrated specific implementation patterns for streaming and worker threads
36 | - Established research-validated performance targets for different repository sizes
37 |
38 | ### ✅ **2. SSG Recommendation Engine (ADR-003)**
39 |
40 | **Research Source**: `domain-3-ssg-recommendation/ssg-performance-analysis.md`
41 |
42 | **Key Integrations**:
43 |
44 | - **Performance Matrix**: Comprehensive build time analysis across SSG scales
45 | - **Algorithm Enhancement**: Research-validated scoring with scale-based weighting
46 | - **Real-World Data**: Hugo is 250x faster than Gatsby for small sites; the gap narrows to 40x for large sites
47 |
48 | **ADR Updates Applied**:
49 |
50 | - Enhanced performance modeling with research-validated SSG performance matrix
51 | - Updated recommendation algorithm with evidence-based scoring
52 | - Integrated scale-based performance weighting (critical path vs features)
53 |
54 | ### ✅ **3. GitHub Pages Deployment Security (ADR-005)**
55 |
56 | **Research Source**: `domain-5-github-deployment/github-pages-security-analysis.md`
57 |
58 | **Key Integrations**:
59 |
60 | - **Security Architecture**: OIDC token authentication with JWT validation
61 | - **Permission Minimization**: Specific `pages: write` and `id-token: write` requirements
62 | - **Environment Protection**: Default security rules with approval workflows
63 | - **Automated Scanning**: Integrated secret and vulnerability detection
64 |
65 | **ADR Updates Applied**:
66 |
67 | - Enhanced repository configuration management with research-validated security practices
68 | - Added multi-layered security approach with specific implementation details
69 | - Integrated automated security scanning and environment protection requirements
70 |
71 | ## Implementation Impact Analysis
72 |
73 | ### **Immediate Implementation Requirements**
74 |
75 | 1. **High Priority Updates** (Week 1-2):
76 |
77 | - Implement streaming-based repository analysis with 10MB threshold
78 | - Create worker thread pool for parallel file processing
79 | - Integrate OIDC-based GitHub Pages deployment templates
80 |
81 | 2. **Medium Priority Enhancements** (Week 3-4):
82 | - Develop SSG performance scoring algorithm with research-validated weights
83 | - Implement automated security scanning in generated workflows
84 | - Create environment protection templates
85 |
86 | ### **Architecture Validation Status**
87 |
88 | | **Decision Area** | **Research Status** | **Validation Result** | **Implementation Ready** |
89 | | --------------------- | ------------------- | ---------------------------- | ------------------------ |
90 | | TypeScript MCP SDK | ✅ Validated | Confirmed optimal choice | ✅ Yes |
91 | | Node.js Performance | ✅ Validated | Specific patterns identified | ✅ Yes |
92 | | SSG Recommendation | ✅ Validated | Algorithm refined | ✅ Yes |
93 | | GitHub Pages Security | ✅ Validated | Security model confirmed | ✅ Yes |
94 | | Repository Analysis | ✅ Validated | Streaming patterns proven | ✅ Yes |
95 |
96 | ### **Risk Mitigation Updates**
97 |
98 | **Original Risk**: Memory constraints for large repository analysis
99 | **Research Mitigation**: 10x memory reduction with streaming + worker threads
100 | **Implementation**: Concrete code patterns integrated into ADR-001
101 |
102 | **Original Risk**: SSG recommendation accuracy
103 | **Research Mitigation**: Evidence-based performance weighting algorithm
104 | **Implementation**: Performance matrix and scoring algorithm in ADR-003
105 |
106 | **Original Risk**: Deployment security vulnerabilities
107 | **Research Mitigation**: Multi-layered security with OIDC authentication
108 | **Implementation**: Enhanced security configuration in ADR-005
109 |
110 | ## Research Validation Metrics
111 |
112 | ### **Research Quality Assessment**
113 |
114 | - **Sources Analyzed**: 15+ authoritative sources (GitHub docs, CSS-Tricks benchmarks, security guides)
115 | - **Data Points Validated**: 50+ specific performance metrics and security practices
116 | - **Implementation Patterns**: 12+ concrete code examples and configuration templates
117 | - **Best Practices**: 25+ industry-validated approaches integrated
118 |
119 | ### **ADR Enhancement Metrics**
120 |
121 | - **ADRs Updated**: 3 core architectural decisions
122 | - **New Content Added**: ~500 lines of research-validated implementation guidance
123 | - **Performance Targets**: Quantitative benchmarks established for all components
124 | - **Security Practices**: Comprehensive security model with specific configurations
125 |
126 | ## Next Steps & Continuous Integration
127 |
128 | ### **Immediate Actions** (Next 48 hours)
129 |
130 | 1. **Implementation Planning**: Use research-validated patterns for MVP development
131 | 2. **Security Review**: Validate enhanced security configurations with team
132 | 3. **Performance Testing**: Create benchmarks based on research targets
133 |
134 | ### **Short-term Integration** (Next 2 weeks)
135 |
136 | 1. **Prototype Development**: Implement streaming repository analysis
137 | 2. **Algorithm Validation**: Test SSG recommendation scoring with real projects
138 | 3. **Security Testing**: Validate OIDC deployment workflows
139 |
140 | ### **Long-term Monitoring** (Ongoing)
141 |
142 | 1. **Performance Validation**: Compare actual performance against research predictions
143 | 2. **Security Auditing**: Regular validation of security practices
144 | 3. **Research Updates**: Monitor for new performance data and security practices
145 |
146 | ## Research Integration Success Criteria
147 |
148 | ✅ **Architectural Validation**: All core decisions validated with evidence
149 | ✅ **Implementation Guidance**: Concrete patterns and code examples provided
150 | ✅ **Performance Targets**: Quantitative benchmarks established
151 | ✅ **Security Framework**: Comprehensive security model implemented
152 | ✅ **Risk Mitigation**: Major risks addressed with validated solutions
153 |
154 | **Overall Integration Status**: **SUCCESSFUL** - Ready for implementation phase
155 |
156 | ---
157 |
158 | **Research Conducted Using**: Firecrawl MCP Server systematic web research
159 | **Research Duration**: 4 hours of intensive analysis
160 | **Integration Method**: Direct ADR updates with validation tracking
161 | **Confidence Level**: 95% — based on authoritative sources and comprehensive analysis
162 |
```
--------------------------------------------------------------------------------
/tests/memory/learning.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Basic unit tests for Incremental Learning System
3 | * Tests basic instantiation and core functionality
4 | * Part of Issue #54 - Core Memory System Unit Tests
5 | */
6 |
7 | import { promises as fs } from "fs";
8 | import path from "path";
9 | import os from "os";
10 | import { MemoryManager } from "../../src/memory/manager.js";
11 | import {
12 | IncrementalLearningSystem,
13 | ProjectFeatures,
14 | } from "../../src/memory/learning.js";
15 |
16 | describe("IncrementalLearningSystem", () => {
17 | let tempDir: string;
18 | let memoryManager: MemoryManager;
19 | let learning: IncrementalLearningSystem;
20 |
21 | beforeEach(async () => {
22 | // Create unique temp directory for each test
23 | tempDir = path.join(
24 | os.tmpdir(),
25 | `memory-learning-test-${Date.now()}-${Math.random()
26 | .toString(36)
27 | .substr(2, 9)}`,
28 | );
29 | await fs.mkdir(tempDir, { recursive: true });
30 |
31 | // Create memory manager for learning system
32 | memoryManager = new MemoryManager(tempDir);
33 | await memoryManager.initialize();
34 |
35 | learning = new IncrementalLearningSystem(memoryManager);
36 | await learning.initialize();
37 | });
38 |
39 | afterEach(async () => {
40 | // Cleanup temp directory
41 | try {
42 | await fs.rm(tempDir, { recursive: true, force: true });
43 | } catch (error) {
44 | // Ignore cleanup errors
45 | }
46 | });
47 |
48 | describe("Basic Learning System Tests", () => {
49 | test("should create learning system instance", () => {
50 | expect(learning).toBeDefined();
51 | expect(learning).toBeInstanceOf(IncrementalLearningSystem);
52 | });
53 |
54 | test("should be able to enable and disable learning", () => {
55 | learning.setLearningEnabled(false);
56 | learning.setLearningEnabled(true);
57 | // Just test that the methods exist and don't throw
58 | expect(true).toBe(true);
59 | });
60 |
61 | test("should have pattern retrieval capabilities", async () => {
62 | // Test pattern retrieval without throwing errors
63 | const patterns = await learning.getPatterns();
64 | expect(Array.isArray(patterns)).toBe(true);
65 | });
66 |
67 | test("should provide learning statistics", async () => {
68 | const stats = await learning.getStatistics();
69 | expect(stats).toBeDefined();
70 | expect(typeof stats.totalPatterns).toBe("number");
71 | expect(typeof stats.averageConfidence).toBe("number");
72 | expect(Array.isArray(stats.insights)).toBe(true);
73 | });
74 |
75 | test("should handle clearing patterns", async () => {
76 | await learning.clearPatterns();
77 | // Verify patterns are cleared
78 | const patterns = await learning.getPatterns();
79 | expect(Array.isArray(patterns)).toBe(true);
80 | expect(patterns.length).toBe(0);
81 | });
82 |
83 | test("should provide improved recommendations", async () => {
84 | const projectFeatures: ProjectFeatures = {
85 | language: "typescript",
86 | framework: "react",
87 | size: "medium" as const,
88 | complexity: "moderate" as const,
89 | hasTests: true,
90 | hasCI: true,
91 | hasDocs: false,
92 | isOpenSource: true,
93 | };
94 |
95 | const baseRecommendation = {
96 | recommended: "docusaurus",
97 | confidence: 0.8,
98 | score: 0.85,
99 | };
100 |
101 | const improved = await learning.getImprovedRecommendation(
102 | projectFeatures,
103 | baseRecommendation,
104 | );
105 | expect(improved).toBeDefined();
106 | expect(improved.recommendation).toBeDefined();
107 | expect(typeof improved.confidence).toBe("number");
108 | expect(Array.isArray(improved.insights)).toBe(true);
109 | });
110 |
111 | test("should handle learning from memory entries", async () => {
112 | const memoryEntry = await memoryManager.remember(
113 | "recommendation",
114 | {
115 | recommended: "docusaurus",
116 | confidence: 0.9,
117 | language: { primary: "typescript" },
118 | framework: { name: "react" },
119 | },
120 | {
121 | projectId: "test-project",
122 | ssg: "docusaurus",
123 | },
124 | );
125 |
126 | // Learn from successful outcome
127 | await learning.learn(memoryEntry, "success");
128 | // Verify no errors thrown
129 | expect(true).toBe(true);
130 | });
131 | });
132 |
133 | describe("Learning Statistics and Analysis", () => {
134 | test("should provide comprehensive learning statistics", async () => {
135 | const stats = await learning.getStatistics();
136 | expect(stats).toBeDefined();
137 | expect(typeof stats.totalPatterns).toBe("number");
138 | expect(typeof stats.averageConfidence).toBe("number");
139 | expect(typeof stats.learningVelocity).toBe("number");
140 | expect(typeof stats.patternsByType).toBe("object");
141 | expect(Array.isArray(stats.insights)).toBe(true);
142 | });
143 |
144 | test("should handle multiple learning iterations", async () => {
145 | const projectFeatures: ProjectFeatures = {
146 | language: "javascript",
147 | framework: "vue",
148 | size: "small" as const,
149 | complexity: "simple" as const,
150 | hasTests: false,
151 | hasCI: false,
152 | hasDocs: true,
153 | isOpenSource: false,
154 | };
155 |
156 | const baseRecommendation = {
157 | recommended: "vuepress",
158 | confidence: 0.7,
159 | score: 0.75,
160 | };
161 |
162 | // Multiple learning cycles
163 | for (let i = 0; i < 3; i++) {
164 | const improved = await learning.getImprovedRecommendation(
165 | projectFeatures,
166 | baseRecommendation,
167 | );
168 | expect(improved.recommendation).toBeDefined();
169 | }
170 |
171 | expect(true).toBe(true);
172 | });
173 | });
174 |
175 | describe("Error Handling", () => {
176 | test("should handle empty patterns gracefully", async () => {
177 | // Clear all patterns first
178 | await learning.clearPatterns();
179 |
180 | const patterns = await learning.getPatterns();
181 | expect(Array.isArray(patterns)).toBe(true);
182 | expect(patterns.length).toBe(0);
183 | });
184 |
185 | test("should handle learning with minimal data", async () => {
186 | const projectFeatures: ProjectFeatures = {
187 | language: "unknown",
188 | size: "small" as const,
189 | complexity: "simple" as const,
190 | hasTests: false,
191 | hasCI: false,
192 | hasDocs: false,
193 | isOpenSource: false,
194 | };
195 |
196 | const baseRecommendation = {
197 | recommended: "jekyll",
198 | confidence: 0.5,
199 | };
200 |
201 | const improved = await learning.getImprovedRecommendation(
202 | projectFeatures,
203 | baseRecommendation,
204 | );
205 | expect(improved).toBeDefined();
206 | expect(improved.recommendation).toBeDefined();
207 | });
208 |
209 | test("should handle concurrent learning operations", async () => {
210 | const promises = Array.from({ length: 3 }, async (_, i) => {
211 | const projectFeatures: ProjectFeatures = {
212 | language: "go",
213 | size: "medium" as const,
214 | complexity: "moderate" as const,
215 | hasTests: true,
216 | hasCI: true,
217 | hasDocs: true,
218 | isOpenSource: true,
219 | };
220 |
221 | const baseRecommendation = {
222 | recommended: "hugo",
223 | confidence: 0.8 + i * 0.02,
224 | };
225 |
226 | return learning.getImprovedRecommendation(
227 | projectFeatures,
228 | baseRecommendation,
229 | );
230 | });
231 |
232 | const results = await Promise.all(promises);
233 | expect(results.length).toBe(3);
234 | results.forEach((result) => {
235 | expect(result.recommendation).toBeDefined();
236 | });
237 | });
238 | });
239 | });
240 |
```
--------------------------------------------------------------------------------
/docs/how-to/prompting-guide.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.953Z"
4 | last_validated: "2025-12-09T19:41:38.585Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
8 | ---
9 |
10 | # How to Prompt DocuMCP Effectively
11 |
12 | This guide shows you how to interact with DocuMCP using effective prompts to get the best results from the system.
13 |
14 | ## 🎯 Pro Tip: Use @LLM_CONTEXT.md
15 |
16 | When using DocuMCP in your AI assistant (Claude, ChatGPT, etc.), **reference the LLM_CONTEXT.md file** for instant context about all 45 available tools:
17 |
18 | ```
19 | @LLM_CONTEXT.md analyze my repository and recommend the best deployment strategy
20 | ```
21 |
22 | The `LLM_CONTEXT.md` file provides:
23 |
24 | - Complete tool descriptions and parameters
25 | - Usage examples for all 45 tools
26 | - Common workflow patterns
27 | - Memory system documentation
28 | - Phase 3 code-to-docs sync capabilities
29 |
30 | **Location**: `/LLM_CONTEXT.md` in the root of your project
31 |
32 | This ensures your AI assistant has full context about DocuMCP's capabilities and can provide more accurate recommendations.
33 |
34 | ## Quick Start
35 |
36 | DocuMCP responds to natural language prompts. Here are the most common patterns:
37 |
38 | ### Basic Analysis
39 |
40 | ```
41 | analyze my repository for documentation needs
42 | ```
43 |
44 | ### Get Recommendations
45 |
46 | ```
47 | what static site generator should I use for my project?
48 | ```
49 |
50 | ### Deploy Documentation
51 |
52 | ```
53 | set up GitHub Pages deployment for my docs
54 | ```
55 |
56 | ## Available Tools
57 |
58 | DocuMCP provides several tools you can invoke through natural language prompts:
59 |
60 | ### 1. Repository Analysis
61 |
62 | **Purpose**: Analyze your project structure, dependencies, and documentation needs.
63 |
64 | **Example Prompts**:
65 |
66 | - "Analyze my repository structure"
67 | - "What documentation gaps do I have?"
68 | - "Examine my project for documentation opportunities"
69 |
70 | **What it returns**: Project analysis with language detection, dependency mapping, and complexity assessment.
71 |
72 | ### 2. SSG Recommendations
73 |
74 | **Purpose**: Get intelligent static site generator recommendations based on your project.
75 |
76 | **Example Prompts**:
77 |
78 | - "Recommend a static site generator for my TypeScript project"
79 | - "Which SSG works best with my Python documentation?"
80 | - "Compare documentation tools for my project"
81 |
82 | **What it returns**: Weighted recommendations with justifications for Jekyll, Hugo, Docusaurus, MkDocs, or Eleventy.
83 |
84 | ### 3. Configuration Generation
85 |
86 | **Purpose**: Generate SSG-specific configuration files.
87 |
88 | **Example Prompts**:
89 |
90 | - "Generate a Hugo config for my project"
91 | - "Create MkDocs configuration files"
92 | - "Set up Docusaurus for my documentation"
93 |
94 | **What it returns**: Ready-to-use configuration files optimized for your project.
95 |
96 | ### 4. Documentation Structure
97 |
98 | **Purpose**: Create Diataxis-compliant documentation structure.
99 |
100 | **Example Prompts**:
101 |
102 | - "Set up documentation structure following Diataxis"
103 | - "Create organized docs folders for my project"
104 | - "Build a comprehensive documentation layout"
105 |
106 | **What it returns**: Organized folder structure with templates following documentation best practices.
107 |
108 | ### 5. GitHub Pages Deployment
109 |
110 | **Purpose**: Automate GitHub Pages deployment workflows.
111 |
112 | **Example Prompts**:
113 |
114 | - "Deploy my docs to GitHub Pages"
115 | - "Set up automated documentation deployment"
116 | - "Create GitHub Actions for my documentation site"
117 |
118 | **What it returns**: GitHub Actions workflows configured for your chosen SSG.
119 |
120 | ### 6. Deployment Verification
121 |
122 | **Purpose**: Verify and troubleshoot GitHub Pages deployments.
123 |
124 | **Example Prompts**:
125 |
126 | - "Check if my GitHub Pages deployment is working"
127 | - "Troubleshoot my documentation deployment"
128 | - "Verify my docs site is live"
129 |
130 | **What it returns**: Deployment status and troubleshooting recommendations.
131 |
132 | ## Advanced Prompting Techniques
133 |
134 | ### Chained Operations
135 |
136 | You can chain multiple operations in a single conversation:
137 |
138 | ```
139 | 1. First analyze my repository
140 | 2. Then recommend the best SSG
141 | 3. Finally set up the deployment workflow
142 | ```
143 |
144 | ### Specific Requirements
145 |
146 | Be specific about your needs:
147 |
148 | ```
149 | I need a documentation site that:
150 | - Works with TypeScript
151 | - Supports API documentation
152 | - Has good search functionality
153 | - Deploys automatically on commits
154 | ```
155 |
156 | ### Context-Aware Requests
157 |
158 | Reference previous analysis:
159 |
160 | ```
161 | Based on the analysis you just did, create the documentation structure and deploy it to GitHub Pages
162 | ```
163 |
164 | ## Best Practices
165 |
166 | ### 1. Start with Analysis
167 |
168 | Always begin with repository analysis to get tailored recommendations:
169 |
170 | ```
171 | analyze my project for documentation needs
172 | ```
173 |
174 | ### 2. Be Specific About Goals
175 |
176 | Tell DocuMCP what you're trying to achieve:
177 |
178 | - "I need developer documentation for my API"
179 | - "I want user guides for my application"
180 | - "I need project documentation for contributors"
181 |
182 | ### 3. Specify Constraints
183 |
184 | Mention any limitations or preferences:
185 |
186 | - "I prefer minimal setup"
187 | - "I need something that works with our CI/CD pipeline"
188 | - "I want to use our existing design system"
189 |
190 | ### 4. Ask for Explanations
191 |
192 | Request reasoning behind recommendations:
193 |
194 | ```
195 | why did you recommend Hugo over Jekyll for my project?
196 | ```
197 |
198 | ### 5. Iterate and Refine
199 |
200 | Use follow-up prompts to refine results:
201 |
202 | ```
203 | can you modify the GitHub Actions workflow to also run tests?
204 | ```
205 |
206 | ## Common Workflows
207 |
208 | ### Complete Documentation Setup
209 |
210 | ```
211 | 1. "Analyze my repository for documentation needs"
212 | 2. "Recommend the best static site generator for my project"
213 | 3. "Generate configuration files for the recommended SSG"
214 | 4. "Set up Diataxis-compliant documentation structure"
215 | 5. "Deploy everything to GitHub Pages"
216 | ```
217 |
218 | ### Documentation Audit
219 |
220 | ```
221 | 1. "Analyze my existing documentation"
222 | 2. "What gaps do you see in my current docs?"
223 | 3. "How can I improve my documentation structure?"
224 | ```
225 |
226 | ### Deployment Troubleshooting
227 |
228 | ```
229 | 1. "My GitHub Pages site isn't working"
230 | 2. "Check my deployment configuration"
231 | 3. "Help me fix the build errors"
232 | ```
233 |
234 | ## Memory and Context
235 |
236 | DocuMCP remembers context within a conversation, so you can:
237 |
238 | - Reference previous analysis results
239 | - Build on earlier recommendations
240 | - Chain operations together seamlessly
241 |
242 | Example conversation flow:
243 |
244 | ```
245 | User: "analyze my repository"
246 | DocuMCP: [provides analysis]
247 | User: "based on that analysis, what SSG do you recommend?"
248 | DocuMCP: [provides recommendation using analysis context]
249 | User: "set it up with that recommendation"
250 | DocuMCP: [configures the recommended SSG]
251 | ```
252 |
253 | ## Troubleshooting Prompts
254 |
255 | If you're not getting the results you expect, try:
256 |
257 | ### More Specific Prompts
258 |
259 | Instead of: "help with docs"
260 | Try: "analyze my TypeScript project and recommend documentation tools"
261 |
262 | ### Context Setting
263 |
264 | Instead of: "set up deployment"
265 | Try: "set up GitHub Pages deployment for the MkDocs site we just configured"
266 |
267 | ### Direct Tool Requests
268 |
269 | If you know exactly what you want:
270 |
271 | - "use the analyze_repository tool on my current directory"
272 | - "run the recommend_ssg tool with my project data"
273 |
274 | ## Getting Help
275 |
276 | If you need assistance with prompting:
277 |
278 | - Ask DocuMCP to explain available tools: "what can you help me with?"
279 | - Request examples: "show me example prompts for documentation setup"
280 | - Ask for clarification: "I don't understand the recommendation, can you explain?"
281 |
282 | Remember: DocuMCP is designed to understand natural language, so don't hesitate to ask questions in your own words!
283 |
```
--------------------------------------------------------------------------------
/docs/tutorials/memory-workflows.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.973Z"
4 | last_validated: "2025-12-09T19:41:38.604Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
8 | ---
9 |
10 | # Memory Workflows and Advanced Features
11 |
12 | This tutorial covers DocuMCP's memory system and advanced workflow features for intelligent documentation management.
13 |
14 | ## Overview
15 |
16 | DocuMCP includes a sophisticated memory system that learns from your documentation patterns and provides intelligent assistance:
17 |
18 | ### Memory System Features
19 |
20 | - **Historical Analysis**: Learns from past documentation projects
21 | - **User Preferences**: Adapts to your documentation style
22 | - **Pattern Recognition**: Identifies successful documentation patterns
23 | - **Smart Recommendations**: Provides context-aware suggestions
24 |
25 | ### Advanced Workflows
26 |
27 | - **Multi-Project Memory**: Share insights across projects
28 | - **Collaborative Learning**: Learn from team documentation patterns
29 | - **Automated Optimization**: Continuously improve documentation quality
30 |
31 | ## Getting Started with Memory
32 |
33 | ### Initial Setup
34 |
35 | ```bash
36 | # Initialize memory system:
37 | "initialize memory system for my documentation workflow"
38 | ```
39 |
40 | ### Basic Memory Operations
41 |
42 | ```bash
43 | # Store analysis results:
44 | "store this analysis in memory for future reference"
45 |
46 | # Recall similar projects:
47 | "find similar projects in my memory"
48 |
49 | # Update preferences:
50 | "update my documentation preferences based on this project"
51 | ```
52 |
53 | ## Memory System Architecture
54 |
55 | ### Memory Components
56 |
57 | 1. **Analysis Memory**: Stores repository analysis results
58 | 2. **Recommendation Memory**: Tracks SSG recommendation patterns
59 | 3. **Deployment Memory**: Records deployment success patterns
60 | 4. **User Preference Memory**: Learns individual preferences
61 |
62 | ### Memory Storage
63 |
64 | ```yaml
65 | # Memory configuration
66 | memory:
67 | storage_path: ".documcp/memory"
68 | retention_policy: "keep_all"
69 | backup_enabled: true
70 | compression: true
71 | ```
72 |
73 | ## Advanced Memory Features
74 |
75 | ### Contextual Retrieval
76 |
77 | ```bash
78 | # Find relevant memories:
79 | "find memories related to TypeScript documentation projects"
80 |
81 | # Get contextual suggestions:
82 | "get suggestions based on my previous documentation patterns"
83 | ```
84 |
85 | ### Pattern Learning
86 |
87 | ```bash
88 | # Learn from successful deployments:
89 | "learn from successful documentation deployments"
90 |
91 | # Identify patterns:
92 | "identify successful documentation patterns in my memory"
93 | ```
94 |
95 | ### Collaborative Memory
96 |
97 | ```bash
98 | # Share memories with team:
99 | "share documentation patterns with my team"
100 |
101 | # Import team memories:
102 | "import documentation patterns from team members"
103 | ```
104 |
105 | ## Memory Workflow Examples
106 |
107 | ### Project Analysis Workflow
108 |
109 | ```bash
110 | # Complete analysis with memory integration:
111 | "analyze my repository and store results in memory for future reference"
112 | ```
113 |
114 | This workflow:
115 |
116 | 1. Analyzes the current repository
117 | 2. Compares with similar projects in memory
118 | 3. Provides enhanced recommendations
119 | 4. Stores results for future reference
120 |
121 | ### Recommendation Workflow
122 |
123 | ```bash
124 | # Get memory-enhanced recommendations:
125 | "recommend SSG based on my memory and current project"
126 | ```
127 |
128 | This workflow:
129 |
130 | 1. Retrieves relevant memories
131 | 2. Applies learned patterns
132 | 3. Provides personalized recommendations
133 | 4. Updates memory with results
134 |
135 | ### Deployment Workflow
136 |
137 | ```bash
138 | # Deploy with memory insights:
139 | "deploy documentation using insights from my memory"
140 | ```
141 |
142 | This workflow:
143 |
144 | 1. Applies learned deployment patterns
145 | 2. Uses successful configuration templates
146 | 3. Monitors for known issues
147 | 4. Records results for future learning
148 |
149 | ## Memory Management
150 |
151 | ### Memory Operations
152 |
153 | ```bash
154 | # List all memories:
155 | "list all memories in my system"
156 |
157 | # Search memories:
158 | "search memories for 'React documentation'"
159 |
160 | # Export memories:
161 | "export my documentation memories"
162 |
163 | # Import memories:
164 | "import documentation memories from file"
165 | ```
166 |
167 | ### Memory Optimization
168 |
169 | ```bash
170 | # Optimize memory storage:
171 | "optimize memory storage and remove duplicates"
172 |
173 | # Clean up old memories:
174 | "clean up memories older than 6 months"
175 |
176 | # Compress memory:
177 | "compress memory storage for efficiency"
178 | ```
179 |
180 | ## Advanced Workflow Patterns
181 |
182 | ### Multi-Project Memory Sharing
183 |
184 | ```bash
185 | # Set up project memory sharing:
186 | "set up memory sharing between my projects"
187 | ```
188 |
189 | ### Team Collaboration
190 |
191 | ```bash
192 | # Enable team memory sharing:
193 | "enable team memory sharing for documentation patterns"
194 | ```
195 |
196 | ### Automated Learning
197 |
198 | ```bash
199 | # Enable automated learning:
200 | "enable automated learning from documentation patterns"
201 | ```
202 |
203 | ## Memory Analytics
204 |
205 | ### Memory Insights
206 |
207 | ```bash
208 | # Get memory insights:
209 | "provide insights from my documentation memory"
210 | ```
211 |
212 | ### Success Pattern Analysis
213 |
214 | ```bash
215 | # Analyze success patterns:
216 | "analyze successful documentation patterns in my memory"
217 | ```
218 |
219 | ### Performance Tracking
220 |
221 | ```bash
222 | # Track memory performance:
223 | "track performance of memory-enhanced recommendations"
224 | ```
225 |
226 | ## Troubleshooting Memory Issues
227 |
228 | ### Common Problems
229 |
230 | **Problem**: Memory not loading
231 | **Solution**: Check memory file permissions and integrity
232 |
233 | **Problem**: Slow memory operations
234 | **Solution**: Optimize memory storage and clean up old data
235 |
236 | **Problem**: Inconsistent recommendations
237 | **Solution**: Review memory data quality and patterns
238 |
239 | ### Memory Debugging
240 |
241 | ```bash
242 | # Debug memory issues:
243 | "debug memory system problems"
244 |
245 | # Validate memory integrity:
246 | "validate memory data integrity"
247 |
248 | # Reset memory system:
249 | "reset memory system to defaults"
250 | ```
251 |
252 | ## Best Practices
253 |
254 | ### Memory Management
255 |
256 | 1. **Regular Backups**: Backup memory data regularly
257 | 2. **Quality Control**: Review and clean memory data
258 | 3. **Privacy**: Be mindful of sensitive data in memories
259 | 4. **Performance**: Monitor memory system performance
260 | 5. **Documentation**: Document your memory workflows
261 |
262 | ### Workflow Optimization
263 |
264 | 1. **Consistent Patterns**: Use consistent documentation patterns
265 | 2. **Regular Updates**: Update memory with new learnings
266 | 3. **Team Sharing**: Share successful patterns with team
267 | 4. **Continuous Learning**: Enable continuous learning features
268 | 5. **Performance Monitoring**: Monitor workflow performance
269 |
270 | ## Memory System Integration
271 |
272 | ### With Documentation Tools
273 |
274 | - **Repository Analysis**: Enhanced with historical data
275 | - **SSG Recommendations**: Improved with pattern learning
276 | - **Deployment Automation**: Optimized with success patterns
277 | - **Content Generation**: Informed by previous content
278 |
279 | ### With External Systems
280 |
281 | - **CI/CD Integration**: Memory-aware deployment pipelines
282 | - **Analytics Integration**: Memory-enhanced performance tracking
283 | - **Team Tools**: Collaborative memory sharing
284 | - **Backup Systems**: Automated memory backup
285 |
286 | ## Advanced Configuration
287 |
288 | ### Memory Configuration
289 |
290 | ```yaml
291 | # Advanced memory configuration
292 | memory:
293 | storage:
294 | type: "local"
295 | path: ".documcp/memory"
296 | encryption: true
297 |
298 | learning:
299 | enabled: true
300 | auto_update: true
301 | pattern_detection: true
302 |
303 | sharing:
304 | team_enabled: false
305 | project_sharing: true
306 | export_format: "json"
307 | ```
308 |
309 | ### Performance Tuning
310 |
311 | ```bash
312 | # Tune memory performance:
313 | "optimize memory system performance"
314 | ```
315 |
316 | ## Next Steps
317 |
318 | - [How-to Guides](../how-to/)
319 | - [Reference Documentation](../reference/)
320 | - [Architecture Explanation](../explanation/)
321 | - [Advanced Configuration](../reference/configuration.md)
322 |
```
--------------------------------------------------------------------------------
/docs/how-to/documentation-freshness-tracking.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.950Z"
4 | last_validated: "2025-12-09T19:41:38.581Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
8 | ---
9 |
10 | # How to Track Documentation Freshness
11 |
12 | This guide shows you how to use DocuMCP's documentation freshness tracking system to monitor and maintain up-to-date documentation.
13 |
14 | ## Quick Start
15 |
16 | ```bash
17 | # Initialize freshness tracking:
18 | "validate documentation freshness for my docs directory"
19 |
20 | # Check freshness status:
21 | "track documentation freshness"
22 | ```
23 |
24 | ## Overview
25 |
26 | Documentation freshness tracking helps you:
27 |
28 | - **Identify stale documentation**: Find files that haven't been updated recently
29 | - **Maintain quality**: Ensure documentation stays current with code changes
30 | - **Track history**: Monitor documentation updates over time via knowledge graph integration
31 | - **Automate maintenance**: Set up workflows for regular freshness checks
32 |
33 | ## Initial Setup
34 |
35 | ### Step 1: Initialize Freshness Metadata
36 |
37 | Before tracking freshness, you need to initialize metadata for your documentation files:
38 |
39 | ```json
40 | {
41 | "docsPath": "/path/to/docs",
42 | "projectPath": "/path/to/project",
43 | "initializeMissing": true,
44 | "validateAgainstGit": true
45 | }
46 | ```
47 |
48 | This will:
49 |
50 | - Create freshness metadata for all documentation files
51 | - Set initial timestamps based on file modification dates
52 | - Link metadata to git history (if available)
53 |
54 | ### Step 2: Verify Initialization
55 |
56 | Check that metadata was created successfully:
57 |
58 | ```bash
59 | # Track freshness to see initialized files:
60 | "track documentation freshness for my docs"
61 | ```
62 |
63 | You should see all files marked as "fresh" initially.
64 |
65 | ## Regular Freshness Checks
66 |
67 | ### Basic Tracking
68 |
69 | Run regular freshness checks to monitor documentation staleness:
70 |
71 | ```json
72 | {
73 | "docsPath": "/path/to/docs",
74 | "includeFileList": true
75 | }
76 | ```
77 |
78 | ### Using Presets
79 |
80 | DocuMCP provides convenient presets for different update frequencies:
81 |
82 | - **realtime**: For documentation that changes frequently (minutes/hours)
83 | - **active**: For actively maintained docs (days)
84 | - **recent**: For recently updated docs (weeks)
85 | - **weekly**: For weekly review cycles
86 | - **monthly**: For monthly maintenance (default)
87 | - **quarterly**: For quarterly reviews
88 |
89 | ```json
90 | {
91 | "docsPath": "/path/to/docs",
92 | "preset": "monthly"
93 | }
94 | ```
95 |
96 | ### Custom Thresholds
97 |
98 | Define your own staleness thresholds:
99 |
100 | ```json
101 | {
102 | "docsPath": "/path/to/docs",
103 | "warningThreshold": {
104 | "value": 7,
105 | "unit": "days"
106 | },
107 | "staleThreshold": {
108 | "value": 30,
109 | "unit": "days"
110 | },
111 | "criticalThreshold": {
112 | "value": 90,
113 | "unit": "days"
114 | }
115 | }
116 | ```
117 |
118 | ## Understanding Freshness Levels
119 |
120 | ### Fresh ✅
121 |
122 | Files updated within the warning threshold (default: 7 days)
123 |
124 | ### Warning 🟡
125 |
126 | Files older than warning threshold but newer than stale threshold (7-30 days)
127 |
128 | ### Stale 🟠
129 |
130 | Files older than stale threshold but newer than critical threshold (30-90 days)
131 |
132 | ### Critical 🔴
133 |
134 | Files older than critical threshold (90+ days)
135 |
136 | ### Unknown ❓
137 |
138 | Files without freshness metadata (need initialization)
139 |
140 | ## Workflow Examples
141 |
142 | ### Weekly Documentation Review
143 |
144 | ```bash
145 | # 1. Check freshness status
146 | "track documentation freshness with preset weekly"
147 |
148 | # 2. Review stale files and update as needed
149 | # (manually update documentation)
150 |
151 | # 3. Validate freshness after updates
152 | "validate documentation freshness and update existing metadata"
153 | ```
154 |
155 | ### After Major Code Changes
156 |
157 | ```bash
158 | # 1. Update documentation to reflect code changes
159 | # (manually update files)
160 |
161 | # 2. Validate freshness against git
162 | "validate documentation freshness with git validation"
163 |
164 | # 3. Track updated status
165 | "track documentation freshness"
166 | ```
167 |
168 | ### Automated CI/CD Integration
169 |
170 | Add freshness checks to your CI/CD pipeline:
171 |
172 | ```yaml
173 | # .github/workflows/docs-freshness.yml
174 | - name: Check Documentation Freshness
175 | run: |
176 | documcp track_documentation_freshness \
177 | --docsPath ./docs \
178 | --preset monthly \
179 | --failOnStale true
180 | ```
181 |
182 | ## Advanced Usage
183 |
184 | ### Knowledge Graph Integration
185 |
186 | Freshness tracking events are automatically stored in the knowledge graph:
187 |
188 | ```json
189 | {
190 | "docsPath": "/path/to/docs",
191 | "projectPath": "/path/to/project",
192 | "storeInKG": true
193 | }
194 | ```
195 |
196 | This enables:
197 |
198 | - Historical analysis of documentation updates
199 | - Pattern recognition across projects
200 | - Intelligent recommendations based on past behavior
201 |
202 | ### Sorting and Filtering
203 |
204 | Customize how files are displayed:
205 |
206 | ```jsonc
207 | {
208 | "docsPath": "/path/to/docs",
209 | "sortBy": "staleness", // Options: "age", "path", "staleness"
210 | "includeFileList": true
211 | }
212 | ```
213 |
214 | ### Git Integration
215 |
216 | Validate freshness against git history:
217 |
218 | ```json
219 | {
220 | "docsPath": "/path/to/docs",
221 | "projectPath": "/path/to/project",
222 | "validateAgainstGit": true
223 | }
224 | ```
225 |
226 | This compares file modification times with git commit history for more accurate staleness detection.
227 |
228 | ## Best Practices
229 |
230 | ### 1. Initialize Early
231 |
232 | Set up freshness tracking when you first create documentation:
233 |
234 | ```bash
235 | "initialize freshness tracking for my new documentation"
236 | ```
237 |
238 | ### 2. Regular Checks
239 |
240 | Schedule regular freshness checks:
241 |
242 | - Weekly for active projects
243 | - Monthly for stable projects
244 | - Quarterly for archived documentation
245 |
246 | ### 3. Update Thresholds
247 |
248 | Adjust thresholds based on your project's update frequency:
249 |
250 | - Active projects: 7/30/90 days
251 | - Stable projects: 30/90/180 days
252 | - Archived docs: 90/180/365 days
253 |
254 | ### 4. Integrate with Workflows
255 |
256 | Combine freshness tracking with other DocuMCP tools:
257 |
258 | ```bash
259 | # Check freshness → Update stale docs → Validate → Deploy
260 | "track documentation freshness, then update stale files, validate, and deploy"
261 | ```
262 |
263 | ### 5. Monitor Trends
264 |
265 | Use knowledge graph insights to identify patterns:
266 |
267 | ```bash
268 | # Get freshness insights from knowledge graph
269 | "get insights about documentation freshness trends"
270 | ```
271 |
272 | ## Troubleshooting
273 |
274 | ### Problem: All files show as "unknown"
275 |
276 | **Solution**: Run `validate_documentation_freshness` with `initializeMissing: true`
277 |
278 | ### Problem: Freshness not updating after file changes
279 |
280 | **Solution**: Run `validate_documentation_freshness` with `updateExisting: true`
281 |
282 | ### Problem: Git validation failing
283 |
284 | **Solution**: Ensure `projectPath` points to git repository root and git is initialized
285 |
286 | ### Problem: Thresholds not working as expected
287 |
288 | **Solution**: Check that threshold values are positive numbers and units match your needs
289 |
290 | ## Integration with Other Tools
291 |
292 | ### With Sitemap Management
293 |
294 | ```bash
295 | # Track freshness → Generate sitemap → Deploy
296 | "track documentation freshness, then generate sitemap and deploy"
297 | ```
298 |
299 | ### With Content Validation
300 |
301 | ```bash
302 | # Validate freshness → Validate content → Check links
303 | "validate documentation freshness, then validate content and check links"
304 | ```
305 |
306 | ### With Gap Detection
307 |
308 | ```bash
309 | # Detect gaps → Track freshness → Update documentation
310 | "detect documentation gaps, track freshness, and update stale files"
311 | ```
312 |
313 | ## Next Steps
314 |
315 | - [Site Monitoring](site-monitoring.md) - Monitor your documentation site health
316 | - [SEO Optimization](seo-optimization.md) - Improve search engine visibility
317 | - [Performance Optimization](performance-optimization.md) - Optimize documentation performance
318 | - [MCP Tools Reference](../reference/mcp-tools.md#documentation-freshness-tracking-tools) - Complete API reference for freshness tracking tools
319 |
```
--------------------------------------------------------------------------------
/docs/how-to/analytics-setup.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.949Z"
4 | last_validated: "2025-12-09T19:41:38.580Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | validated_against_commit: 306567b32114502c606244ad6c2930360bcd4201
8 | ---
9 |
10 | # How to Use DocuMCP Deployment Analytics
11 |
12 | This guide shows you how to access and use DocuMCP's built-in deployment analytics to track your documentation deployment success and patterns.
13 |
14 | ## Quick Setup
15 |
16 | ```bash
17 | # Analyze deployment patterns:
18 | "analyze my deployment history and provide insights"
19 | ```
20 |
21 | ## Analytics Overview
22 |
23 | DocuMCP provides comprehensive **deployment analytics** to help you understand and optimize your documentation deployment process:
24 |
25 | ### Analytics Types
26 |
27 | - **Deployment Success Tracking**: Monitor deployment success/failure rates
28 | - **SSG Performance Analytics**: Compare static site generator effectiveness
29 | - **Build Time Metrics**: Track deployment speed and optimization opportunities
30 | - **Project Pattern Analysis**: Understand which configurations work best
31 |
32 | ### Built-in Analytics Features
33 |
34 | - **Deployment Health Scoring**: 0-100 health score for your deployment pipeline
35 | - **SSG Comparison**: Compare success rates across different static site generators
36 | - **Trend Analysis**: Track deployment patterns over time
37 | - **Knowledge Graph Integration**: Learn from deployment history for better recommendations
38 |
39 | ## Using Deployment Analytics
40 |
41 | ### Method 1: Generate Full Analytics Report
42 |
43 | ```bash
44 | # Get comprehensive deployment analytics:
45 | "analyze my deployments and provide a full report"
46 | ```
47 |
48 | This will provide:
49 |
50 | 1. Overall deployment success rates
51 | 2. SSG performance comparison
52 | 3. Build time analysis
53 | 4. Project pattern insights
54 | 5. Recommendations for optimization
55 |
56 | ### Method 2: Specific Analytics Queries
57 |
58 | #### Get SSG Statistics
59 |
60 | ```bash
61 | # Analyze specific SSG performance:
62 | "show me statistics for Docusaurus deployments"
63 | ```
64 |
65 | #### Compare SSG Performance
66 |
67 | ```bash
68 | # Compare multiple SSGs:
69 | "compare deployment success rates between Hugo and Jekyll"
70 | ```
71 |
72 | #### Get Deployment Health Score
73 |
74 | ```bash
75 | # Check deployment pipeline health:
76 | "what is my deployment health score?"
77 | ```
78 |
79 | #### Analyze Deployment Trends
80 |
81 | ```bash
82 | # View deployment trends over time:
83 | "show me deployment trends for the last 30 days"
84 | ```
85 |
86 | ## Deployment Analytics Examples
87 |
88 | ### Sample Analytics Report
89 |
90 | ```typescript
91 | // Example deployment analytics report structure
92 | {
93 | "summary": {
94 | "totalProjects": 15,
95 | "totalDeployments": 42,
96 | "overallSuccessRate": 0.85,
97 | "mostUsedSSG": "docusaurus",
98 | "mostSuccessfulSSG": "hugo"
99 | },
100 | "patterns": [
101 | {
102 | "ssg": "docusaurus",
103 | "totalDeployments": 18,
104 | "successfulDeployments": 16,
105 | "failedDeployments": 2,
106 | "successRate": 0.89,
107 | "averageBuildTime": 45000,
108 | "projectCount": 8
109 | }
110 | ],
111 | "insights": [
112 | {
113 | "type": "success",
114 | "title": "High Success Rate",
115 | "description": "Excellent! 85% of deployments succeed"
116 | }
117 | ]
118 | }
119 | ```
120 |
121 | ### Health Score Breakdown
122 |
123 | ```typescript
124 | // Example health score analysis
125 | {
126 | "score": 78,
127 | "factors": [
128 | {
129 | "name": "Overall Success Rate",
130 | "impact": 34,
131 | "status": "good"
132 | },
133 | {
134 | "name": "Active Projects",
135 | "impact": 20,
136 | "status": "good"
137 | },
138 | {
139 | "name": "Deployment Activity",
140 | "impact": 15,
141 | "status": "warning"
142 | },
143 | {
144 | "name": "SSG Diversity",
145 | "impact": 9,
146 | "status": "warning"
147 | }
148 | ]
149 | }
150 | ```
151 |
152 | ### MCP Tool Integration
153 |
154 | ```typescript
155 | // Using the analyze_deployments MCP tool directly
156 | import { analyzeDeployments } from "./dist/tools/analyze-deployments.js";
157 |
158 | // Get full analytics report
159 | const report = await analyzeDeployments({
160 | analysisType: "full_report",
161 | });
162 |
163 | // Get specific SSG statistics
164 | const docusaurusStats = await analyzeDeployments({
165 | analysisType: "ssg_stats",
166 | ssg: "docusaurus",
167 | });
168 |
169 | // Compare multiple SSGs
170 | const comparison = await analyzeDeployments({
171 | analysisType: "compare",
172 | ssgs: ["hugo", "jekyll", "docusaurus"],
173 | });
174 |
175 | // Get deployment health score
176 | const health = await analyzeDeployments({
177 | analysisType: "health",
178 | });
179 | ```
180 |
181 | ## Advanced Deployment Analytics
182 |
183 | ### Deployment Pattern Analysis
184 |
185 | ```bash
186 | # Analyze deployment patterns by technology:
187 | "show me deployment success patterns for TypeScript projects"
188 |
189 | # Analyze by project size:
190 | "compare deployment success rates for small vs large projects"
191 |
192 | # Analyze by team size:
193 | "show deployment patterns for different team sizes"
194 | ```
195 |
196 | ### Knowledge Graph Insights
197 |
198 | ```bash
199 | # Get insights from deployment history:
200 | "what SSG works best for React projects based on deployment history?"
201 |
202 | # Learn from similar projects:
203 | "recommend deployment strategy based on similar successful projects"
204 |
205 | # Analyze failure patterns:
206 | "what are the common causes of deployment failures?"
207 | ```
208 |
209 | ### Trend Analysis
210 |
211 | ```bash
212 | # Analyze deployment trends:
213 | "show me deployment success trends over the last 6 months"
214 |
215 | # Compare time periods:
216 | "compare deployment performance between Q3 and Q4"
217 |
218 | # Identify improvement opportunities:
219 | "what deployment metrics have improved recently?"
220 | ```
221 |
222 | ## Troubleshooting
223 |
224 | ### Common Issues
225 |
226 | **Problem**: No deployment data available
227 | **Solution**: Deploy at least one project to start collecting analytics data
228 |
229 | **Problem**: Analytics tool returns empty results
230 | **Solution**: Ensure knowledge graph storage directory exists and has proper permissions
231 |
232 | **Problem**: Health score seems low
233 | **Solution**: Review deployment failures and optimize SSG configurations
234 |
235 | **Problem**: Missing deployment history
236 | **Solution**: Check that deployment tracking is enabled in knowledge graph
237 |
238 | ### Analytics Debugging
239 |
240 | ```bash
241 | # Debug deployment analytics issues:
242 | "check my deployment analytics configuration and data availability"
243 | ```
244 |
245 | ## Best Practices
246 |
247 | ### Deployment Analytics Guidelines
248 |
249 | 1. **Regular Deployments**: Deploy frequently to build meaningful analytics data
250 | 2. **Track Failures**: Learn from deployment failures to improve success rates
251 | 3. **Monitor Trends**: Review analytics weekly to identify patterns
252 | 4. **Compare SSGs**: Use analytics to choose the best SSG for each project type
253 | 5. **Health Monitoring**: Keep deployment health score above 70
254 |
255 | ### Data Quality
256 |
257 | 1. **Consistent Tracking**: Ensure all deployments are tracked in knowledge graph
258 | 2. **Clean Data**: Review and clean up failed deployment records periodically
259 | 3. **Regular Analysis**: Run analytics reports monthly to identify trends
260 | 4. **Documentation**: Document deployment patterns and insights
261 | 5. **Team Sharing**: Share analytics insights with your development team
262 |
263 | ## Deployment Analytics Tools
264 |
265 | ### Built-in DocuMCP Analytics
266 |
267 | - **Deployment success tracking**: Monitor success/failure rates
268 | - **SSG performance analysis**: Compare static site generator effectiveness
269 | - **Build time metrics**: Track deployment speed and optimization opportunities
270 | - **Knowledge graph insights**: Learn from deployment history patterns
271 |
272 | ### MCP Tools Available
273 |
274 | - `analyze_deployments`: Generate comprehensive deployment analytics
275 | - `deploy_pages`: Track deployment attempts and outcomes
276 | - `recommend_ssg`: Get SSG recommendations based on analytics
277 |
278 | ## Next Steps
279 |
280 | - [Deploy Pages](../reference/mcp-tools.md#deploy_pages)
281 | - [SSG Recommendations](../reference/mcp-tools.md#recommend_ssg)
282 | - [Knowledge Graph](../knowledge-graph.md)
283 | - [Troubleshooting](troubleshooting.md)
284 |
```
--------------------------------------------------------------------------------
/src/tools/verify-deployment.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { z } from "zod";
2 | import { promises as fs } from "fs";
3 | import path from "path";
4 | import { MCPToolResponse, formatMCPResponse } from "../types/api.js";
5 |
6 | const inputSchema = z.object({
7 | repository: z.string(),
8 | url: z.string().optional(),
9 | });
10 |
/** Result of a single deployment-readiness check. */
interface DeploymentCheck {
  /** Human-readable name of the check (e.g. "SSG Configuration"). */
  check: string;
  /** Outcome: "pass" is OK, "fail" blocks deployment, "warning" is advisory. */
  status: "pass" | "fail" | "warning";
  /** Description of what was (or was not) found. */
  message: string;
  /** Suggested remediation; present only for fail/warning results. */
  recommendation?: string;
}
17 |
18 | export async function verifyDeployment(
19 | args: unknown,
20 | ): Promise<{ content: any[] }> {
21 | const startTime = Date.now();
22 | const { repository, url } = inputSchema.parse(args);
23 |
24 | try {
25 | const checks: DeploymentCheck[] = [];
26 |
27 | // Determine repository path
28 | const repoPath = repository.startsWith("http") ? "." : repository;
29 |
30 | // Check 1: GitHub Actions workflow exists
31 | const workflowPath = path.join(repoPath, ".github", "workflows");
32 | try {
33 | const workflows = await fs.readdir(workflowPath);
34 | const deployWorkflow = workflows.find(
35 | (f) =>
36 | f.includes("deploy") || f.includes("pages") || f.includes("docs"),
37 | );
38 |
39 | if (deployWorkflow) {
40 | checks.push({
41 | check: "GitHub Actions Workflow",
42 | status: "pass",
43 | message: `Found deployment workflow: ${deployWorkflow}`,
44 | });
45 | } else {
46 | checks.push({
47 | check: "GitHub Actions Workflow",
48 | status: "fail",
49 | message: "No deployment workflow found",
50 | recommendation: "Run deploy_pages tool to create a workflow",
51 | });
52 | }
53 | } catch {
54 | checks.push({
55 | check: "GitHub Actions Workflow",
56 | status: "fail",
57 | message: "No .github/workflows directory found",
58 | recommendation: "Run deploy_pages tool to set up GitHub Actions",
59 | });
60 | }
61 |
62 | // Check 2: Documentation source files exist
63 | const docsPaths = ["docs", "documentation", "site", "content"];
64 | let docsFound = false;
65 |
66 | for (const docsPath of docsPaths) {
67 | try {
68 | const fullPath = path.join(repoPath, docsPath);
69 | const stats = await fs.stat(fullPath);
70 | if (stats.isDirectory()) {
71 | const files = await fs.readdir(fullPath);
72 | const mdFiles = files.filter(
73 | (f) => f.endsWith(".md") || f.endsWith(".mdx"),
74 | );
75 |
76 | if (mdFiles.length > 0) {
77 | docsFound = true;
78 | checks.push({
79 | check: "Documentation Source Files",
80 | status: "pass",
81 | message: `Found ${mdFiles.length} documentation files in ${docsPath}/`,
82 | });
83 | break;
84 | }
85 | }
86 | } catch {
87 | // Directory doesn't exist, continue checking
88 | }
89 | }
90 |
91 | if (!docsFound) {
92 | checks.push({
93 | check: "Documentation Source Files",
94 | status: "warning",
95 | message: "No documentation files found in standard locations",
96 | recommendation:
97 | "Run setup_structure tool to create documentation structure",
98 | });
99 | }
100 |
101 | // Check 3: Configuration files
102 | const configPatterns = [
103 | "docusaurus.config.js",
104 | "mkdocs.yml",
105 | "hugo.toml",
106 | "hugo.yaml",
107 | "_config.yml",
108 | ".eleventy.js",
109 | ];
110 |
111 | let configFound = false;
112 | for (const config of configPatterns) {
113 | try {
114 | await fs.access(path.join(repoPath, config));
115 | configFound = true;
116 | checks.push({
117 | check: "SSG Configuration",
118 | status: "pass",
119 | message: `Found configuration file: ${config}`,
120 | });
121 | break;
122 | } catch {
123 | // File doesn't exist, continue
124 | }
125 | }
126 |
127 | if (!configFound) {
128 | checks.push({
129 | check: "SSG Configuration",
130 | status: "fail",
131 | message: "No static site generator configuration found",
132 | recommendation: "Run generate_config tool to create SSG configuration",
133 | });
134 | }
135 |
136 | // Check 4: Build output directory
137 | const buildDirs = ["_site", "build", "dist", "public", "out"];
138 | let buildFound = false;
139 |
140 | for (const buildDir of buildDirs) {
141 | try {
142 | const buildPath = path.join(repoPath, buildDir);
143 | const stats = await fs.stat(buildPath);
144 | if (stats.isDirectory()) {
145 | buildFound = true;
146 | checks.push({
147 | check: "Build Output",
148 | status: "pass",
149 | message: `Found build output directory: ${buildDir}/`,
150 | });
151 | break;
152 | }
153 | } catch {
154 | // Directory doesn't exist
155 | }
156 | }
157 |
158 | if (!buildFound) {
159 | checks.push({
160 | check: "Build Output",
161 | status: "warning",
162 | message: "No build output directory found",
163 | recommendation: "Run your SSG build command to generate the site",
164 | });
165 | }
166 |
167 | // Check 5: GitHub Pages settings (if URL provided)
168 | if (url) {
169 | checks.push({
170 | check: "Deployment URL",
171 | status: "warning",
172 | message: `Expected URL: ${url}`,
173 | recommendation: "Verify GitHub Pages is enabled in repository settings",
174 | });
175 | }
176 |
177 | // Generate summary
178 | const passCount = checks.filter((c) => c.status === "pass").length;
179 | const failCount = checks.filter((c) => c.status === "fail").length;
180 | const warningCount = checks.filter((c) => c.status === "warning").length;
181 |
182 | let overallStatus = "Ready for deployment";
183 | if (failCount > 0) {
184 | overallStatus = "Configuration required";
185 | } else if (warningCount > 0) {
186 | overallStatus = "Minor issues detected";
187 | }
188 |
189 | const verificationResult = {
190 | repository,
191 | url,
192 | overallStatus,
193 | checks,
194 | summary: {
195 | passed: passCount,
196 | warnings: warningCount,
197 | failed: failCount,
198 | total: checks.length,
199 | },
200 | };
201 |
202 | const response: MCPToolResponse<typeof verificationResult> = {
203 | success: true,
204 | data: verificationResult,
205 | metadata: {
206 | toolVersion: "1.0.0",
207 | executionTime: Date.now() - startTime,
208 | timestamp: new Date().toISOString(),
209 | },
210 | recommendations: [
211 | {
212 | type:
213 | failCount > 0 ? "critical" : warningCount > 0 ? "warning" : "info",
214 | title: "Deployment Verification Complete",
215 | description: `${overallStatus}. ${passCount} checks passed, ${warningCount} warnings, ${failCount} failures.`,
216 | },
217 | ],
218 | nextSteps: checks
219 | .filter((check) => check.recommendation)
220 | .map((check) => ({
221 | action: check.recommendation!,
222 | toolRequired: check.recommendation!.includes("deploy_pages")
223 | ? "deploy_pages"
224 | : check.recommendation!.includes("setup_structure")
225 | ? "setup_structure"
226 | : check.recommendation!.includes("generate_config")
227 | ? "generate_config"
228 | : "manual",
229 | description: check.message,
230 | priority: check.status === "fail" ? "high" : ("medium" as const),
231 | })),
232 | };
233 |
234 | return formatMCPResponse(response);
235 | } catch (error) {
236 | const errorResponse: MCPToolResponse = {
237 | success: false,
238 | error: {
239 | code: "VERIFICATION_FAILED",
240 | message: `Failed to verify deployment: ${error}`,
241 | resolution: "Ensure repository path is accessible",
242 | },
243 | metadata: {
244 | toolVersion: "1.0.0",
245 | executionTime: Date.now() - startTime,
246 | timestamp: new Date().toISOString(),
247 | },
248 | };
249 | return formatMCPResponse(errorResponse);
250 | }
251 | }
252 |
253 | // Removed unused getStatusEmoji function - status indicators now handled in formatMCPResponse
254 |
```