# Directory Structure
```
├── .dockerignore
├── .eslintignore
├── .eslintrc.json
├── .github
│   ├── agents
│   │   ├── documcp-ast.md
│   │   ├── documcp-deploy.md
│   │   ├── documcp-memory.md
│   │   ├── documcp-test.md
│   │   └── documcp-tool.md
│   ├── copilot-instructions.md
│   ├── dependabot.yml
│   ├── ISSUE_TEMPLATE
│   │   ├── automated-changelog.md
│   │   ├── bug_report.md
│   │   ├── bug_report.yml
│   │   ├── documentation_issue.md
│   │   ├── feature_request.md
│   │   ├── feature_request.yml
│   │   ├── npm-publishing-fix.md
│   │   └── release_improvements.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   ├── release-drafter.yml
│   └── workflows
│       ├── auto-merge.yml
│       ├── ci.yml
│       ├── codeql.yml
│       ├── dependency-review.yml
│       ├── deploy-docs.yml
│       ├── README.md
│       ├── release-drafter.yml
│       └── release.yml
├── .gitignore
├── .husky
│   ├── commit-msg
│   └── pre-commit
├── .linkcheck.config.json
├── .markdown-link-check.json
├── .nvmrc
├── .pre-commit-config.yaml
├── .versionrc.json
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── commitlint.config.js
├── CONTRIBUTING.md
├── docker-compose.docs.yml
├── Dockerfile.docs
├── docs
│   ├── .docusaurus
│   │   ├── docusaurus-plugin-content-docs
│   │   │   └── default
│   │   │       └── __mdx-loader-dependency.json
│   │   └── docusaurus-plugin-content-pages
│   │       └── default
│   │           └── __plugin.json
│   ├── adrs
│   │   ├── 001-mcp-server-architecture.md
│   │   ├── 002-repository-analysis-engine.md
│   │   ├── 003-static-site-generator-recommendation-engine.md
│   │   ├── 004-diataxis-framework-integration.md
│   │   ├── 005-github-pages-deployment-automation.md
│   │   ├── 006-mcp-tools-api-design.md
│   │   ├── 007-mcp-prompts-and-resources-integration.md
│   │   ├── 008-intelligent-content-population-engine.md
│   │   ├── 009-content-accuracy-validation-framework.md
│   │   ├── 010-mcp-resource-pattern-redesign.md
│   │   └── README.md
│   ├── api
│   │   ├── .nojekyll
│   │   ├── assets
│   │   │   ├── hierarchy.js
│   │   │   ├── highlight.css
│   │   │   ├── icons.js
│   │   │   ├── icons.svg
│   │   │   ├── main.js
│   │   │   ├── navigation.js
│   │   │   ├── search.js
│   │   │   └── style.css
│   │   ├── hierarchy.html
│   │   ├── index.html
│   │   ├── modules.html
│   │   └── variables
│   │       └── TOOLS.html
│   ├── assets
│   │   └── logo.svg
│   ├── development
│   │   └── MCP_INSPECTOR_TESTING.md
│   ├── docusaurus.config.js
│   ├── explanation
│   │   ├── architecture.md
│   │   └── index.md
│   ├── guides
│   │   ├── link-validation.md
│   │   ├── playwright-integration.md
│   │   └── playwright-testing-workflow.md
│   ├── how-to
│   │   ├── analytics-setup.md
│   │   ├── custom-domains.md
│   │   ├── documentation-freshness-tracking.md
│   │   ├── github-pages-deployment.md
│   │   ├── index.md
│   │   ├── local-testing.md
│   │   ├── performance-optimization.md
│   │   ├── prompting-guide.md
│   │   ├── repository-analysis.md
│   │   ├── seo-optimization.md
│   │   ├── site-monitoring.md
│   │   ├── troubleshooting.md
│   │   └── usage-examples.md
│   ├── index.md
│   ├── knowledge-graph.md
│   ├── package-lock.json
│   ├── package.json
│   ├── phase-2-intelligence.md
│   ├── reference
│   │   ├── api-overview.md
│   │   ├── cli.md
│   │   ├── configuration.md
│   │   ├── deploy-pages.md
│   │   ├── index.md
│   │   ├── mcp-tools.md
│   │   └── prompt-templates.md
│   ├── research
│   │   ├── cross-domain-integration
│   │   │   └── README.md
│   │   ├── domain-1-mcp-architecture
│   │   │   ├── index.md
│   │   │   └── mcp-performance-research.md
│   │   ├── domain-2-repository-analysis
│   │   │   └── README.md
│   │   ├── domain-3-ssg-recommendation
│   │   │   ├── index.md
│   │   │   └── ssg-performance-analysis.md
│   │   ├── domain-4-diataxis-integration
│   │   │   └── README.md
│   │   ├── domain-5-github-deployment
│   │   │   ├── github-pages-security-analysis.md
│   │   │   └── index.md
│   │   ├── domain-6-api-design
│   │   │   └── README.md
│   │   ├── README.md
│   │   ├── research-integration-summary-2025-01-14.md
│   │   ├── research-progress-template.md
│   │   └── research-questions-2025-01-14.md
│   ├── robots.txt
│   ├── sidebars.js
│   ├── sitemap.xml
│   ├── src
│   │   └── css
│   │       └── custom.css
│   └── tutorials
│       ├── development-setup.md
│       ├── environment-setup.md
│       ├── first-deployment.md
│       ├── getting-started.md
│       ├── index.md
│       ├── memory-workflows.md
│       └── user-onboarding.md
├── jest.config.js
├── LICENSE
├── Makefile
├── MCP_PHASE2_IMPLEMENTATION.md
├── mcp-config-example.json
├── mcp.json
├── package-lock.json
├── package.json
├── README.md
├── release.sh
├── scripts
│   └── check-package-structure.cjs
├── SECURITY.md
├── setup-precommit.sh
├── src
│   ├── benchmarks
│   │   └── performance.ts
│   ├── index.ts
│   ├── memory
│   │   ├── contextual-retrieval.ts
│   │   ├── deployment-analytics.ts
│   │   ├── enhanced-manager.ts
│   │   ├── export-import.ts
│   │   ├── freshness-kg-integration.ts
│   │   ├── index.ts
│   │   ├── integration.ts
│   │   ├── kg-code-integration.ts
│   │   ├── kg-health.ts
│   │   ├── kg-integration.ts
│   │   ├── kg-link-validator.ts
│   │   ├── kg-storage.ts
│   │   ├── knowledge-graph.ts
│   │   ├── learning.ts
│   │   ├── manager.ts
│   │   ├── multi-agent-sharing.ts
│   │   ├── pruning.ts
│   │   ├── schemas.ts
│   │   ├── storage.ts
│   │   ├── temporal-analysis.ts
│   │   ├── user-preferences.ts
│   │   └── visualization.ts
│   ├── prompts
│   │   └── technical-writer-prompts.ts
│   ├── scripts
│   │   └── benchmark.ts
│   ├── templates
│   │   └── playwright
│   │       ├── accessibility.spec.template.ts
│   │       ├── Dockerfile.template
│   │       ├── docs-e2e.workflow.template.yml
│   │       ├── link-validation.spec.template.ts
│   │       └── playwright.config.template.ts
│   ├── tools
│   │   ├── analyze-deployments.ts
│   │   ├── analyze-readme.ts
│   │   ├── analyze-repository.ts
│   │   ├── check-documentation-links.ts
│   │   ├── deploy-pages.ts
│   │   ├── detect-gaps.ts
│   │   ├── evaluate-readme-health.ts
│   │   ├── generate-config.ts
│   │   ├── generate-contextual-content.ts
│   │   ├── generate-llm-context.ts
│   │   ├── generate-readme-template.ts
│   │   ├── generate-technical-writer-prompts.ts
│   │   ├── kg-health-check.ts
│   │   ├── manage-preferences.ts
│   │   ├── manage-sitemap.ts
│   │   ├── optimize-readme.ts
│   │   ├── populate-content.ts
│   │   ├── readme-best-practices.ts
│   │   ├── recommend-ssg.ts
│   │   ├── setup-playwright-tests.ts
│   │   ├── setup-structure.ts
│   │   ├── sync-code-to-docs.ts
│   │   ├── test-local-deployment.ts
│   │   ├── track-documentation-freshness.ts
│   │   ├── update-existing-documentation.ts
│   │   ├── validate-content.ts
│   │   ├── validate-documentation-freshness.ts
│   │   ├── validate-readme-checklist.ts
│   │   └── verify-deployment.ts
│   ├── types
│   │   └── api.ts
│   ├── utils
│   │   ├── ast-analyzer.ts
│   │   ├── code-scanner.ts
│   │   ├── content-extractor.ts
│   │   ├── drift-detector.ts
│   │   ├── freshness-tracker.ts
│   │   ├── language-parsers-simple.ts
│   │   ├── permission-checker.ts
│   │   └── sitemap-generator.ts
│   └── workflows
│       └── documentation-workflow.ts
├── test-docs-local.sh
├── tests
│   ├── api
│   │   └── mcp-responses.test.ts
│   ├── benchmarks
│   │   └── performance.test.ts
│   ├── edge-cases
│   │   └── error-handling.test.ts
│   ├── functional
│   │   └── tools.test.ts
│   ├── integration
│   │   ├── kg-documentation-workflow.test.ts
│   │   ├── knowledge-graph-workflow.test.ts
│   │   ├── mcp-readme-tools.test.ts
│   │   ├── memory-mcp-tools.test.ts
│   │   ├── readme-technical-writer.test.ts
│   │   └── workflow.test.ts
│   ├── memory
│   │   ├── contextual-retrieval.test.ts
│   │   ├── enhanced-manager.test.ts
│   │   ├── export-import.test.ts
│   │   ├── freshness-kg-integration.test.ts
│   │   ├── kg-code-integration.test.ts
│   │   ├── kg-health.test.ts
│   │   ├── kg-link-validator.test.ts
│   │   ├── kg-storage-validation.test.ts
│   │   ├── kg-storage.test.ts
│   │   ├── knowledge-graph-enhanced.test.ts
│   │   ├── knowledge-graph.test.ts
│   │   ├── learning.test.ts
│   │   ├── manager-advanced.test.ts
│   │   ├── manager.test.ts
│   │   ├── mcp-resource-integration.test.ts
│   │   ├── mcp-tool-persistence.test.ts
│   │   ├── schemas.test.ts
│   │   ├── storage.test.ts
│   │   ├── temporal-analysis.test.ts
│   │   └── user-preferences.test.ts
│   ├── performance
│   │   ├── memory-load-testing.test.ts
│   │   └── memory-stress-testing.test.ts
│   ├── prompts
│   │   ├── guided-workflow-prompts.test.ts
│   │   └── technical-writer-prompts.test.ts
│   ├── server.test.ts
│   ├── setup.ts
│   ├── tools
│   │   ├── all-tools.test.ts
│   │   ├── analyze-coverage.test.ts
│   │   ├── analyze-deployments.test.ts
│   │   ├── analyze-readme.test.ts
│   │   ├── analyze-repository.test.ts
│   │   ├── check-documentation-links.test.ts
│   │   ├── deploy-pages-kg-retrieval.test.ts
│   │   ├── deploy-pages-tracking.test.ts
│   │   ├── deploy-pages.test.ts
│   │   ├── detect-gaps.test.ts
│   │   ├── evaluate-readme-health.test.ts
│   │   ├── generate-contextual-content.test.ts
│   │   ├── generate-llm-context.test.ts
│   │   ├── generate-readme-template.test.ts
│   │   ├── generate-technical-writer-prompts.test.ts
│   │   ├── kg-health-check.test.ts
│   │   ├── manage-sitemap.test.ts
│   │   ├── optimize-readme.test.ts
│   │   ├── readme-best-practices.test.ts
│   │   ├── recommend-ssg-historical.test.ts
│   │   ├── recommend-ssg-preferences.test.ts
│   │   ├── recommend-ssg.test.ts
│   │   ├── simple-coverage.test.ts
│   │   ├── sync-code-to-docs.test.ts
│   │   ├── test-local-deployment.test.ts
│   │   ├── tool-error-handling.test.ts
│   │   ├── track-documentation-freshness.test.ts
│   │   ├── validate-content.test.ts
│   │   ├── validate-documentation-freshness.test.ts
│   │   └── validate-readme-checklist.test.ts
│   ├── types
│   │   └── type-safety.test.ts
│   └── utils
│       ├── ast-analyzer.test.ts
│       ├── content-extractor.test.ts
│       ├── drift-detector.test.ts
│       ├── freshness-tracker.test.ts
│       └── sitemap-generator.test.ts
├── tsconfig.json
└── typedoc.json
```
# Files
--------------------------------------------------------------------------------
/docs/tutorials/development-setup.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.970Z"
4 | last_validated: "2025-11-20T00:46:21.970Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | ---
8 |
9 | # Setting Up Your Development Environment
10 |
11 | This tutorial covers setting up a development environment for ongoing documentation work with DocuMCP, including local testing, content workflows, and maintenance automation.
12 |
13 | ## What You'll Set Up
14 |
15 | By the end of this tutorial, you'll have:
16 |
17 | - Local documentation development environment
18 | - Live reload and preview capabilities
19 | - Content validation and testing workflow
20 | - Automated quality checks
21 | - Integration with your existing development tools
22 |
23 | ## Prerequisites
24 |
25 | - Completed [Getting Started](getting-started.md) and [First Deployment](first-deployment.md)
26 | - Node.js 20.0.0+ installed
27 | - Your preferred code editor (VS Code recommended)
28 | - Git and GitHub CLI (optional but recommended)
29 |
30 | ## Development Environment Setup
31 |
32 | ### Step 1: Local Development Server
33 |
34 | Set up local development with live reload:
35 |
36 | ```bash
37 | # Test local deployment before pushing to GitHub
38 | "test my documentation build locally with live reload"
39 | ```
40 |
41 | This will:
42 |
43 | - Install development dependencies
44 | - Start local server (typically on http://localhost:3000)
45 | - Enable live reload for instant preview
46 | - Validate build process
47 |
48 | **For different SSGs:**
49 |
50 | **Docusaurus:**
51 |
52 | ```bash
53 | npm run start
54 | # Opens http://localhost:3000 with live reload
55 | ```
56 |
57 | **MkDocs:**
58 |
59 | ```bash
60 | mkdocs serve
61 | # Opens http://127.0.0.1:8000 with auto-reload
62 | ```
63 |
64 | **Hugo:**
65 |
66 | ```bash
67 | hugo server -D
68 | # Opens http://localhost:1313 with live reload
69 | ```
70 |
71 | **Jekyll:**
72 |
73 | ```bash
74 | bundle exec jekyll serve --livereload
75 | # Opens http://localhost:4000 with live reload
76 | ```
77 |
78 | ### Step 2: Content Validation Workflow
79 |
80 | Set up automated content validation:
81 |
82 | ```bash
83 | # Validate all documentation content
84 | "validate my documentation content for accuracy and completeness"
85 | ```
86 |
87 | This checks:
88 |
89 | - **Link validation**: Internal and external links
90 | - **Code syntax**: All code blocks and examples
91 | - **Image references**: Missing or broken images
92 | - **Content structure**: Diataxis compliance
93 | - **SEO optimization**: Meta tags, headings
94 |
95 | ### Step 3: Quality Assurance Integration
96 |
97 | Integrate quality checks into your workflow:
98 |
99 | ```bash
100 | # Set up comprehensive documentation quality checks
101 | "check all documentation links and validate content quality"
102 | ```
103 |
104 | **Available validation levels** (example prompts follow the list):
105 |
106 | - **Basic**: Link checking and syntax validation
107 | - **Comprehensive**: Full content analysis with Diataxis compliance
108 | - **Advanced**: Performance testing and SEO analysis
109 |
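Illustrative prompt phrasings for each level (any natural wording works; DocuMCP interprets the request):

```bash
# Basic: link checking and syntax validation only
"run basic validation on my documentation links and code examples"

# Comprehensive: adds full Diataxis compliance analysis
"run comprehensive validation of my documentation content"

# Advanced: adds performance testing and SEO analysis
"run advanced validation including performance and SEO checks"
```
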
110 | ### Step 4: Development Scripts Setup
111 |
112 | Add these scripts to your `package.json`:
113 |
114 | ```json
115 | {
116 | "scripts": {
117 | "docs:dev": "docusaurus start",
118 | "docs:build": "docusaurus build",
119 | "docs:serve": "docusaurus serve",
120 | "docs:validate": "npm run docs:check-links && npm run docs:test-build",
121 | "docs:check-links": "markdown-link-check docs/**/*.md",
122 | "docs:test-build": "npm run docs:build && npm run docs:serve -- --no-open",
123 | "docs:deploy": "npm run docs:validate && npm run docs:build"
124 | }
125 | }
126 | ```
127 |
128 | ## Editor Configuration
129 |
130 | ### VS Code Setup
131 |
132 | Create `.vscode/settings.json`:
133 |
134 | ```json
135 | {
136 | "markdownlint.config": {
137 | "MD013": false,
138 | "MD033": false
139 | },
140 | "files.associations": {
141 | "*.mdx": "mdx"
142 | },
143 | "editor.wordWrap": "on",
144 | "editor.quickSuggestions": {
145 | "strings": true
146 | },
147 | "[markdown]": {
148 | "editor.defaultFormatter": "esbenp.prettier-vscode",
149 | "editor.quickSuggestions": {
150 | "comments": "off",
151 | "strings": "off",
152 | "other": "off"
153 | }
154 | }
155 | }
156 | ```
157 |
158 | **Recommended VS Code Extensions:**
159 |
160 | - Markdown All in One
161 | - markdownlint
162 | - Prettier - Code formatter
163 | - GitLens
164 | - Live Server (for static preview)
165 |
166 | ### Content Writing Workflow
167 |
168 | Establish a content creation workflow (a command-level sketch follows the list):
169 |
170 | 1. **Create branch** for documentation changes
171 | 2. **Write content** using Diataxis principles
172 | 3. **Test locally** with live server
173 | 4. **Validate content** using DocuMCP tools
174 | 5. **Review and refine** based on validation feedback
175 | 6. **Commit and push** to trigger deployment
176 |
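A minimal version of this loop as shell commands (branch and script names are illustrative; steps 4 and 5 run through your MCP client):

```bash
# 1. Create a branch for the documentation change
git checkout -b docs/update-tutorial

# 2-3. Write content, then preview it with the live server
npm run docs:dev

# 4-5. Validate with DocuMCP ("validate my documentation content"), refine as needed

# 6. Commit and push to trigger deployment
git add docs/
git commit -m "docs: update tutorial"
git push -u origin docs/update-tutorial
```
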
177 | ## Automated Quality Checks
178 |
179 | ### Pre-commit Hooks
180 |
181 | Set up automated checks before commits:
182 |
183 | ```bash
184 | # Install husky for git hooks
185 | npm install --save-dev husky
186 |
187 | # Set up pre-commit hook
188 | npx husky add .husky/pre-commit "npm run docs:validate"
189 | ```
190 |
191 | Create `.husky/pre-commit`:
192 |
193 | ```bash
194 | #!/usr/bin/env sh
195 | . "$(dirname -- "$0")/_/husky.sh"
196 |
197 | echo "🔍 Validating documentation..."
198 | npm run docs:validate
199 |
200 | echo "📝 Checking markdown formatting..."
201 | npx prettier --check "docs/**/*.md"
202 |
203 | echo "🔗 Validating links..."
204 | npm run docs:check-links
205 | ```
206 |
207 | ### GitHub Actions Integration
208 |
209 | Enhance your deployment workflow with quality gates:
210 |
211 | ```yaml
212 | # .github/workflows/docs-quality.yml
213 | name: Documentation Quality
214 |
215 | on:
216 |   pull_request:
217 |     paths: ["docs/**", "*.md"]
218 |
219 | jobs:
220 |   quality-check:
221 |     runs-on: ubuntu-latest
222 |     steps:
223 |       - uses: actions/checkout@v4
224 |
225 |       - name: Setup Node.js
226 |         uses: actions/setup-node@v4
227 |         with:
228 |           node-version: "20"
229 |           cache: "npm"
230 |
231 |       - name: Install dependencies
232 |         run: npm ci
233 |
234 |       - name: Validate documentation
235 |         run: |
236 |           npm run docs:validate
237 |           npm run docs:check-links
238 |
239 |       - name: Test build
240 |         run: npm run docs:build
241 |
242 |       - name: Comment PR
243 |         uses: actions/github-script@v7
244 |         with:
245 |           script: |
246 |             github.rest.issues.createComment({
247 |               issue_number: context.issue.number,
248 |               owner: context.repo.owner,
249 |               repo: context.repo.repo,
250 |               body: '✅ Documentation quality checks passed!'
251 |             });
252 | ```
253 |
254 | ## Content Management Strategies
255 |
256 | ### Diataxis Organization
257 |
258 | Organize content following Diataxis principles:
259 |
260 | **Directory Structure:**
261 |
262 | ```
263 | docs/
264 | ├── tutorials/        # Learning-oriented (beginner-friendly)
265 | │   ├── getting-started.md
266 | │   ├── first-project.md
267 | │   └── advanced-concepts.md
268 | ├── how-to-guides/    # Problem-solving (practical steps)
269 | │   ├── troubleshooting.md
270 | │   ├── configuration.md
271 | │   └── deployment.md
272 | ├── reference/        # Information-oriented (comprehensive)
273 | │   ├── api-reference.md
274 | │   ├── cli-commands.md
275 | │   └── configuration-options.md
276 | └── explanation/      # Understanding-oriented (concepts)
277 |     ├── architecture.md
278 |     ├── design-decisions.md
279 |     └── best-practices.md
280 | ```
281 |
282 | ### Content Templates
283 |
284 | Create content templates for consistency:
285 |
286 | **Tutorial Template:**
287 |
288 | ```markdown
289 | # [Action] Tutorial
290 |
291 | ## What You'll Learn
292 |
293 | - Objective 1
294 | - Objective 2
295 |
296 | ## Prerequisites
297 |
298 | - Requirement 1
299 | - Requirement 2
300 |
301 | ## Step-by-Step Instructions
302 |
303 | ### Step 1: [Action]
304 |
305 | Instructions...
306 |
307 | ### Step 2: [Action]
308 |
309 | Instructions...
310 |
311 | ## Verification
312 |
313 | How to confirm success...
314 |
315 | ## Next Steps
316 |
317 | Where to go next...
318 | ```
319 |
320 | **How-to Guide Template:**
321 |
322 | ```markdown
323 | # How to [Solve Problem]
324 |
325 | ## Problem
326 |
327 | Clear problem statement...
328 |
329 | ## Solution
330 |
331 | Step-by-step solution...
332 |
333 | ## Alternative Approaches
334 |
335 | Other ways to solve this...
336 |
337 | ## Troubleshooting
338 |
339 | Common issues and fixes...
340 | ```
341 |
342 | ## Performance Optimization
343 |
344 | ### Build Performance
345 |
346 | Optimize build times:
347 |
348 | ```bash
349 | # Enable build caching (example environment variables for Gatsby-based sites)
350 | export GATSBY_CACHE_DIR=.cache
351 | export GATSBY_PUBLIC_DIR=public
352 |
353 | # Parallel processing
354 | export NODE_OPTIONS="--max-old-space-size=8192"
355 | ```
356 |
357 | **For large sites:**
358 |
359 | - Enable incremental builds
360 | - Use build caching
361 | - Optimize image processing
362 | - Minimize plugin usage
363 |
364 | ### Development Server Performance
365 |
366 | Speed up local development:
367 |
368 | ```bash
369 | # Fast refresh mode (Docusaurus)
370 | npm run start -- --fast-refresh
371 |
372 | # Hot reload with polling (for file system issues)
373 | npm run start -- --poll
374 |
375 | # Bind to a specific host and port
376 | npm run start -- --host 0.0.0.0 --port 3001
377 | ```
378 |
379 | ## Maintenance Automation
380 |
381 | ### Scheduled Content Validation
382 |
383 | Set up scheduled validation:
384 |
385 | ```yaml
386 | # .github/workflows/scheduled-validation.yml
387 | name: Scheduled Documentation Validation
388 |
389 | on:
390 |   schedule:
391 |     - cron: "0 2 * * 1" # Every Monday at 2 AM
392 |
393 | jobs:
394 |   validate:
395 |     runs-on: ubuntu-latest
396 |     steps:
397 |       - uses: actions/checkout@v4
398 |
399 |       - name: Setup Node.js
400 |         uses: actions/setup-node@v4
401 |         with:
402 |           node-version: "20"
403 |
404 |       - name: Full validation
405 |         run: |
406 |           "check all documentation links with external validation"
407 |           "validate all content for accuracy and completeness"
408 |
409 |       - name: Create issue on failure
410 |         if: failure()
411 |         uses: actions/github-script@v7
412 |         with:
413 |           script: |
414 |             github.rest.issues.create({
415 |               owner: context.repo.owner,
416 |               repo: context.repo.repo,
417 |               title: 'Scheduled Documentation Validation Failed',
418 |               body: 'The weekly documentation validation found issues. Check the workflow logs.',
419 |               labels: ['documentation', 'maintenance']
420 |             });
421 | ```
422 |
423 | ### Dependency Updates
424 |
425 | Automate dependency maintenance:
426 |
427 | ```yaml
428 | # .github/dependabot.yml
429 | version: 2
430 | updates:
431 | - package-ecosystem: "npm"
432 | directory: "/"
433 | schedule:
434 | interval: "weekly"
435 | open-pull-requests-limit: 5
436 | labels:
437 | - "dependencies"
438 | - "documentation"
439 | ```
440 |
441 | ## Collaboration Workflow
442 |
443 | ### Team Development
444 |
445 | For team documentation:
446 |
447 | 1. **Branching strategy**: Feature branches for documentation changes
448 | 2. **Review process**: PR reviews for all documentation updates
449 | 3. **Style guide**: Consistent writing and formatting standards
450 | 4. **Content ownership**: Assign sections to team members
451 |
452 | ### Review Checklist
453 |
454 | Documentation PR review checklist:
455 |
456 | - [ ] Content follows Diataxis principles
457 | - [ ] All links work (internal and external)
458 | - [ ] Code examples are tested and accurate
459 | - [ ] Images are optimized and accessible
460 | - [ ] SEO metadata is complete
461 | - [ ] Mobile responsiveness verified
462 | - [ ] Build succeeds locally and in CI
463 |
464 | ## Next Steps
465 |
466 | Your development environment is now ready! Next:
467 |
468 | 1. **[Learn advanced prompting](../how-to/prompting-guide.md)** for DocuMCP
469 | 2. **[Set up monitoring](../how-to/site-monitoring.md)** for your live site
470 | 3. **[Optimize for performance](../how-to/performance-optimization.md)**
471 | 4. **[Configure custom domains](../how-to/custom-domains.md)** (optional)
472 |
473 | ## Troubleshooting
474 |
475 | **Common development issues:**
476 |
477 | **Port conflicts:**
478 |
479 | ```bash
480 | # Change default port
481 | npm run start -- --port 3001
482 | ```
483 |
484 | **Memory issues:**
485 |
486 | ```bash
487 | # Increase Node.js memory limit
488 | export NODE_OPTIONS="--max-old-space-size=8192"
489 | ```
490 |
491 | **File watching problems:**
492 |
493 | ```bash
494 | # Enable polling for file changes
495 | npm run start -- --poll
496 | ```
497 |
498 | **Cache issues:**
499 |
500 | ```bash
501 | # Clear build cache
502 | rm -rf .docusaurus .cache public
503 | npm run start
504 | ```
505 |
506 | ## Summary
507 |
508 | You now have:
509 | - ✅ Local development environment with live reload
510 | - ✅ Content validation and quality checking
511 | - ✅ Automated pre-commit hooks
512 | - ✅ CI/CD integration for quality gates
513 | - ✅ Performance optimization
514 | - ✅ Maintenance automation
515 | - ✅ Team collaboration workflow
516 |
517 | Your documentation development environment is production-ready!
518 |
```
--------------------------------------------------------------------------------
/src/tools/track-documentation-freshness.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 |  * Track Documentation Freshness Tool
3 |  *
4 |  * Scans a documentation directory for staleness markers and identifies
5 |  * files needing updates based on configurable time thresholds.
6 |  */
7 |
8 | import { z } from "zod";
9 | import {
10 |   scanDocumentationFreshness,
11 |   STALENESS_PRESETS,
12 |   type StalenessThreshold,
13 |   type FreshnessScanReport,
14 | } from "../utils/freshness-tracker.js";
15 | import { type MCPToolResponse } from "../types/api.js";
16 | import {
17 |   storeFreshnessEvent,
18 |   getStalenessInsights,
19 | } from "../memory/freshness-kg-integration.js";
20 |
21 | /**
22 |  * Input schema for track_documentation_freshness tool
23 |  */
24 | export const TrackDocumentationFreshnessSchema = z.object({
25 |   docsPath: z.string().describe("Path to documentation directory"),
26 |   projectPath: z
27 |     .string()
28 |     .optional()
29 |     .describe("Path to project root (for knowledge graph tracking)"),
30 |   warningThreshold: z
31 |     .object({
32 |       value: z.number().positive(),
33 |       unit: z.enum(["minutes", "hours", "days"]),
34 |     })
35 |     .optional()
36 |     .describe("Warning threshold (yellow flag)"),
37 |   staleThreshold: z
38 |     .object({
39 |       value: z.number().positive(),
40 |       unit: z.enum(["minutes", "hours", "days"]),
41 |     })
42 |     .optional()
43 |     .describe("Stale threshold (orange flag)"),
44 |   criticalThreshold: z
45 |     .object({
46 |       value: z.number().positive(),
47 |       unit: z.enum(["minutes", "hours", "days"]),
48 |     })
49 |     .optional()
50 |     .describe("Critical threshold (red flag)"),
51 |   preset: z
52 |     .enum(["realtime", "active", "recent", "weekly", "monthly", "quarterly"])
53 |     .optional()
54 |     .describe("Use predefined threshold preset"),
55 |   includeFileList: z
56 |     .boolean()
57 |     .optional()
58 |     .default(true)
59 |     .describe("Include detailed file list in response"),
60 |   sortBy: z
61 |     .enum(["age", "path", "staleness"])
62 |     .optional()
63 |     .default("staleness")
64 |     .describe("Sort order for file list"),
65 |   storeInKG: z
66 |     .boolean()
67 |     .optional()
68 |     .default(true)
69 |     .describe(
70 |       "Store tracking event in knowledge graph for historical analysis",
71 |     ),
72 | });
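// Example input (a sketch; the values are illustrative, and "monthly" is one
// of the preset names accepted by the schema above):
//   { docsPath: "./docs", projectPath: ".", preset: "monthly", sortBy: "staleness" }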
73 |
74 | export type TrackDocumentationFreshnessInput = z.input<
75 |   typeof TrackDocumentationFreshnessSchema
76 | >;
77 |
78 | /**
79 |  * Format freshness report for display
80 |  */
81 | function formatFreshnessReport(
82 |   report: FreshnessScanReport,
83 |   includeFileList: boolean,
84 |   sortBy: "age" | "path" | "staleness",
85 | ): string {
86 |   const {
87 |     totalFiles,
88 |     filesWithMetadata,
89 |     filesWithoutMetadata,
90 |     freshFiles,
91 |     warningFiles,
92 |     staleFiles,
93 |     criticalFiles,
94 |     files,
95 |     thresholds,
96 |   } = report;
97 |
98 |   let output = "# Documentation Freshness Report\n\n";
99 |   output += `**Scanned at**: ${new Date(report.scannedAt).toLocaleString()}\n`;
100 |   output += `**Documentation path**: ${report.docsPath}\n\n`;
101 |
102 |   // Summary statistics
103 |   output += "## Summary Statistics\n\n";
104 |   output += `- **Total files**: ${totalFiles}\n`;
105 |   output += `- **With metadata**: ${filesWithMetadata} (${
106 |     totalFiles > 0 ? Math.round((filesWithMetadata / totalFiles) * 100) : 0
107 |   }%)\n`;
108 |   output += `- **Without metadata**: ${filesWithoutMetadata}\n\n`;
109 |
110 |   // Freshness breakdown
111 |   output += "## Freshness Breakdown\n\n";
112 |   output += `- ✅ **Fresh**: ${freshFiles} files\n`;
113 |   output += `- 🟡 **Warning**: ${warningFiles} files (older than ${thresholds.warning.value} ${thresholds.warning.unit})\n`;
114 |   output += `- 🟠 **Stale**: ${staleFiles} files (older than ${thresholds.stale.value} ${thresholds.stale.unit})\n`;
115 |   output += `- 🔴 **Critical**: ${criticalFiles} files (older than ${thresholds.critical.value} ${thresholds.critical.unit})\n`;
116 |   output += `- ❓ **Unknown**: ${filesWithoutMetadata} files (no metadata)\n\n`;
117 |
118 |   // Recommendations
119 |   if (filesWithoutMetadata > 0 || criticalFiles > 0 || staleFiles > 0) {
120 |     output += "## Recommendations\n\n";
121 |
122 |     if (filesWithoutMetadata > 0) {
123 |       output += `⚠️ **${filesWithoutMetadata} files lack freshness metadata**. Run \`validate_documentation_freshness\` to initialize metadata.\n\n`;
124 |     }
125 |
126 |     if (criticalFiles > 0) {
127 |       output += `🔴 **${criticalFiles} files are critically stale**. Immediate review and update recommended.\n\n`;
128 |     } else if (staleFiles > 0) {
129 |       output += `🟠 **${staleFiles} files are stale**. Consider reviewing and updating soon.\n\n`;
130 |     }
131 |   }
132 |
133 |   // File list
134 |   if (includeFileList && files.length > 0) {
135 |     output += "## File Details\n\n";
136 |
137 |     // Sort files
138 |     const sortedFiles = [...files];
139 |     switch (sortBy) {
140 |       case "age":
141 |         sortedFiles.sort((a, b) => (b.ageInMs || 0) - (a.ageInMs || 0));
142 |         break;
143 |       case "path":
144 |         sortedFiles.sort((a, b) =>
145 |           a.relativePath.localeCompare(b.relativePath),
146 |         );
147 |         break;
148 |       case "staleness": {
149 |         const order = {
150 |           critical: 0,
151 |           stale: 1,
152 |           warning: 2,
153 |           fresh: 3,
154 |           unknown: 4,
155 |         };
156 |         sortedFiles.sort(
157 |           (a, b) => order[a.stalenessLevel] - order[b.stalenessLevel],
158 |         );
159 |         break;
160 |       }
161 |     }
162 |
163 |     // Group by staleness level
164 |     const grouped = {
165 |       critical: sortedFiles.filter((f) => f.stalenessLevel === "critical"),
166 |       stale: sortedFiles.filter((f) => f.stalenessLevel === "stale"),
167 |       warning: sortedFiles.filter((f) => f.stalenessLevel === "warning"),
168 |       fresh: sortedFiles.filter((f) => f.stalenessLevel === "fresh"),
169 |       unknown: sortedFiles.filter((f) => f.stalenessLevel === "unknown"),
170 |     };
171 |
172 |     for (const [level, levelFiles] of Object.entries(grouped)) {
173 |       if (levelFiles.length === 0) continue;
174 |
175 |       const icon = {
176 |         critical: "🔴",
177 |         stale: "🟠",
178 |         warning: "🟡",
179 |         fresh: "✅",
180 |         unknown: "❓",
181 |       }[level];
182 |
183 |       output += `### ${icon} ${
184 |         level.charAt(0).toUpperCase() + level.slice(1)
185 |       } (${levelFiles.length})\n\n`;
186 |
187 |       for (const file of levelFiles) {
188 |         output += `- **${file.relativePath}**`;
189 |
190 |         if (file.ageFormatted) {
191 |           output += ` - Last updated ${file.ageFormatted} ago`;
192 |         }
193 |
194 |         if (file.metadata?.validated_against_commit) {
195 |           output += ` (commit: ${file.metadata.validated_against_commit.substring(
196 |             0,
197 |             7,
198 |           )})`;
199 |         }
200 |
201 |         if (!file.hasMetadata) {
202 |           output += " - ⚠️ No metadata";
203 |         }
204 |
205 |         output += "\n";
206 |       }
207 |
208 |       output += "\n";
209 |     }
210 |   }
211 |
212 |   return output;
213 | }
214 |
215 | /**
216 |  * Track documentation freshness
217 |  */
218 | export async function trackDocumentationFreshness(
219 |   input: TrackDocumentationFreshnessInput,
220 | ): Promise<MCPToolResponse> {
221 |   const startTime = Date.now();
222 |
223 |   try {
224 |     const {
225 |       docsPath,
226 |       projectPath,
227 |       warningThreshold,
228 |       staleThreshold,
229 |       criticalThreshold,
230 |       preset,
231 |       includeFileList,
232 |       sortBy,
233 |       storeInKG,
234 |     } = input;
235 |
236 |     // Determine thresholds
237 |     let thresholds: {
238 |       warning?: StalenessThreshold;
239 |       stale?: StalenessThreshold;
240 |       critical?: StalenessThreshold;
241 |     } = {};
242 |
243 |     if (preset) {
244 |       // Use preset thresholds
245 |       const presetThreshold = STALENESS_PRESETS[preset];
246 |       thresholds = {
247 |         warning: presetThreshold,
248 |         stale: { value: presetThreshold.value * 2, unit: presetThreshold.unit },
249 |         critical: {
250 |           value: presetThreshold.value * 3,
251 |           unit: presetThreshold.unit,
252 |         },
253 |       };
254 |     } else {
255 |       // Use custom thresholds
256 |       if (warningThreshold) thresholds.warning = warningThreshold;
257 |       if (staleThreshold) thresholds.stale = staleThreshold;
258 |       if (criticalThreshold) thresholds.critical = criticalThreshold;
259 |     }
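    // Worked example (assuming a hypothetical 7-day "weekly" preset):
    // warning = 7 days, stale = 14 days (2x), critical = 21 days (3x).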
260 |
261 |     // Scan documentation
262 |     const report = await scanDocumentationFreshness(docsPath, thresholds);
263 |
264 |     // Store in knowledge graph if requested and projectPath provided
265 |     let kgInsights:
266 |       | Awaited<ReturnType<typeof getStalenessInsights>>
267 |       | undefined;
268 |     if (storeInKG !== false && projectPath) {
269 |       try {
270 |         await storeFreshnessEvent(projectPath, docsPath, report, "scan");
271 |         kgInsights = await getStalenessInsights(projectPath);
272 |       } catch (error) {
273 |         // KG storage failed, but continue with the response
274 |         console.warn(
275 |           "Failed to store freshness event in knowledge graph:",
276 |           error,
277 |         );
278 |       }
279 |     }
280 |
281 |     // Format response
282 |     const formattedReport = formatFreshnessReport(
283 |       report,
284 |       includeFileList ?? true,
285 |       sortBy ?? "staleness",
286 |     );
287 |
288 |     // Add KG insights to formatted report if available
289 |     let enhancedReport = formattedReport;
290 |     if (kgInsights && kgInsights.totalEvents > 0) {
291 |       enhancedReport += "\n## Historical Insights\n\n";
292 |       enhancedReport += `- **Total tracking events**: ${kgInsights.totalEvents}\n`;
293 |       enhancedReport += `- **Average improvement score**: ${(
294 |         kgInsights.averageImprovementScore * 100
295 |       ).toFixed(1)}%\n`;
296 |       enhancedReport += `- **Trend**: ${
297 |         kgInsights.trend === "improving"
298 |           ? "📈 Improving"
299 |           : kgInsights.trend === "declining"
300 |             ? "📉 Declining"
301 |             : "➡️ Stable"
302 |       }\n\n`;
303 |
304 |       if (kgInsights.recommendations.length > 0) {
305 |         enhancedReport += "### Knowledge Graph Insights\n\n";
306 |         for (const rec of kgInsights.recommendations) {
307 |           enhancedReport += `${rec}\n\n`;
308 |         }
309 |       }
310 |     }
311 |
312 |     // Convert KG insights to Recommendation objects
313 |     const recommendations =
314 |       kgInsights?.recommendations.map((rec) => {
315 |         // Determine type based on content
316 |         let type: "info" | "warning" | "critical" = "info";
317 |         if (rec.includes("🔴") || rec.includes("critical")) {
318 |           type = "critical";
319 |         } else if (
320 |           rec.includes("🟠") ||
321 |           rec.includes("⚠️") ||
322 |           rec.includes("warning")
323 |         ) {
324 |           type = "warning";
325 |         }
326 |
327 |         return {
328 |           type,
329 |           title: "Documentation Freshness Insight",
330 |           description: rec,
331 |         };
332 |       }) || [];
333 |
334 |     const response: MCPToolResponse = {
335 |       success: true,
336 |       data: {
337 |         summary: `Scanned ${report.totalFiles} files: ${report.criticalFiles} critical, ${report.staleFiles} stale, ${report.warningFiles} warnings, ${report.freshFiles} fresh`,
338 |         report,
339 |         thresholds: thresholds,
340 |         formattedReport: enhancedReport,
341 |         kgInsights,
342 |       },
343 |       metadata: {
344 |         toolVersion: "1.0.0",
345 |         executionTime: Date.now() - startTime,
346 |         timestamp: new Date().toISOString(),
347 |       },
348 |       recommendations,
349 |     };
350 |
351 |     return response;
352 |   } catch (error) {
353 |     return {
354 |       success: false,
355 |       error: {
356 |         code: "FRESHNESS_TRACKING_FAILED",
357 |         message:
358 |           error instanceof Error
359 |             ? error.message
360 |             : "Unknown error tracking documentation freshness",
361 |         resolution: "Check that the documentation path exists and is readable",
362 |       },
363 |       metadata: {
364 |         toolVersion: "1.0.0",
365 |         executionTime: Date.now() - startTime,
366 |         timestamp: new Date().toISOString(),
367 |       },
368 |     };
369 |   }
370 | }
371 |
```
--------------------------------------------------------------------------------
/tests/tools/recommend-ssg-preferences.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 |  * Tests for Phase 2.2: User Preference Integration
3 |  * Tests recommend_ssg tool with user preference learning and application
4 |  */
5 |
6 | import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
7 | import { promises as fs } from "fs";
8 | import { join } from "path";
9 | import { tmpdir } from "os";
10 | import {
11 |   initializeKnowledgeGraph,
12 |   createOrUpdateProject,
13 | } from "../../src/memory/kg-integration.js";
14 | import { recommendSSG } from "../../src/tools/recommend-ssg.js";
15 | import { MemoryManager } from "../../src/memory/manager.js";
16 | import {
17 |   getUserPreferenceManager,
18 |   clearPreferenceManagerCache,
19 | } from "../../src/memory/user-preferences.js";
20 |
21 | describe("recommendSSG with User Preferences (Phase 2.2)", () => {
22 |   let testDir: string;
23 |   let originalEnv: string | undefined;
24 |   let memoryManager: MemoryManager;
25 |
26 |   // Helper to create analysis memory entry in correct format
27 |   const createAnalysisMemory = async (analysisData: any) => {
28 |     return await memoryManager.remember("analysis", analysisData);
29 |   };
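  // Example analysisData shape used throughout these tests (illustrative values):
  //   {
  //     path: "/test/js-project",
  //     dependencies: { ecosystem: "javascript", languages: ["javascript"] },
  //     structure: { totalFiles: 60 },
  //   }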
30 |
31 |   beforeEach(async () => {
32 |     // Create temporary test directory
33 |     testDir = join(tmpdir(), `recommend-ssg-preferences-test-${Date.now()}`);
34 |     await fs.mkdir(testDir, { recursive: true });
35 |
36 |     // Set environment variable for storage
37 |     originalEnv = process.env.DOCUMCP_STORAGE_DIR;
38 |     process.env.DOCUMCP_STORAGE_DIR = testDir;
39 |
40 |     // Initialize KG and memory
41 |     await initializeKnowledgeGraph(testDir);
42 |     memoryManager = new MemoryManager(testDir);
43 |     await memoryManager.initialize();
44 |
45 |     // Clear preference manager cache
46 |     clearPreferenceManagerCache();
47 |   });
48 |
49 |   afterEach(async () => {
50 |     // Restore environment
51 |     if (originalEnv) {
52 |       process.env.DOCUMCP_STORAGE_DIR = originalEnv;
53 |     } else {
54 |       delete process.env.DOCUMCP_STORAGE_DIR;
55 |     }
56 |
57 |     // Clean up test directory
58 |     try {
59 |       await fs.rm(testDir, { recursive: true, force: true });
60 |     } catch (error) {
61 |       console.warn("Failed to clean up test directory:", error);
62 |     }
63 |
64 |     // Clear preference manager cache
65 |     clearPreferenceManagerCache();
66 |   });
67 |
68 |   describe("User Preference Application", () => {
69 |     it("should apply user preferences when auto-apply is enabled", async () => {
70 |       // Set up user preferences
71 |       const userId = "test-user-1";
72 |       const manager = await getUserPreferenceManager(userId);
73 |       await manager.updatePreferences({
74 |         preferredSSGs: ["hugo", "eleventy"],
75 |         autoApplyPreferences: true,
76 |       });
77 |
78 |       // Create analysis that would normally recommend Docusaurus
79 |       const memoryEntry = await createAnalysisMemory({
80 |         path: "/test/js-project",
81 |         dependencies: {
82 |           ecosystem: "javascript",
83 |           languages: ["javascript", "typescript"],
84 |         },
85 |         structure: { totalFiles: 60 },
86 |       });
87 |
88 |       // Get recommendation
89 |       const result = await recommendSSG({
90 |         analysisId: memoryEntry.id,
91 |         userId,
92 |       });
93 |
94 |       const content = result.content[0];
95 |       expect(content.type).toBe("text");
96 |       const data = JSON.parse(content.text);
97 |
98 |       // Should recommend Hugo (user's top preference)
99 |       expect(data.recommended).toBe("hugo");
100 |       expect(data.reasoning[0]).toContain("Switched to hugo");
101 |       expect(data.reasoning[0]).toContain("usage history");
102 |     });
103 |
104 |     it("should not apply preferences when auto-apply is disabled", async () => {
105 |       const userId = "test-user-2";
106 |       const manager = await getUserPreferenceManager(userId);
107 |       await manager.updatePreferences({
108 |         preferredSSGs: ["jekyll"],
109 |         autoApplyPreferences: false,
110 |       });
111 |
112 |       const memoryEntry = await createAnalysisMemory({
113 |         path: "/test/js-project",
114 |         dependencies: {
115 |           ecosystem: "javascript",
116 |           languages: ["javascript"],
117 |         },
118 |         structure: { totalFiles: 60 },
119 |       });
120 |
121 |       const result = await recommendSSG({
122 |         analysisId: memoryEntry.id,
123 |         userId,
124 |       });
125 |
126 |       const content = result.content[0];
127 |       const data = JSON.parse(content.text);
128 |
129 |       // Should use default recommendation, not user preference
130 |       expect(data.recommended).toBe("docusaurus");
131 |       expect(data.reasoning[0]).not.toContain("Switched");
132 |     });
133 |
134 |     it("should keep recommendation if it matches user preference", async () => {
135 |       const userId = "test-user-3";
136 |       const manager = await getUserPreferenceManager(userId);
137 |       await manager.updatePreferences({
138 |         preferredSSGs: ["mkdocs"],
139 |         autoApplyPreferences: true,
140 |       });
141 |
142 |       const memoryEntry = await createAnalysisMemory({
143 |         path: "/test/python-project",
144 |         dependencies: {
145 |           ecosystem: "python",
146 |           languages: ["python"],
147 |         },
148 |         structure: { totalFiles: 40 },
149 |       });
150 |
151 |       const result = await recommendSSG({
152 |         analysisId: memoryEntry.id,
153 |         userId,
154 |       });
155 |
156 |       const content = result.content[0];
157 |       const data = JSON.parse(content.text);
158 |
159 |       // Should recommend mkdocs (matches both analysis and preference)
160 |       expect(data.recommended).toBe("mkdocs");
161 |       // Either "Matches" or "Switched to" is acceptable - both indicate preference was applied
162 |       expect(data.reasoning[0]).toMatch(
163 |         /Matches your preferred SSG|Switched to mkdocs/,
164 |       );
165 |     });
166 |
167 |     it("should switch to user preference even if not ideal for ecosystem", async () => {
168 |       const userId = "test-user-4";
169 |       const manager = await getUserPreferenceManager(userId);
170 |       await manager.updatePreferences({
171 |         preferredSSGs: ["mkdocs", "jekyll"], // Python/Ruby SSGs
172 |         autoApplyPreferences: true,
173 |       });
174 |
175 |       const memoryEntry = await createAnalysisMemory({
176 |         path: "/test/js-project",
177 |         dependencies: {
178 |           ecosystem: "javascript",
179 |           languages: ["javascript"],
180 |         },
181 |         structure: { totalFiles: 60 },
182 |       });
183 |
184 |       const result = await recommendSSG({
185 |         analysisId: memoryEntry.id,
186 |         userId,
187 |       });
188 |
189 |       const content = result.content[0];
190 |       const data = JSON.parse(content.text);
191 |
192 |       // Should switch to mkdocs (user's top preference)
193 |       // User preferences override ecosystem recommendations
194 |       expect(data.recommended).toBe("mkdocs");
195 |       expect(data.reasoning[0]).toContain("Switched to mkdocs");
196 |       expect(data.reasoning[0]).toContain("usage history");
197 |     });
198 |   });
199 |
200 |   describe("Preference Tracking Integration", () => {
201 |     it("should use default user when no userId provided", async () => {
202 |       const memoryEntry = await createAnalysisMemory({
203 |         path: "/test/project",
204 |         dependencies: {
205 |           ecosystem: "javascript",
206 |           languages: ["javascript"],
207 |         },
208 |         structure: { totalFiles: 50 },
209 |       });
210 |
211 |       // Should not throw error with no userId
212 |       const result = await recommendSSG({
213 |         analysisId: memoryEntry.id,
214 |       });
215 |
216 |       const content = result.content[0];
217 |       expect(content.type).toBe("text");
218 |       const data = JSON.parse(content.text);
219 |       expect(data.recommended).toBeDefined();
220 |     });
221 |
222 |     it("should work with multiple users independently", async () => {
223 |       const user1 = "user1";
224 |       const user2 = "user2";
225 |
226 |       // Set different preferences for each user
227 |       const manager1 = await getUserPreferenceManager(user1);
228 |       await manager1.updatePreferences({
229 |         preferredSSGs: ["hugo"],
230 |         autoApplyPreferences: true,
231 |       });
232 |
233 |       const manager2 = await getUserPreferenceManager(user2);
234 |       await manager2.updatePreferences({
235 |         preferredSSGs: ["eleventy"],
236 |         autoApplyPreferences: true,
237 |       });
238 |
239 |       const memoryEntry = await createAnalysisMemory({
240 |         path: "/test/project",
241 |         dependencies: {
242 |           ecosystem: "javascript",
243 |           languages: ["javascript"],
244 |         },
245 |         structure: { totalFiles: 50 },
246 |       });
247 |
248 |       // Get recommendations for both users
249 |       const result1 = await recommendSSG({
250 |         analysisId: memoryEntry.id,
251 |         userId: user1,
252 |       });
253 |       const result2 = await recommendSSG({
254 |         analysisId: memoryEntry.id,
255 |         userId: user2,
256 |       });
257 |
258 |       const data1 = JSON.parse(result1.content[0].text);
259 |       const data2 = JSON.parse(result2.content[0].text);
260 |
261 |       // Each user should get their preferred SSG
262 |       expect(data1.recommended).toBe("hugo");
263 |       expect(data2.recommended).toBe("eleventy");
264 |     });
265 |   });
266 |
267 |   describe("Confidence Adjustment", () => {
268 |     it("should boost confidence when preference is applied", async () => {
269 |       const userId = "test-user-5";
270 |       const manager = await getUserPreferenceManager(userId);
271 |       await manager.updatePreferences({
272 |         preferredSSGs: ["eleventy"],
273 |         autoApplyPreferences: true,
274 |       });
275 |
276 |       const memoryEntry = await createAnalysisMemory({
277 |         path: "/test/js-project",
278 |         dependencies: {
279 |           ecosystem: "javascript",
280 |           languages: ["javascript"],
281 |         },
282 |         structure: { totalFiles: 60 },
283 |       });
284 |
285 |       const result = await recommendSSG({
286 |         analysisId: memoryEntry.id,
287 |         userId,
288 |       });
289 |
290 |       const content = result.content[0];
291 |       const data = JSON.parse(content.text);
292 |
293 |       // Confidence should be boosted when preference is applied
294 |       // Base confidence varies by SSG, but preference adds +0.05 boost
295 |       expect(data.confidence).toBeGreaterThan(0.7);
296 |       expect(data.reasoning[0]).toContain("🎯");
297 |     });
298 |   });
299 |
300 |   describe("Edge Cases", () => {
301 |     it("should handle empty preferred SSGs list", async () => {
302 |       const userId = "test-user-6";
303 |       const manager = await getUserPreferenceManager(userId);
304 |       await manager.updatePreferences({
305 |         preferredSSGs: [],
306 |         autoApplyPreferences: true,
307 |       });
308 |
309 |       const memoryEntry = await createAnalysisMemory({
310 |         path: "/test/project",
311 |         dependencies: {
312 |           ecosystem: "javascript",
313 |           languages: ["javascript"],
314 |         },
315 |         structure: { totalFiles: 50 },
316 |       });
317 |
318 |       const result = await recommendSSG({
319 |         analysisId: memoryEntry.id,
320 |         userId,
321 |       });
322 |
323 |       const content = result.content[0];
324 |       const data = JSON.parse(content.text);
325 |
326 |       // Should use default recommendation
327 |       expect(data.recommended).toBe("docusaurus");
328 |       expect(data.reasoning[0]).not.toContain("Switched");
329 |     });
330 |
331 |     it("should handle preference manager initialization failure gracefully", async () => {
332 |       const memoryEntry = await createAnalysisMemory({
333 |         path: "/test/project",
334 |         dependencies: {
335 |           ecosystem: "javascript",
336 |           languages: ["javascript"],
337 |         },
338 |         structure: { totalFiles: 50 },
339 |       });
340 |
341 |       // Should not throw even with invalid userId
342 |       const result = await recommendSSG({
343 |         analysisId: memoryEntry.id,
344 |         userId: "any-user-id",
345 |       });
346 |
347 |       const content = result.content[0];
348 |       expect(content.type).toBe("text");
349 |       const data = JSON.parse(content.text);
350 |       expect(data.recommended).toBeDefined();
351 |     });
352 |   });
353 | });
354 |
```
--------------------------------------------------------------------------------
/tests/prompts/guided-workflow-prompts.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | import { generateTechnicalWriterPrompts } from "../../src/prompts/technical-writer-prompts.js";
2 | import { promises as fs } from "fs";
3 | import { join } from "path";
4 | import { tmpdir } from "os";
5 |
6 | describe("Guided Workflow Prompts", () => {
7 |   let tempDir: string;
8 |
9 |   beforeEach(async () => {
10 |     tempDir = join(
11 |       tmpdir(),
12 |       `test-prompts-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
13 |     );
14 |     await fs.mkdir(tempDir, { recursive: true });
15 |
16 |     // Create a test project structure
17 |     await fs.writeFile(
18 |       join(tempDir, "package.json"),
19 |       JSON.stringify({
20 |         name: "test-project",
21 |         version: "1.0.0",
22 |         dependencies: { react: "^18.0.0" },
23 |         scripts: { test: "jest", build: "webpack" },
24 |       }),
25 |     );
26 |     await fs.writeFile(
27 |       join(tempDir, "README.md"),
28 |       "# Test Project\n\nThis is a test project.",
29 |     );
30 |     await fs.mkdir(join(tempDir, "src"));
31 |     await fs.writeFile(join(tempDir, "src/index.js"), 'console.log("hello");');
32 |     await fs.mkdir(join(tempDir, "tests"));
33 |     await fs.writeFile(
34 |       join(tempDir, "tests/index.test.js"),
35 |       'test("basic", () => {});',
36 |     );
37 |   });
38 |
39 |   afterEach(async () => {
40 |     try {
41 |       await fs.rm(tempDir, { recursive: true });
42 |     } catch {
43 |       // Ignore cleanup errors
44 |     }
45 |   });
46 |
47 |   describe("analyze-and-recommend prompt", () => {
48 |     it("should generate comprehensive analysis and recommendation prompt", async () => {
49 |       const messages = await generateTechnicalWriterPrompts(
50 |         "analyze-and-recommend",
51 |         tempDir,
52 |         {
53 |           analysis_depth: "standard",
54 |           preferences: "performance and ease of use",
55 |         },
56 |       );
57 |
58 |       expect(messages).toHaveLength(1);
59 |       expect(messages[0]).toHaveProperty("role", "user");
60 |       expect(messages[0]).toHaveProperty("content");
61 |       expect(messages[0].content).toHaveProperty("type", "text");
62 |       expect(messages[0].content.text).toContain(
63 |         "Execute a complete repository analysis",
64 |       );
65 |       expect(messages[0].content.text).toContain("SSG recommendation workflow");
66 |       expect(messages[0].content.text).toContain("Analysis Depth: standard");
67 |       expect(messages[0].content.text).toContain(
68 |         "Preferences: performance and ease of use",
69 |       );
70 |       expect(messages[0].content.text).toContain("Repository Analysis");
71 |       expect(messages[0].content.text).toContain("Implementation Guidance");
72 |       expect(messages[0].content.text).toContain("Best Practices");
73 |     });
74 |
75 |     it("should use default values when optional parameters are not provided", async () => {
76 |       const messages = await generateTechnicalWriterPrompts(
77 |         "analyze-and-recommend",
78 |         tempDir,
79 |         {},
80 |       );
81 |
82 |       expect(messages[0].content.text).toContain("Analysis Depth: standard");
83 |       expect(messages[0].content.text).toContain(
84 |         "balanced approach with good community support",
85 |       );
86 |     });
87 |
88 |     it("should include project context information", async () => {
89 |       const messages = await generateTechnicalWriterPrompts(
90 |         "analyze-and-recommend",
91 |         tempDir,
92 |         {
93 |           analysis_depth: "deep",
94 |         },
95 |       );
96 |
97 |       expect(messages[0].content.text).toContain("Type: node_application");
98 |       expect(messages[0].content.text).toContain("Has Tests: true");
99 |       expect(messages[0].content.text).toContain("Package Manager: npm");
100 |     });
101 |   });
102 |
103 |   describe("setup-documentation prompt", () => {
104 |     it("should generate comprehensive documentation setup prompt", async () => {
105 |       const messages = await generateTechnicalWriterPrompts(
106 |         "setup-documentation",
107 |         tempDir,
108 |         {
109 |           ssg_type: "docusaurus",
110 |           include_examples: true,
111 |         },
112 |       );
113 |
114 |       expect(messages).toHaveLength(1);
115 |       expect(messages[0]).toHaveProperty("role", "user");
116 |       expect(messages[0].content.text).toContain(
117 |         "Create a comprehensive documentation structure",
118 |       );
119 |       expect(messages[0].content.text).toContain("SSG Type: docusaurus");
120 |       expect(messages[0].content.text).toContain("Include Examples: true");
121 |       expect(messages[0].content.text).toContain(
122 |         "Diataxis Framework Implementation",
123 |       );
124 |       expect(messages[0].content.text).toContain(
125 |         "Tutorials: Learning-oriented content",
126 |       );
127 |       expect(messages[0].content.text).toContain(
128 |         "How-to Guides: Problem-solving content",
129 |       );
130 |       expect(messages[0].content.text).toContain(
131 |         "Reference: Information-oriented content",
132 |       );
133 |       expect(messages[0].content.text).toContain(
134 |         "Explanations: Understanding-oriented content",
135 |       );
136 |       expect(messages[0].content.text).toContain("Configuration Setup");
137 |       expect(messages[0].content.text).toContain("GitHub Pages deployment");
138 |       expect(messages[0].content.text).toContain("with examples");
139 |     });
140 |
141 |     it("should handle minimal configuration", async () => {
142 |       const messages = await generateTechnicalWriterPrompts(
143 |         "setup-documentation",
144 |         tempDir,
145 |         {
146 |           include_examples: false,
147 |         },
148 |       );
149 |
150 |       expect(messages[0].content.text).toContain(
151 |         "SSG Type: recommended based on project analysis",
152 |       );
153 |       expect(messages[0].content.text).toContain("Include Examples: false");
154 |       expect(messages[0].content.text).toContain("templates");
155 |       expect(messages[0].content.text).not.toContain("with examples");
156 |     });
157 |
158 |     it("should include current documentation gaps", async () => {
159 |       const messages = await generateTechnicalWriterPrompts(
160 |         "setup-documentation",
161 |         tempDir,
162 |         {},
163 |       );
164 |
165 |       expect(messages[0].content.text).toContain("Current Documentation Gaps:");
166 |       expect(messages[0].content.text).toContain("Development Integration");
167 |       expect(messages[0].content.text).toContain(
168 |         "production-ready documentation system",
169 |       );
170 |     });
171 |   });
172 |
173 |   describe("troubleshoot-deployment prompt", () => {
174 |     it("should generate comprehensive troubleshooting prompt", async () => {
175 |       const messages = await generateTechnicalWriterPrompts(
176 |         "troubleshoot-deployment",
177 |         tempDir,
178 |         {
179 |           repository: "owner/repo",
180 |           deployment_url: "https://owner.github.io/repo",
181 |           issue_description: "build failing on GitHub Actions",
182 |         },
183 |       );
184 |
185 |       expect(messages).toHaveLength(1);
186 |       expect(messages[0]).toHaveProperty("role", "user");
187 |       expect(messages[0].content.text).toContain(
188 |         "Diagnose and fix GitHub Pages deployment issues",
189 |       );
190 |       expect(messages[0].content.text).toContain("Repository: owner/repo");
191 |       expect(messages[0].content.text).toContain(
192 |         "Expected URL: https://owner.github.io/repo",
193 |       );
194 |       expect(messages[0].content.text).toContain(
195 |         "Issue Description: build failing on GitHub Actions",
196 |       );
197 |       expect(messages[0].content.text).toContain("Troubleshooting Checklist");
198 |       expect(messages[0].content.text).toContain("Repository Settings");
199 |       expect(messages[0].content.text).toContain("Build Configuration");
200 |       expect(messages[0].content.text).toContain("Content Issues");
201 |       expect(messages[0].content.text).toContain("Deployment Workflow");
202 |       expect(messages[0].content.text).toContain("Performance and Security");
203 |       expect(messages[0].content.text).toContain("Root cause analysis");
204 |       expect(messages[0].content.text).toContain("Systematic Testing");
205 |     });
206 |
207 |     it("should use default values for optional parameters", async () => {
208 |       const messages = await generateTechnicalWriterPrompts(
209 |         "troubleshoot-deployment",
210 |         tempDir,
211 |         {
212 |           repository: "test/repo",
213 |         },
214 |       );
215 |
216 |       expect(messages[0].content.text).toContain(
217 |         "Expected URL: GitHub Pages URL",
218 |       );
219 |       expect(messages[0].content.text).toContain(
220 |         "Issue Description: deployment not working as expected",
221 |       );
222 |     });
223 |
224 |     it("should include project context for troubleshooting", async () => {
225 |       const messages = await generateTechnicalWriterPrompts(
226 |         "troubleshoot-deployment",
227 |         tempDir,
228 |         {
229 |           repository: "test/repo",
230 |         },
231 |       );
232 |
233 |       expect(messages[0].content.text).toContain("Project Context");
234 |       expect(messages[0].content.text).toContain("Type: node_application");
235 |       expect(messages[0].content.text).toContain("Diagnostic Approach");
236 |       expect(messages[0].content.text).toContain("Systematic Testing");
237 |     });
238 |   });
239 |
240 |   describe("Error handling", () => {
241 |     it("should throw error for unknown prompt type", async () => {
242 |       await expect(
243 |         generateTechnicalWriterPrompts("unknown-prompt-type", tempDir, {}),
244 |       ).rejects.toThrow("Unknown prompt type: unknown-prompt-type");
245 |     });
246 |
247 |     it("should handle missing project directory gracefully", async () => {
248 |       const nonExistentDir = join(tmpdir(), "non-existent-dir");
249 |
250 |       // Should not throw, but may have reduced context
251 |       const messages = await generateTechnicalWriterPrompts(
252 |         "analyze-and-recommend",
253 |         nonExistentDir,
254 |         {},
255 |       );
256 |
257 |       expect(messages).toHaveLength(1);
258 |       expect(messages[0].content.text).toContain("repository analysis");
259 |     });
260 |
261 |     it("should handle malformed package.json gracefully", async () => {
262 |       await fs.writeFile(join(tempDir, "package.json"), "invalid json content");
263 |
264 |       const messages = await generateTechnicalWriterPrompts(
265 |         "setup-documentation",
266 |         tempDir,
267 |         {},
268 |       );
269 |
270 |       expect(messages).toHaveLength(1);
271 |       expect(messages[0].content.text).toContain("documentation structure");
272 |     });
273 |   });
274 |
275 |   describe("Prompt content validation", () => {
276 |     it("should generate prompts with consistent structure", async () => {
277 |       const promptTypes = [
278 |         "analyze-and-recommend",
279 |         "setup-documentation",
280 |         "troubleshoot-deployment",
281 |       ];
282 |
283 |       for (const promptType of promptTypes) {
284 |         const args =
285 |           promptType === "troubleshoot-deployment"
286 |             ? { repository: "test/repo" }
287 |             : {};
288 |
289 |         const messages = await generateTechnicalWriterPrompts(
290 |           promptType,
291 |           tempDir,
292 |           args,
293 |         );
294 |
295 |         expect(messages).toHaveLength(1);
296 |         expect(messages[0]).toHaveProperty("role", "user");
297 |         expect(messages[0]).toHaveProperty("content");
298 |         expect(messages[0].content).toHaveProperty("type", "text");
299 |         expect(messages[0].content.text).toBeTruthy();
300 |         expect(messages[0].content.text.length).toBeGreaterThan(100);
301 |       }
302 |     });
303 |
304 |     it("should include project-specific information in all prompts", async () => {
305 |       const promptTypes = ["analyze-and-recommend", "setup-documentation"];
306 |
307 |       for (const promptType of promptTypes) {
308 |         const messages = await generateTechnicalWriterPrompts(
309 |           promptType,
310 |           tempDir,
311 |           {},
312 |         );
313 |
314 |         expect(messages[0].content.text).toContain("Project Context");
315 |         expect(messages[0].content.text).toContain("Type:");
316 |         expect(messages[0].content.text).toContain("Languages:");
317 |       }
318 |     });
319 |   });
320 | });
321 |
```
--------------------------------------------------------------------------------
/docs/how-to/local-testing.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.952Z"
4 | last_validated: "2025-11-20T00:46:21.952Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | ---
8 |
9 | # Local Documentation Testing
10 |
11 | This guide shows how to test your documentation locally, using containerized environments that don't affect your system, before deploying to GitHub Pages.
12 |
13 | ## 🎯 Best Practice: Test Build Before Pushing
14 |
15 | **Always test your documentation build locally before pushing to git** to ensure GitHub Actions will build successfully:
16 |
17 | ### Option 1: Test Node.js Build (Recommended - Matches GitHub Actions)
18 |
19 | ```bash
20 | # Test the same build process GitHub Actions uses
21 | cd docs
22 | npm ci
23 | npm run build
24 | ```
25 |
26 | This uses the exact same process as GitHub Actions and catches build issues early.
27 |
28 | ### Option 2: Test Docker Build (Optional - For Container Validation)
29 |
30 | ```bash
31 | # Quick Docker validation (if Dockerfile is configured)
32 | docker build -f Dockerfile.docs -t documcp-docs-test . && echo "✅ Docker build ready"
33 | ```
34 |
35 | **Note**: Docker testing validates containerized environments, but GitHub Actions uses Node.js directly, so Option 1 is more reliable for CI validation.
36 |
37 | ## Quick Start - Containerized Testing
38 |
39 | DocuMCP automatically generates a containerized testing environment that requires only Docker or Podman:
40 |
41 | ```bash
42 | # Run the containerized testing script
43 | ./test-docs-local.sh
44 | ```
45 |
46 | This script will:
47 |
48 | 1. **Detect** your container runtime (Podman or Docker)
49 | 2. **Build** a documentation container
50 | 3. **Check** for broken links in your documentation
51 | 4. **Serve** the documentation at http://localhost:3001
52 |
53 | ### Prerequisites
54 |
55 | You need either Docker or Podman installed:
56 |
57 | **Option 1: Podman (rootless, more secure)**
58 |
59 | ```bash
60 | # macOS
61 | brew install podman
62 |
63 | # Ubuntu/Debian
64 | sudo apt-get install podman
65 |
66 | # RHEL/CentOS/Fedora
67 | sudo dnf install podman
68 | ```
69 |
70 | **Option 2: Docker**
71 |
72 | ```bash
73 | # macOS
74 | brew install docker
75 |
76 | # Or download from: https://docs.docker.com/get-docker/
77 | ```
78 |
79 | ## Container-Based Testing Methods
80 |
81 | ### Method 1: Using the Generated Script (Recommended)
82 |
83 | ```bash
84 | # Simple one-command testing
85 | ./test-docs-local.sh
86 | ```
87 |
88 | ### Method 2: Using Docker Compose
89 |
90 | ```bash
91 | # Build and run with Docker Compose
92 | docker-compose -f docker-compose.docs.yml up --build
93 |
94 | # Or with Podman Compose
95 | podman-compose -f docker-compose.docs.yml up --build
96 | ```
97 |
98 | ### Method 3: Manual Container Commands
99 |
100 | ```bash
101 | # Build the container
102 | docker build -f Dockerfile.docs -t documcp-docs .
103 | # or: podman build -f Dockerfile.docs -t documcp-docs .
104 |
105 | # Run the container
106 | docker run --rm -p 3001:3001 documcp-docs
107 | # or: podman run --rm -p 3001:3001 documcp-docs
108 | ```
109 |
110 | ### Method 4: Pre-Push Docker Validation
111 |
112 | **Optional additional check before pushing to git (in addition to the Node.js build):**
113 |
114 | ```bash
115 | # 1. Test Docker build (validates the container image builds cleanly)
116 | docker build -f Dockerfile.docs -t documcp-docs-test .
117 |
118 | # 2. If successful, test locally
119 | docker run --rm -p 3001:3001 documcp-docs-test
120 |
121 | # 3. Verify at http://localhost:3001, then push to git
122 | ```
123 |
124 | This validates your containerized build in addition to the Node.js build that GitHub Actions actually runs (see the notes above).
125 |
126 | ### Method 5: Legacy Local Installation (Not Recommended)
127 |
128 | If you prefer to install dependencies locally (affects your system):
129 |
130 | ```bash
131 | cd docs
132 | npm install
133 | npm run build
134 | npm run serve
135 | ```
136 |
137 | ## Pre-Push Checklist
138 |
139 | Before pushing documentation changes to git, ensure:
140 |
141 | - [ ] **Node.js build succeeds**: `cd docs && npm ci && npm run build` (matches GitHub Actions)
142 | - [ ] **Local preview works**: Documentation serves correctly at http://localhost:3001
143 | - [ ] **No broken links**: Run link checker (included in test script)
144 | - [ ] **Build output valid**: Check `docs/build` directory structure
145 | - [ ] **No console errors**: Check browser console for JavaScript errors
146 |
147 | **Quick pre-push validation command (Node.js - Recommended):**
148 |
149 | ```bash
150 | cd docs && npm ci && npm run build && echo "✅ Ready to push!"
151 | ```
152 |
153 | **Alternative Docker validation (if Dockerfile is configured):**
154 |
155 | ```bash
156 | docker build -f Dockerfile.docs -t documcp-docs-test . && \
157 | docker run --rm -d -p 3001:3001 --name docs-test documcp-docs-test && \
158 | sleep 5 && curl -f http://localhost:3001 > /dev/null && \
159 | docker stop docs-test && echo "✅ Ready to push!"
160 | ```
161 |
162 | **Note**: GitHub Actions uses Node.js directly (not Docker), so testing with `npm run build` is the most reliable way to validate CI will succeed.
163 |
164 | ## Verification Checklist
165 |
166 | ### ✅ Content Verification
167 |
168 | - [ ] All pages load without errors
169 | - [ ] Navigation works correctly
170 | - [ ] Links between pages function properly
171 | - [ ] Search functionality works (if enabled)
172 | - [ ] Code blocks render correctly with syntax highlighting
173 | - [ ] Images and assets load properly
174 |
175 | ### ✅ Structure Verification
176 |
177 | - [ ] Sidebar navigation reflects your documentation structure
178 | - [ ] Categories and sections are properly organized
179 | - [ ] Page titles and descriptions are accurate
180 | - [ ] Breadcrumb navigation works
181 | - [ ] Footer links are functional
182 |
183 | ### ✅ Content Quality
184 |
185 | - [ ] No broken internal links
186 | - [ ] No broken external links
187 | - [ ] Code examples are up-to-date
188 | - [ ] Screenshots are current and clear
189 | - [ ] All content follows Diataxis framework principles
190 |
191 | ### ✅ Performance Testing
192 |
193 | - [ ] Pages load quickly (< 3 seconds; see the smoke-test sketch below)
194 | - [ ] Search is responsive
195 | - [ ] No console errors in browser developer tools
196 | - [ ] Mobile responsiveness works correctly
197 |
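Several of the checks above can be automated. The following is a minimal smoke-test sketch (a hypothetical helper, not part of DocuMCP) that fetches a list of local pages and flags any that fail to load or exceed the 3-second budget; it assumes Node.js 18+ for the built-in `fetch`, and the page list is a placeholder you should adjust:

```typescript
// smoke-test.ts - run while the site is served, e.g.: npx tsx smoke-test.ts
const BASE_URL = "http://localhost:3001";
const PAGES = ["/", "/docs/"]; // adjust to match your site's routes

async function checkPage(pathname: string): Promise<void> {
  const start = Date.now();
  const response = await fetch(`${BASE_URL}${pathname}`);
  const elapsed = Date.now() - start;

  if (!response.ok) {
    throw new Error(`${pathname}: HTTP ${response.status}`);
  }
  if (elapsed > 3000) {
    throw new Error(`${pathname}: took ${elapsed}ms (budget: 3000ms)`);
  }
  console.log(`ok ${pathname} (${elapsed}ms)`);
}

Promise.all(PAGES.map(checkPage))
  .then(() => console.log("All pages passed"))
  .catch((error) => {
    console.error(String(error));
    process.exit(1);
  });
```
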
198 | ## Troubleshooting Common Issues
199 |
200 | ### Container Build Failures
201 |
202 | **Problem**: Container build fails
203 |
204 | **Solutions**:
205 |
206 | ```bash
207 | # Clean up any existing containers and images
208 | docker system prune -f
209 | # or: podman system prune -f
210 |
211 | # Rebuild from scratch
212 | docker build --no-cache -f Dockerfile.docs -t documcp-docs .
213 | # or: podman build --no-cache -f Dockerfile.docs -t documcp-docs .
214 |
215 | # Check for syntax errors in markdown files
216 | find docs -name "*.md" -exec npx markdownlint {} \;
217 | ```
218 |
219 | ### Container Runtime Issues
220 |
221 | **Problem**: "Neither Podman nor Docker found"
222 |
223 | **Solutions**:
224 |
225 | ```bash
226 | # Check if Docker/Podman is installed and running
227 | docker --version
228 | podman --version
229 |
230 | # On macOS, ensure Docker Desktop is running
231 | # On Linux, ensure Docker daemon is started:
232 | sudo systemctl start docker
233 |
234 | # For Podman on macOS, start the machine:
235 | podman machine start
236 | ```
237 |
238 | ### Broken Links
239 |
240 | **Problem**: Links between documentation pages don't work
241 |
242 | **Solutions**:
243 |
244 | - Check that file paths in your markdown match actual file locations
245 | - Ensure relative links use correct syntax (e.g., `[text](../reference/configuration.md)`)
246 | - Verify that `sidebars.js` references match actual file names
247 |
248 | ### Missing Pages
249 |
250 | **Problem**: Some documentation pages don't appear in navigation
251 |
252 | **Solutions**:
253 |
254 | - Update `docs/sidebars.js` to include new pages
255 | - Ensure files are in the correct directory structure
256 | - Check that frontmatter is properly formatted
257 |
258 | ### Styling Issues
259 |
260 | **Problem**: Documentation doesn't look right
261 |
262 | **Solutions**:
263 |
264 | - Check `docs/src/css/custom.css` for custom styles
265 | - Verify Docusaurus theme configuration
266 | - Clear browser cache and reload
267 |
268 | ## Link Checking
269 |
270 | ### Automated Link Checking
271 |
272 | DocuMCP provides built-in link checking:
273 |
274 | ```bash
275 | # Check all links
276 | npm run docs:check-links
277 |
278 | # Check only external links
279 | npm run docs:check-links:external
280 |
281 | # Check only internal links
282 | npm run docs:check-links:internal
283 | ```
284 |
285 | ### Manual Link Checking
286 |
287 | Use markdown-link-check for comprehensive link validation:
288 |
289 | ```bash
290 | # Install globally
291 | npm install -g markdown-link-check
292 |
293 | # Check specific file
294 | markdown-link-check docs/index.md
295 |
296 | # Check all markdown files
297 | find docs -name "*.md" -exec markdown-link-check {} \;
298 | ```
299 |
300 | ## Container Configuration Testing
301 |
302 | ### Verify Container Configuration
303 |
304 | ```bash
305 | # Test container health
306 | docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
307 | # or: podman ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"
308 |
309 | # Check container logs
310 | docker logs documcp-docs-test
311 | # or: podman logs documcp-docs-test
312 |
313 | # Execute commands inside running container
314 | docker exec -it documcp-docs-test sh
315 | # or: podman exec -it documcp-docs-test sh
316 | ```
317 |
318 | ### Test Different Container Environments
319 |
320 | ```bash
321 | # Test production build in container
322 | docker run --rm -e NODE_ENV=production -p 3001:3001 documcp-docs
323 |
324 | # Interactive debugging mode
325 | docker run --rm -it --entrypoint sh documcp-docs
326 | # Inside container: cd docs-site && npm run build --verbose
327 | ```
328 |
329 | ## Deployment Preview
330 |
331 | Before deploying to GitHub Pages, test with production settings:
332 |
333 | ```bash
334 | # Build with production configuration
335 | npm run build
336 |
337 | # Serve the production build locally
338 | npm run serve
339 | ```
340 |
341 | This simulates exactly what GitHub Pages will serve.
342 |
343 | ## Integration with Development Workflow
344 |
345 | ### Pre-commit Testing
346 |
347 | Add documentation testing to your git hooks:
348 |
349 | ```bash
350 | # .husky/pre-commit
351 | #!/usr/bin/env sh
352 | . "$(dirname -- "$0")/_/husky.sh"
353 |
354 | # Run documentation tests
355 | ./test-docs-local.sh --build-only
356 |
357 | # Run your regular tests
358 | npm test
359 | ```
360 |
361 | ### CI/CD Integration
362 |
363 | Add documentation testing to your GitHub Actions:
364 |
365 | ```yaml
366 | # .github/workflows/docs-test.yml
367 | name: Documentation Tests
368 |
369 | on:
370 | pull_request:
371 | paths:
372 | - "docs/**"
373 | - "docs-site/**"
374 |
375 | jobs:
376 | test-docs:
377 | runs-on: ubuntu-latest
378 | steps:
379 | - uses: actions/checkout@v4
380 |
381 | - name: Setup Node.js
382 | uses: actions/setup-node@v4
383 | with:
384 | node-version: "20"
385 | cache: "npm"
386 |           cache-dependency-path: "docs/package-lock.json"
387 |
388 | - name: Test documentation build
389 | run: ./test-docs-local.sh --build-only
390 | ```
391 |
392 | ## Advanced Testing
393 |
394 | ### Performance Testing
395 |
396 | ```bash
397 | # Install lighthouse CLI
398 | npm install -g lighthouse
399 |
400 | # Test performance of local documentation
401 | lighthouse http://localhost:3001 --output=json --output-path=./lighthouse-report.json
402 |
403 | # Check specific performance metrics
404 | lighthouse http://localhost:3001 --only-categories=performance
405 | ```
406 |
407 | ### Accessibility Testing
408 |
409 | ```bash
410 | # Test accessibility
411 | lighthouse http://localhost:3001 --only-categories=accessibility
412 |
413 | # Use axe for detailed accessibility testing
414 | npm install -g axe-cli
415 | axe http://localhost:3001
416 | ```
417 |
418 | ### SEO Testing
419 |
420 | ```bash
421 | # Test SEO optimization
422 | lighthouse http://localhost:3001 --only-categories=seo
423 |
424 | # Check meta tags and structure
425 | curl -s http://localhost:3001 | grep -E "<title>|<meta"
426 | ```
427 |
428 | ## Automated Testing Script
429 |
430 | Create a comprehensive test script:
431 |
432 | ```bash
433 | #!/bin/bash
434 | # comprehensive-docs-test.sh
435 |
436 | echo "🧪 Running comprehensive documentation tests..."
437 |
438 | # Build test
439 | echo "📦 Testing build..."
440 | cd docs && npm run build
441 |
442 | # Link checking
443 | echo "🔗 Checking links..."
444 | cd .. && npm run docs:check-links:all
445 |
446 | # Performance test (if lighthouse is available)
447 | if command -v lighthouse &> /dev/null; then
448 | echo "⚡ Testing performance..."
449 |     cd docs && npm run serve &
450 | SERVER_PID=$!
451 | sleep 5
452 | lighthouse http://localhost:3001 --quiet --only-categories=performance
453 | kill $SERVER_PID
454 | fi
455 |
456 | echo "✅ All tests completed!"
457 | ```
458 |
459 | ## Best Practices
460 |
461 | ### 1. Test Early and Often
462 |
463 | - Test after every significant documentation change
464 | - Include documentation testing in your regular development workflow
465 | - Set up automated testing in CI/CD pipelines
466 |
467 | ### 2. Test Different Scenarios
468 |
469 | - Test with different screen sizes and devices
470 | - Test with JavaScript disabled
471 | - Test with slow internet connections
472 |
473 | ### 3. Monitor Performance
474 |
475 | - Keep an eye on build times
476 | - Monitor page load speeds
477 | - Check for large images or files that slow down the site
478 |
479 | ### 4. Validate Content Quality
480 |
481 | - Use spell checkers and grammar tools
482 | - Ensure code examples work and are current
483 | - Verify that external links are still valid
484 |
485 | By following this guide, you can ensure your documentation works perfectly before deploying to GitHub Pages, providing a better experience for your users and avoiding broken deployments.
486 |
```
--------------------------------------------------------------------------------
/MCP_PHASE2_IMPLEMENTATION.md:
--------------------------------------------------------------------------------
```markdown
1 | # MCP Phase 2 Implementation: Roots Permission System
2 |
3 | **Status:** ✅ Complete
4 | **Implementation Date:** October 9, 2025
5 | **Build Status:** ✅ Successful
6 | **Test Status:** ✅ 127/127 tests passing
7 |
8 | ## Overview
9 |
10 | Phase 2 implements the **Roots Permission System** for DocuMCP, adding user-granted file/folder access control following MCP best practices. This enhances security by restricting server operations to explicitly allowed directories and improves UX by enabling autonomous file discovery.
11 |
12 | ## Key Features Implemented
13 |
14 | ### 1. **Roots Capability Declaration**
15 |
16 | - Added `roots.listChanged: true` to server capabilities
17 | - Signals to MCP clients that the server supports roots management
18 | - Enables clients to query allowed directories via `ListRoots` request
19 |
20 | ### 2. **CLI Argument Parsing**
21 |
22 | - Added `--root` flag support for specifying allowed directories
23 | - Supports multiple roots: `--root /path/one --root /path/two`
24 | - Automatic `~` expansion for home directory paths
25 | - Defaults to current working directory if no roots specified
26 |
27 | ### 3. **ListRoots Handler**
28 |
29 | - Implements MCP `ListRootsRequest` protocol
30 | - Returns all allowed roots as file:// URIs
31 | - Provides friendly names using `path.basename()`
32 | - Example response:
33 | ```json
34 | {
35 | "roots": [
36 | { "uri": "file:///Users/user/projects", "name": "projects" },
37 | { "uri": "file:///Users/user/workspace", "name": "workspace" }
38 | ]
39 | }
40 | ```
41 |
42 | ### 4. **Permission Checker Utility**
43 |
44 | - **Location:** `src/utils/permission-checker.ts`
45 | - **Functions:**
46 | - `isPathAllowed(requestedPath, allowedRoots)` - Validates path access
47 | - `getPermissionDeniedMessage(requestedPath, allowedRoots)` - User-friendly error messages
48 | - **Security:** Uses `path.relative()` to detect directory traversal attempts
49 | - **Algorithm:** Resolves paths to absolute, checks if relative path doesn't start with `..`
50 |
51 | ### 5. **read_directory Tool**
52 |
53 | - New tool for discovering files and directories within allowed roots
54 | - Enables autonomous exploration without requiring full absolute paths from users
55 | - Returns structured data:
56 | ```typescript
57 | {
58 | path: string,
59 | files: string[],
60 | directories: string[],
61 | totalFiles: number,
62 | totalDirectories: number
63 | }
64 | ```
65 | - Enforces permission checks before listing
66 |
67 | ### 6. **Permission Enforcement in File-Based Tools**
68 |
69 | - Added permission checks to 5 critical tools:
70 | - `analyze_repository`
71 | - `setup_structure`
72 | - `populate_diataxis_content`
73 | - `validate_diataxis_content`
74 | - `check_documentation_links`
75 | - Returns structured `PERMISSION_DENIED` errors with resolution guidance
76 | - Example error:
77 | ```json
78 | {
79 | "success": false,
80 | "error": {
81 | "code": "PERMISSION_DENIED",
82 | "message": "Access denied: Path \"/etc/passwd\" is outside allowed roots. Allowed roots: /Users/user/project",
83 | "resolution": "Request access to this directory by starting the server with --root argument, or use a path within allowed roots."
84 | }
85 | }
86 | ```
87 |
88 | ## Files Modified
89 |
90 | ### 1. `src/index.ts` (+120 lines)
91 |
92 | **Changes:**
93 |
94 | - Added default `path` import and permission checker imports (lines 17, 44-48)
95 | - CLI argument parsing for `--root` flags (lines 69-84)
96 | - Added roots capability to server (lines 101-103)
97 | - Added `read_directory` tool definition (lines 706-717)
98 | - Implemented `ListRoots` handler (lines 1061-1067)
99 | - Implemented `read_directory` handler (lines 1874-1938)
100 | - Added permission checks to 5 file-based tools (multiple sections)
101 |
102 | ### 2. `src/utils/permission-checker.ts` (NEW +49 lines)
103 |
104 | **Functions:**
105 |
106 | - `isPathAllowed()` - Core permission validation logic
107 | - `getPermissionDeniedMessage()` - Standardized error messaging
108 | - Comprehensive JSDoc documentation with examples
109 |
110 | ## Technical Implementation Details
111 |
112 | ### CLI Argument Parsing
113 |
114 | ```typescript
115 | // Parse allowed roots from command line arguments
116 | const allowedRoots: string[] = [];
117 | process.argv.forEach((arg, index) => {
118 | if (arg === "--root" && process.argv[index + 1]) {
119 | const rootPath = process.argv[index + 1];
120 | // Resolve to absolute path and expand ~ for home directory
121 | const expandedPath = rootPath.startsWith("~")
122 | ? join(
123 | process.env.HOME || process.env.USERPROFILE || "",
124 | rootPath.slice(1),
125 | )
126 | : rootPath;
127 | allowedRoots.push(path.resolve(expandedPath));
128 | }
129 | });
130 |
131 | // If no roots specified, allow current working directory by default
132 | if (allowedRoots.length === 0) {
133 | allowedRoots.push(process.cwd());
134 | }
135 | ```
136 |
137 | ### Permission Check Pattern
138 |
139 | ```typescript
140 | // Check if path is allowed
141 | const repoPath = (args as any)?.path;
142 | if (repoPath && !isPathAllowed(repoPath, allowedRoots)) {
143 | return formatMCPResponse({
144 | success: false,
145 | error: {
146 | code: "PERMISSION_DENIED",
147 | message: getPermissionDeniedMessage(repoPath, allowedRoots),
148 | resolution:
149 | "Request access to this directory by starting the server with --root argument, or use a path within allowed roots.",
150 | },
151 | metadata: {
152 | toolVersion: packageJson.version,
153 | executionTime: 0,
154 | timestamp: new Date().toISOString(),
155 | },
156 | });
157 | }
158 | ```
159 |
160 | ### Security Algorithm
161 |
162 | The `isPathAllowed()` function uses `path.relative()` to detect directory traversal (sketched below):
163 |
164 | 1. Resolve requested path to absolute path
165 | 2. For each allowed root:
166 | - Resolve root to absolute path
167 | - Calculate relative path from root to requested path
168 | - If relative path doesn't start with `..` and isn't absolute, access is granted
169 | 3. Return `false` if no roots allow access
170 |
171 | This prevents attacks like:
172 |
173 | - `/project/../../../etc/passwd` - blocked (relative path starts with `..`)
174 | - `/etc/passwd` when root is `/project` - blocked (not within root)
175 |
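For illustration, here is a minimal sketch of the algorithm described above. This is not the exact source (the real implementation lives in `src/utils/permission-checker.ts`); it shows the `path.relative()` idiom under the same assumptions:

```typescript
import * as path from "path";

/**
 * Sketch of isPathAllowed(): returns true if requestedPath falls inside
 * any allowed root. Resolves both paths to absolute form, then uses
 * path.relative() to detect traversal outside the root.
 */
export function isPathAllowed(
  requestedPath: string,
  allowedRoots: string[],
): boolean {
  const resolved = path.resolve(requestedPath);
  return allowedRoots.some((root) => {
    const relative = path.relative(path.resolve(root), resolved);
    // Inside the root when the relative path is not absolute and does
    // not escape via a leading ".." segment. An empty string means the
    // requested path IS the root, which is allowed.
    return (
      !path.isAbsolute(relative) &&
      relative !== ".." &&
      !relative.startsWith(`..${path.sep}`)
    );
  });
}
```

For example, with an allowed root of `/project`, a request for `/project/../../../etc/passwd` resolves to `/etc/passwd` and yields the relative path `../etc/passwd`, which is rejected.
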
176 | ## Testing Results
177 |
178 | ### Build Status
179 |
180 | ✅ TypeScript compilation successful with no errors
181 |
182 | ### Test Suite
183 |
184 | ✅ **127/127 tests passing (100%)**
185 |
186 | **Key Test Coverage:**
187 |
188 | - Tool validation and error handling
189 | - Memory system integration
190 | - Knowledge graph operations
191 | - Functional end-to-end workflows
192 | - Integration tests
193 | - Edge case handling
194 |
195 | **No Regressions:**
196 |
197 | - All existing tests continue to pass
198 | - No breaking changes to tool APIs
199 | - Backward compatible implementation
200 |
201 | ## Security Improvements
202 |
203 | ### Before Phase 2
204 |
205 | - ❌ Server could access any file on the system
206 | - ❌ No permission boundaries
207 | - ❌ Users must provide full absolute paths
208 | - ❌ No visibility into allowed directories
209 |
210 | ### After Phase 2
211 |
212 | - ✅ Access restricted to explicitly allowed roots
213 | - ✅ Directory traversal attacks prevented
214 | - ✅ Users can use relative paths within roots
215 | - ✅ Clients can query allowed directories via ListRoots
216 | - ✅ Clear, actionable error messages when access denied
217 | - ✅ Default to CWD for safe local development
218 |
219 | ## User Experience Improvements
220 |
221 | ### Discovery Without Full Paths
222 |
223 | Users can now explore repositories without knowing exact file locations:
224 |
225 | ```
226 | User: "Analyze my project"
227 | Claude: Uses read_directory to discover project structure
228 | Claude: Finds package.json, analyzes dependencies, generates docs
229 | ```
230 |
231 | ### Clear Error Messages
232 |
233 | When access is denied, users receive helpful guidance:
234 |
235 | ```
236 | Access denied: Path "/private/data" is outside allowed roots.
237 | Allowed roots: /Users/user/projects
238 | Resolution: Request access to this directory by starting the server
239 | with --root argument, or use a path within allowed roots.
240 | ```
241 |
242 | ### Flexible Configuration
243 |
244 | Server can be started with multiple allowed roots:
245 |
246 | ```bash
247 | # Single root
248 | npx documcp --root /Users/user/projects
249 |
250 | # Multiple roots
251 | npx documcp --root /Users/user/projects --root /Users/user/workspace
252 |
253 | # Default (current directory)
254 | npx documcp
255 | ```
256 |
257 | ## Usage Examples
258 |
259 | ### Starting Server with Roots
260 |
261 | ```bash
262 | # Allow access to specific project
263 | npx documcp --root /Users/user/my-project
264 |
265 | # Allow access to multiple directories
266 | npx documcp --root ~/projects --root ~/workspace
267 |
268 | # Use home directory expansion
269 | npx documcp --root ~/code
270 |
271 | # Default to current directory
272 | npx documcp
273 | ```
274 |
275 | ### read_directory Tool Usage
276 |
277 | ```typescript
278 | // Discover files in allowed root
279 | {
280 | "name": "read_directory",
281 | "arguments": {
282 | "path": "/Users/user/projects/my-app"
283 | }
284 | }
285 |
286 | // Response
287 | {
288 | "success": true,
289 | "data": {
290 | "path": "/Users/user/projects/my-app",
291 | "files": ["package.json", "README.md", "tsconfig.json"],
292 | "directories": ["src", "tests", "docs"],
293 | "totalFiles": 3,
294 | "totalDirectories": 3
295 | }
296 | }
297 | ```
298 |
299 | ### ListRoots Request
300 |
301 | ```typescript
302 | // Request
303 | {
304 | "method": "roots/list"
305 | }
306 |
307 | // Response
308 | {
309 | "roots": [
310 | {"uri": "file:///Users/user/projects", "name": "projects"}
311 | ]
312 | }
313 | ```
314 |
315 | ## Alignment with MCP Best Practices
316 |
317 | ✅ **Roots Protocol Compliance**
318 |
319 | - Implements `roots.listChanged` capability
320 | - Provides `ListRoots` handler
321 | - Uses standardized file:// URI format
322 |
323 | ✅ **Security First**
324 |
325 | - Path validation using battle-tested algorithms
326 | - Directory traversal prevention
327 | - Principle of least privilege (explicit allow-list)
328 |
329 | ✅ **User-Centric Design**
330 |
331 | - Clear error messages with actionable resolutions
332 | - Flexible CLI configuration
333 | - Safe defaults (CWD)
334 |
335 | ✅ **Autonomous Operation**
336 |
337 | - `read_directory` enables file discovery
338 | - No need for users to specify full paths
339 | - Tools can explore within allowed roots
340 |
341 | ## Integration with Phase 1
342 |
343 | Phase 2 builds on Phase 1's foundation:
344 |
345 | **Phase 1 (Progress & Logging):**
346 |
347 | - Added visibility into long-running operations
348 | - Tools report progress at logical checkpoints
349 |
350 | **Phase 2 (Roots & Permissions):**
351 |
352 | - Adds security boundaries and permission checks
353 | - Progress notifications can now include permission validation steps
354 | - Example: "Validating path permissions..." → "Analyzing repository..."
355 |
356 | **Combined Benefits:**
357 |
358 | - Users see both progress AND permission enforcement
359 | - Clear feedback when operations are blocked by permissions
360 | - Transparent, secure, and user-friendly experience
361 |
362 | ## Performance Impact
363 |
364 | ✅ **Negligible Overhead**
365 |
366 | - Permission checks: O(n) where n = number of allowed roots (typically 1-5)
367 | - `path.resolve()` and `path.relative()` are highly optimized native operations
368 | - No measurable impact on tool execution time
369 | - All tests pass with no performance degradation
370 |
371 | ## Troubleshooting Guide
372 |
373 | ### Issue: "Access denied" errors
374 |
375 | **Cause:** Requested path is outside allowed roots
376 | **Solution:** Start server with `--root` flag for the desired directory
377 |
378 | ### Issue: ListRoots returns empty array
379 |
380 | **Cause:** No roots specified and CWD not writable
381 | **Solution:** Explicitly specify roots with `--root` flag
382 |
383 | ### Issue: ~ expansion not working
384 |
385 | **Cause:** Server doesn't have HOME or USERPROFILE environment variable
386 | **Solution:** Use absolute paths instead of ~ shorthand
387 |
388 | ## Next Steps (Phase 3)
389 |
390 | Phase 3 will implement:
391 |
392 | 1. **HTTP Transport** - Remote server deployment with HTTP/HTTPS
393 | 2. **Transport Selection** - Environment-based stdio vs. HTTP choice
394 | 3. **Sampling Support** - LLM-powered content generation for creative tasks
395 | 4. **Configuration Management** - Environment variables for all settings
396 |
397 | ## Conclusion
398 |
399 | Phase 2 successfully implements the Roots Permission System, bringing DocuMCP into full compliance with MCP security best practices. The implementation:
400 |
401 | - ✅ Enforces strict access control without compromising usability
402 | - ✅ Enables autonomous file discovery within allowed roots
403 | - ✅ Provides clear, actionable feedback for permission violations
404 | - ✅ Maintains 100% backward compatibility
405 | - ✅ Passes all 127 tests with no regressions
406 | - ✅ Adds minimal performance overhead
407 | - ✅ Follows MCP protocol standards
408 |
409 | **Total Changes:**
410 |
411 | - 1 new file created (`permission-checker.ts`)
412 | - 1 existing file modified (`index.ts`)
413 | - 169 net lines added
414 | - 6 new capabilities added (roots capability declaration, CLI `--root` parsing, ListRoots handler, permission-checker utility, read_directory tool, permission checks in 5 file-based tools)
415 |
416 | **Quality Metrics:**
417 |
418 | - Build: ✅ Successful
419 | - Tests: ✅ 127/127 passing (100%)
420 | - Regressions: ✅ None
421 | - Performance: ✅ No measurable impact
422 | - Security: ✅ Significantly improved
423 |
```
--------------------------------------------------------------------------------
/docs/reference/prompt-templates.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.963Z"
4 | last_validated: "2025-11-20T00:46:21.963Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | ---
8 |
9 | # Prompt Templates
10 |
11 | DocuMCP provides a comprehensive set of prompt templates to help you interact effectively with the system. These templates are designed to get optimal results from DocuMCP's AI-powered documentation tools.
12 |
13 | ## Quick Reference
14 |
15 | ### Complete Workflow Templates
16 |
17 | **Full Documentation Deployment:**
18 |
19 | ```
20 | analyze my repository, recommend the best static site generator, set up Diataxis documentation structure, and deploy to GitHub Pages
21 | ```
22 |
23 | **Documentation Audit:**
24 |
25 | ```
26 | analyze my existing documentation for gaps, validate content accuracy, and provide recommendations for improvement
27 | ```
28 |
29 | **Quick Setup:**
30 |
31 | ```
32 | analyze my [LANGUAGE] project and set up documentation with the most suitable static site generator
33 | ```
34 |
35 | ## Repository Analysis Templates
36 |
37 | ### Basic Analysis
38 |
39 | ```
40 | analyze my repository for documentation needs
41 | ```
42 |
43 | ### Specific Project Types
44 |
45 | ```
46 | analyze my TypeScript library for API documentation requirements
47 | analyze my Python package for comprehensive documentation needs
48 | analyze my React application for user guide documentation
49 | analyze my CLI tool for usage documentation
50 | ```
51 |
52 | ### Deep Analysis
53 |
54 | ```
55 | perform deep analysis of my repository including dependency analysis, complexity assessment, and team collaboration patterns
56 | ```
57 |
58 | ### Focused Analysis
59 |
60 | ```
61 | analyze my repository focusing on [SPECIFIC_AREA]
62 | # Examples:
63 | # - API documentation opportunities
64 | # - user onboarding needs
65 | # - developer experience gaps
66 | # - deployment documentation requirements
67 | ```
68 |
69 | ## SSG Recommendation Templates
70 |
71 | ### Basic Recommendation
72 |
73 | ```
74 | recommend the best static site generator for my project based on the analysis
75 | ```
76 |
77 | ### Preference-Based Recommendations
78 |
79 | ```
80 | recommend a static site generator for my project with preferences for [ECOSYSTEM] and [PRIORITY]
81 | # Ecosystem options: javascript, python, ruby, go, any
82 | # Priority options: simplicity, features, performance
83 | ```
84 |
85 | ### Comparison Requests
86 |
87 | ```
88 | compare static site generators for my [PROJECT_TYPE] with focus on [CRITERIA]
89 | # Project types: library, application, tool, documentation
90 | # Criteria: ease of use, customization, performance, community support
91 | ```
92 |
93 | ### Specific Requirements
94 |
95 | ```
96 | recommend SSG for my project that supports:
97 | - TypeScript integration
98 | - API documentation generation
99 | - Search functionality
100 | - Custom theming
101 | - Multi-language support
102 | ```
103 |
104 | ## Configuration Generation Templates
105 |
106 | ### Basic Configuration
107 |
108 | ```
109 | generate [SSG_NAME] configuration for my project
110 | # Examples:
111 | # - generate Docusaurus configuration for my project
112 | # - generate Hugo configuration for my project
113 | # - generate MkDocs configuration for my project
114 | ```
115 |
116 | ### Detailed Configuration
117 |
118 | ```
119 | generate comprehensive [SSG_NAME] configuration with:
120 | - GitHub integration
121 | - Custom domain setup
122 | - Analytics integration
123 | - SEO optimization
124 | - Performance optimizations
125 | ```
126 |
127 | ### Production-Ready Setup
128 |
129 | ```
130 | generate production-ready [SSG_NAME] configuration with security best practices and performance optimization
131 | ```
132 |
133 | ## Documentation Structure Templates
134 |
135 | ### Basic Structure
136 |
137 | ```
138 | set up Diataxis documentation structure for my project
139 | ```
140 |
141 | ### SSG-Specific Structure
142 |
143 | ```
144 | create [SSG_NAME] documentation structure following Diataxis principles with example content
145 | ```
146 |
147 | ### Content Population
148 |
149 | ```
150 | set up documentation structure and populate it with project-specific content based on my code analysis
151 | ```
152 |
153 | ### Advanced Structure
154 |
155 | ```
156 | create comprehensive documentation structure with:
157 | - Diataxis organization
158 | - Project-specific content
159 | - Code examples from my repository
160 | - API documentation templates
161 | - Deployment guides
162 | ```
163 |
164 | ## Deployment Templates
165 |
166 | ### Basic GitHub Pages Deployment
167 |
168 | ```
169 | deploy my documentation to GitHub Pages
170 | ```
171 |
172 | ### Complete Deployment Workflow
173 |
174 | ```
175 | set up automated GitHub Pages deployment with:
176 | - Build optimization
177 | - Security best practices
178 | - Performance monitoring
179 | - Deployment verification
180 | ```
181 |
182 | ### Custom Domain Deployment
183 |
184 | ```
185 | deploy to GitHub Pages with custom domain [DOMAIN_NAME] and SSL certificate
186 | ```
187 |
188 | ### Multi-Environment Deployment
189 |
190 | ```
191 | set up documentation deployment with staging and production environments
192 | ```
193 |
194 | ## Content Management Templates
195 |
196 | ### Content Validation
197 |
198 | ```
199 | validate all my documentation content for accuracy, broken links, and completeness
200 | ```
201 |
202 | ### Gap Analysis
203 |
204 | ```
205 | analyze my documentation for missing content and provide recommendations for improvement
206 | ```
207 |
208 | ### Content Updates
209 |
210 | ```
211 | update my existing documentation based on recent code changes and current best practices
212 | ```
213 |
214 | ### Quality Assurance
215 |
216 | ```
217 | perform comprehensive quality check on my documentation including:
218 | - Link validation
219 | - Code example testing
220 | - Content accuracy verification
221 | - SEO optimization assessment
222 | ```
223 |
224 | ## Troubleshooting Templates
225 |
226 | ### General Troubleshooting
227 |
228 | ```
229 | diagnose and fix issues with my documentation deployment
230 | ```
231 |
232 | ### Specific Problem Solving
233 |
234 | ```
235 | troubleshoot [SPECIFIC_ISSUE] with my documentation setup
236 | # Examples:
237 | # - GitHub Pages deployment failures
238 | # - build errors with my static site generator
239 | # - broken links in my documentation
240 | # - performance issues with my documentation site
241 | ```
242 |
243 | ### Verification and Testing
244 |
245 | ```
246 | verify my documentation deployment is working correctly and identify any issues
247 | ```
248 |
249 | ## Memory and Learning Templates
250 |
251 | ### Memory Recall
252 |
253 | ```
254 | show me insights from similar projects and successful documentation patterns
255 | ```
256 |
257 | ### Learning from History
258 |
259 | ```
260 | based on previous analyses, what are the best practices for my type of project?
261 | ```
262 |
263 | ### Pattern Recognition
264 |
265 | ```
266 | analyze patterns in my documentation workflow and suggest optimizations
267 | ```
268 |
269 | ## Advanced Workflow Templates
270 |
271 | ### Multi-Step Workflows
272 |
273 | **Research and Planning:**
274 |
275 | ```
276 | 1. analyze my repository comprehensively
277 | 2. research best practices for my project type
278 | 3. recommend optimal documentation strategy
279 | 4. create implementation plan
280 | ```
281 |
282 | **Implementation and Validation:**
283 |
284 | ```
285 | 1. set up recommended documentation structure
286 | 2. populate with project-specific content
287 | 3. validate all content and links
288 | 4. deploy to GitHub Pages
289 | 5. verify deployment success
290 | ```
291 |
292 | **Maintenance and Optimization:**
293 |
294 | ```
295 | 1. audit existing documentation for gaps
296 | 2. update content based on code changes
297 | 3. optimize for performance and SEO
298 | 4. monitor deployment health
299 | ```
300 |
301 | ### Conditional Workflows
302 |
303 | ```
304 | if my project is a [TYPE], then:
305 | - focus on [SPECIFIC_DOCUMENTATION_NEEDS]
306 | - use [RECOMMENDED_SSG]
307 | - emphasize [CONTENT_PRIORITIES]
308 | ```
309 |
310 | ## Context-Aware Templates
311 |
312 | ### Project-Specific Context
313 |
314 | ```
315 | for my [PROJECT_TYPE] written in [LANGUAGE] with [FRAMEWORK]:
316 | - analyze documentation needs
317 | - recommend appropriate tools
318 | - create tailored content structure
319 | ```
320 |
321 | ### Team-Based Context
322 |
323 | ```
324 | for a [TEAM_SIZE] team working on [PROJECT_DESCRIPTION]:
325 | - set up collaborative documentation workflow
326 | - implement review and approval processes
327 | - create contribution guidelines
328 | ```
329 |
330 | ### Audience-Specific Context
331 |
332 | ```
333 | create documentation targeting [AUDIENCE]:
334 | - developers (API docs, technical guides)
335 | - end users (tutorials, how-to guides)
336 | - contributors (development setup, guidelines)
337 | - administrators (deployment, configuration)
338 | ```
339 |
340 | ## Template Customization
341 |
342 | ### Variables and Placeholders
343 |
344 | Use these placeholders in templates (a substitution sketch follows the table):
345 |
346 | | Placeholder | Description | Examples |
347 | | ---------------- | --------------------- | --------------------------------- |
348 | | `[PROJECT_TYPE]` | Type of project | library, application, tool |
349 | | `[LANGUAGE]` | Programming language | TypeScript, Python, Go |
350 | | `[SSG_NAME]` | Static site generator | Docusaurus, Hugo, MkDocs |
351 | | `[DOMAIN_NAME]` | Custom domain | docs.example.com |
352 | | `[FRAMEWORK]` | Framework used | React, Vue, Django |
353 | | `[TEAM_SIZE]` | Team size | small, medium, large |
354 | | `[ECOSYSTEM]` | Package ecosystem | javascript, python, ruby |
355 | | `[PRIORITY]` | Priority focus | simplicity, features, performance |
356 |
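If you generate prompts programmatically (for example, from an editor snippet or a CI job), a small substitution helper keeps templates reusable. This is an illustrative sketch, not a DocuMCP API; the function name and placeholder regex are assumptions:

```typescript
// Hypothetical helper: fill [PLACEHOLDER] tokens in a prompt template.
function fillTemplate(
  template: string,
  values: Record<string, string>,
): string {
  return template.replace(/\[([A-Z_]+)\]/g, (match, key: string) => {
    if (!(key in values)) {
      throw new Error(`Missing value for placeholder ${match}`);
    }
    return values[key];
  });
}

const prompt = fillTemplate(
  "analyze my [LANGUAGE] project and set up documentation with [SSG_NAME]",
  { LANGUAGE: "TypeScript", SSG_NAME: "Docusaurus" },
);
// => "analyze my TypeScript project and set up documentation with Docusaurus"
```
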
357 | ### Creating Custom Templates
358 |
359 | ```
360 | create custom template for [SPECIFIC_USE_CASE]:
361 | - define requirements
362 | - specify desired outcomes
363 | - include success criteria
364 | - provide examples
365 | ```
366 |
367 | ## Best Practices for Prompting
368 |
369 | ### Effective Prompt Structure
370 |
371 | 1. **Be Specific:** Include relevant details about your project
372 | 2. **Set Context:** Mention your experience level and constraints
373 | 3. **Define Success:** Explain what a good outcome looks like
374 | 4. **Ask for Explanation:** Request reasoning behind recommendations
375 |
376 | ### Example of Well-Structured Prompt
377 |
378 | ```
379 | I have a TypeScript library for data visualization with 50+ contributors.
380 | I need comprehensive documentation that includes:
381 | - API reference for all public methods
382 | - Interactive examples with code samples
383 | - Getting started guide for developers
384 | - Contribution guidelines for the community
385 |
386 | Please analyze my repository, recommend the best approach, and set up a
387 | documentation system that can handle our scale and complexity.
388 | ```
389 |
390 | ### Common Pitfalls to Avoid
391 |
392 | - **Too vague:** "help with documentation"
393 | - **Missing context:** Not mentioning project type or requirements
394 | - **No constraints:** Not specifying limitations or preferences
395 | - **Single-step thinking:** Not considering the full workflow
396 |
397 | ## Integration with Development Workflow
398 |
399 | ### Git Hooks Integration
400 |
401 | ```
402 | set up pre-commit hooks to:
403 | - validate documentation changes
404 | - check for broken links
405 | - ensure content quality
406 | - update generated content
407 | ```
408 |
409 | ### CI/CD Integration
410 |
411 | ```
412 | create GitHub Actions workflow that:
413 | - validates documentation on every PR
414 | - deploys docs on main branch updates
415 | - runs quality checks automatically
416 | - notifies team of issues
417 | ```
418 |
419 | ### IDE Integration
420 |
421 | ```
422 | configure development environment for:
423 | - live documentation preview
424 | - automated link checking
425 | - content validation
426 | - template generation
427 | ```
428 |
429 | ## Troubleshooting Prompts
430 |
431 | ### When Things Don't Work
432 |
433 | **Analysis Issues:**
434 |
435 | ```
436 | my repository analysis returned incomplete results, please retry with deep analysis and explain what might have caused the issue
437 | ```
438 |
439 | **Recommendation Problems:**
440 |
441 | ```
442 | the SSG recommendation doesn't match my needs because [REASON], please provide alternative recommendations with different priorities
443 | ```
444 |
445 | **Deployment Failures:**
446 |
447 | ```
448 | my GitHub Pages deployment failed with [ERROR_MESSAGE], please diagnose the issue and provide a fix
449 | ```
450 |
451 | **Content Issues:**
452 |
453 | ```
454 | my generated documentation has [PROBLEM], please update the content and ensure it meets [REQUIREMENTS]
455 | ```
456 |
457 | For more troubleshooting help, see the [Troubleshooting Guide](../how-to/troubleshooting.md).
458 |
459 | ## Template Categories Summary
460 |
461 | | Category | Purpose | Key Templates |
462 | | ------------------- | ---------------------- | ---------------------------------- |
463 | | **Analysis** | Understanding projects | Repository analysis, gap detection |
464 | | **Recommendation** | Tool selection | SSG comparison, feature matching |
465 | | **Configuration** | Setup and config | Production configs, optimization |
466 | | **Structure** | Content organization | Diataxis setup, content population |
467 | | **Deployment** | Going live | GitHub Pages, custom domains |
468 | | **Validation** | Quality assurance | Link checking, content validation |
469 | | **Troubleshooting** | Problem solving | Diagnosis, issue resolution |
470 | | **Workflow** | Process automation | Multi-step procedures, CI/CD |
471 |
472 | These templates provide a solid foundation for effective interaction with DocuMCP. Customize them based on your specific needs and project requirements.
473 |
```
--------------------------------------------------------------------------------
/tests/integration/kg-documentation-workflow.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Integration Tests for Knowledge Graph Documentation Workflow
3 | * Tests end-to-end workflow from repository analysis to documentation tracking
4 | */
5 |
6 | import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
7 | import { promises as fs } from "fs";
8 | import path from "path";
9 | import { tmpdir } from "os";
10 | import { analyzeRepository } from "../../src/tools/analyze-repository.js";
11 | import {
12 | initializeKnowledgeGraph,
13 | getKnowledgeGraph,
14 | saveKnowledgeGraph,
15 | } from "../../src/memory/kg-integration.js";
16 |
17 | describe("KG Documentation Workflow Integration", () => {
18 | let testDir: string;
19 |
20 | beforeEach(async () => {
21 | testDir = path.join(tmpdir(), `documcp-integration-${Date.now()}`);
22 | await fs.mkdir(testDir, { recursive: true });
23 |
24 | // Initialize KG with test storage
25 | const storageDir = path.join(testDir, ".documcp/memory");
26 | await initializeKnowledgeGraph(storageDir);
27 | });
28 |
29 | afterEach(async () => {
30 | try {
31 | await fs.rm(testDir, { recursive: true, force: true });
32 | } catch {
33 | // Ignore cleanup errors
34 | }
35 | });
36 |
37 | it("should complete full workflow: analyze → create entities → link relationships", async () => {
38 | // Setup: Create a test repository structure
39 | const srcDir = path.join(testDir, "src");
40 | const docsDir = path.join(testDir, "docs");
41 | await fs.mkdir(srcDir, { recursive: true });
42 | await fs.mkdir(docsDir, { recursive: true });
43 |
44 | // Create source code
45 | await fs.writeFile(
46 | path.join(srcDir, "auth.ts"),
47 | `
48 | export class AuthService {
49 | async login(username: string, password: string) {
50 | return { token: "abc123" };
51 | }
52 |
53 | async logout(token: string) {
54 | return true;
55 | }
56 | }
57 |
58 | export function validateToken(token: string) {
59 | return token.length > 0;
60 | }
61 | `,
62 | "utf-8",
63 | );
64 |
65 | // Create documentation
66 | await fs.writeFile(
67 | path.join(docsDir, "api.md"),
68 | `
69 | # Authentication API
70 |
71 | ## Login
72 |
73 | Use the \`login()\` method from \`AuthService\` class in \`src/auth.ts\`:
74 |
75 | \`\`\`typescript
76 | const auth = new AuthService();
77 | const result = await auth.login(username, password);
78 | \`\`\`
79 |
80 | ## Logout
81 |
82 | Call \`logout()\` with the authentication token:
83 |
84 | \`\`\`typescript
85 | await auth.logout(token);
86 | \`\`\`
87 |
88 | ## Token Validation
89 |
90 | Use \`validateToken()\` function to validate tokens.
91 | `,
92 | "utf-8",
93 | );
94 |
95 | await fs.writeFile(
96 | path.join(testDir, "README.md"),
97 | "# Test Project",
98 | "utf-8",
99 | );
100 | await fs.writeFile(
101 | path.join(testDir, "package.json"),
102 | JSON.stringify({ name: "test-project", version: "1.0.0" }),
103 | "utf-8",
104 | );
105 |
106 | // Act: Run repository analysis
107 | const analysisResult = await analyzeRepository({
108 | path: testDir,
109 | depth: "standard",
110 | });
111 |
112 | // Assert: Analysis completed (may have errors due to test environment)
113 | expect(analysisResult.content).toBeDefined();
114 | expect(analysisResult.content.length).toBeGreaterThan(0);
115 |
116 | // If analysis succeeded, verify structure
117 | if (!analysisResult.isError) {
118 | const analysis = JSON.parse(analysisResult.content[0].text);
119 | if (analysis.success) {
120 | expect(analysis.data.structure.hasDocs).toBe(true);
121 | }
122 | }
123 |
124 | // Wait for KG operations to complete
125 | await new Promise((resolve) => setTimeout(resolve, 100));
126 |
127 | // Verify: Check knowledge graph entities
128 | const kg = await getKnowledgeGraph();
129 | const allNodes = await kg.getAllNodes();
130 | const allEdges = await kg.getAllEdges();
131 |
132 | // Should have project, code files, and documentation sections
133 | const projectNodes = allNodes.filter((n) => n.type === "project");
134 | const codeFileNodes = allNodes.filter((n) => n.type === "code_file");
135 | const docSectionNodes = allNodes.filter(
136 | (n) => n.type === "documentation_section",
137 | );
138 |
139 | expect(projectNodes.length).toBeGreaterThan(0);
140 | expect(codeFileNodes.length).toBeGreaterThan(0);
141 | expect(docSectionNodes.length).toBeGreaterThan(0);
142 |
143 | // Verify code file details
144 | const authFile = codeFileNodes.find((n) =>
145 | n.properties.path.includes("auth.ts"),
146 | );
147 | expect(authFile).toBeDefined();
148 | expect(authFile?.properties.language).toBe("typescript");
149 | expect(authFile?.properties.classes).toContain("AuthService");
150 | expect(authFile?.properties.functions).toContain("validateToken");
151 |
152 | // Verify documentation sections
153 | const apiDoc = docSectionNodes.find((n) =>
154 | n.properties.filePath.includes("api.md"),
155 | );
156 | expect(apiDoc).toBeDefined();
157 | expect(apiDoc?.properties.hasCodeExamples).toBe(true);
158 | expect(apiDoc?.properties.referencedFunctions.length).toBeGreaterThan(0);
159 |
160 | // Verify relationships
161 | const referencesEdges = allEdges.filter((e) => e.type === "references");
162 | const documentsEdges = allEdges.filter((e) => e.type === "documents");
163 |
164 | expect(referencesEdges.length).toBeGreaterThan(0);
165 | expect(documentsEdges.length).toBeGreaterThan(0);
166 |
167 | // Verify specific relationship: api.md references auth.ts
168 | const apiToAuthEdge = referencesEdges.find(
169 | (e) => e.source === apiDoc?.id && e.target === authFile?.id,
170 | );
171 | expect(apiToAuthEdge).toBeDefined();
172 | expect(apiToAuthEdge?.properties.referenceType).toBe("api-reference");
173 | });
174 |
175 | it("should detect outdated documentation when code changes", async () => {
176 | // Setup: Create initial code and docs
177 | const srcDir = path.join(testDir, "src");
178 | const docsDir = path.join(testDir, "docs");
179 | await fs.mkdir(srcDir, { recursive: true });
180 | await fs.mkdir(docsDir, { recursive: true });
181 |
182 | await fs.writeFile(
183 | path.join(srcDir, "user.ts"),
184 | "export function getUser() {}",
185 | "utf-8",
186 | );
187 |
188 | await fs.writeFile(
189 | path.join(docsDir, "guide.md"),
190 | "Call `getUser()` from `src/user.ts`",
191 | "utf-8",
192 | );
193 |
194 | await fs.writeFile(path.join(testDir, "README.md"), "# Test", "utf-8");
195 | await fs.writeFile(path.join(testDir, "package.json"), "{}", "utf-8");
196 |
197 | // First analysis
198 | await analyzeRepository({ path: testDir, depth: "standard" });
199 | await new Promise((resolve) => setTimeout(resolve, 100));
200 |
201 | // Simulate code change
202 | await new Promise((resolve) => setTimeout(resolve, 100)); // Ensure different timestamp
203 | await fs.writeFile(
204 | path.join(srcDir, "user.ts"),
205 | "export function getUser(id: string) {} // CHANGED",
206 | "utf-8",
207 | );
208 |
209 | // Second analysis
210 | await analyzeRepository({ path: testDir, depth: "standard" });
211 | await new Promise((resolve) => setTimeout(resolve, 100));
212 |
213 | // Verify: Check that system handled multiple analyses
214 | // In a real scenario, outdated_for edges would be created
215 | // For this test, just verify no crashes occurred
216 | const kg = await getKnowledgeGraph();
217 | const allNodes = await kg.getAllNodes();
218 |
219 | // Should have created some nodes from both analyses
220 | expect(allNodes.length).toBeGreaterThan(0);
221 | });
222 |
223 | it("should handle projects with no documentation gracefully", async () => {
224 | // Setup: Code-only project
225 | const srcDir = path.join(testDir, "src");
226 | await fs.mkdir(srcDir, { recursive: true });
227 |
228 | await fs.writeFile(
229 | path.join(srcDir, "index.ts"),
230 | "export function main() {}",
231 | "utf-8",
232 | );
233 |
234 | await fs.writeFile(path.join(testDir, "package.json"), "{}", "utf-8");
235 |
236 | // Act
237 | await analyzeRepository({ path: testDir, depth: "standard" });
238 | await new Promise((resolve) => setTimeout(resolve, 100));
239 |
240 | // Verify: Should still create code entities, just no doc entities
241 | const kg = await getKnowledgeGraph();
242 | const allNodes = await kg.getAllNodes();
243 |
244 | const codeFileNodes = allNodes.filter((n) => n.type === "code_file");
245 | const docSectionNodes = allNodes.filter(
246 | (n) => n.type === "documentation_section",
247 | );
248 |
249 | expect(codeFileNodes.length).toBeGreaterThan(0);
250 | expect(docSectionNodes.length).toBe(0);
251 | });
252 |
253 | it("should handle multi-file projects correctly", async () => {
254 | // Setup: Multiple source files
255 | const srcDir = path.join(testDir, "src");
256 | await fs.mkdir(path.join(srcDir, "auth"), { recursive: true });
257 | await fs.mkdir(path.join(srcDir, "db"), { recursive: true });
258 |
259 | await fs.writeFile(
260 | path.join(srcDir, "auth", "login.ts"),
261 | "export function login() {}",
262 | "utf-8",
263 | );
264 | await fs.writeFile(
265 | path.join(srcDir, "auth", "logout.ts"),
266 | "export function logout() {}",
267 | "utf-8",
268 | );
269 | await fs.writeFile(
270 | path.join(srcDir, "db", "query.ts"),
271 | "export function query() {}",
272 | "utf-8",
273 | );
274 |
275 | await fs.writeFile(path.join(testDir, "package.json"), "{}", "utf-8");
276 |
277 | // Act
278 | await analyzeRepository({ path: testDir, depth: "standard" });
279 | await new Promise((resolve) => setTimeout(resolve, 100));
280 |
281 | // Verify
282 | const kg = await getKnowledgeGraph();
283 | const codeFileNodes = (await kg.getAllNodes()).filter(
284 | (n) => n.type === "code_file",
285 | );
286 |
287 | expect(codeFileNodes.length).toBe(3);
288 |
289 | const paths = codeFileNodes.map((n) => n.properties.path);
290 | expect(paths).toContain("src/auth/login.ts");
291 | expect(paths).toContain("src/auth/logout.ts");
292 | expect(paths).toContain("src/db/query.ts");
293 | });
294 |
295 | it("should persist knowledge graph to storage", async () => {
296 | // Setup
297 | const srcDir = path.join(testDir, "src");
298 | await fs.mkdir(srcDir, { recursive: true });
299 |
300 | await fs.writeFile(
301 | path.join(srcDir, "test.ts"),
302 | "export function test() {}",
303 | "utf-8",
304 | );
305 |
306 | await fs.writeFile(path.join(testDir, "package.json"), "{}", "utf-8");
307 |
308 | // Act
309 | await analyzeRepository({ path: testDir, depth: "standard" });
310 | await new Promise((resolve) => setTimeout(resolve, 100));
311 |
312 | // Save KG
313 | await saveKnowledgeGraph();
314 |
315 | // Verify storage files exist
316 | const storageDir = path.join(testDir, ".documcp/memory");
317 | const entitiesFile = path.join(
318 | storageDir,
319 | "knowledge-graph-entities.jsonl",
320 | );
321 | const relationshipsFile = path.join(
322 | storageDir,
323 | "knowledge-graph-relationships.jsonl",
324 | );
325 |
326 | const entitiesExist = await fs
327 | .access(entitiesFile)
328 | .then(() => true)
329 | .catch(() => false);
330 | const relationshipsExist = await fs
331 | .access(relationshipsFile)
332 | .then(() => true)
333 | .catch(() => false);
334 |
335 | expect(entitiesExist).toBe(true);
336 | expect(relationshipsExist).toBe(true);
337 |
338 | // Verify content
339 | const entitiesContent = await fs.readFile(entitiesFile, "utf-8");
340 | expect(entitiesContent).toContain("code_file");
341 | });
342 |
343 | it("should calculate coverage metrics for documentation", async () => {
344 | // Setup: 3 functions, docs covering 2 of them
345 | const srcDir = path.join(testDir, "src");
346 | const docsDir = path.join(testDir, "docs");
347 | await fs.mkdir(srcDir, { recursive: true });
348 | await fs.mkdir(docsDir, { recursive: true });
349 |
350 | await fs.writeFile(
351 | path.join(srcDir, "api.ts"),
352 | `
353 | export function create() {}
354 | export function read() {}
355 | export function update() {} // Not documented
356 | `,
357 | "utf-8",
358 | );
359 |
360 | await fs.writeFile(
361 | path.join(docsDir, "api.md"),
362 | `
363 | # API Reference
364 |
365 | - \`create()\`: Creates a resource
366 | - \`read()\`: Reads a resource
367 | `,
368 | "utf-8",
369 | );
370 |
371 | await fs.writeFile(path.join(testDir, "README.md"), "# Test", "utf-8");
372 | await fs.writeFile(path.join(testDir, "package.json"), "{}", "utf-8");
373 |
374 | // Act
375 | await analyzeRepository({ path: testDir, depth: "standard" });
376 | await new Promise((resolve) => setTimeout(resolve, 100));
377 |
378 | // Verify coverage
379 | const kg = await getKnowledgeGraph();
380 | const documentsEdges = (await kg.getAllEdges()).filter(
381 | (e) => e.type === "documents",
382 | );
383 |
384 | expect(documentsEdges.length).toBeGreaterThan(0);
385 |
386 | const coverage = documentsEdges[0].properties.coverage;
387 | expect(["partial", "complete", "comprehensive"]).toContain(coverage);
388 | // 2/3 = 66% should be "complete"
389 |     // 2/3 ≈ 67% coverage should map to "complete"
390 | });
391 | });
392 |
```
--------------------------------------------------------------------------------
/docs/how-to/github-pages-deployment.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.951Z"
4 | last_validated: "2025-11-20T00:46:21.951Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | ---
8 |
9 | # How to Deploy to GitHub Pages
10 |
11 | This guide shows you how to deploy your documentation to GitHub Pages using DocuMCP's automated workflows. DocuMCP uses a dual-static-site-generator approach for optimal deployment.
12 |
13 | ## Architecture Overview
14 |
15 | DocuMCP employs a **dual SSG strategy**:
16 |
17 | - **Docusaurus**: Primary documentation system for development and rich content
18 | - **Jekyll**: GitHub Pages deployment for reliable hosting
19 | - **Docker**: Alternative testing and deployment method
20 |
21 | ## Quick Deployment
22 |
23 | For immediate deployment:
24 |
25 | ```bash
26 | # Prompt DocuMCP:
27 | "deploy my documentation to GitHub Pages"
28 | ```
29 |
30 | ## Prerequisites
31 |
32 | - Repository with documentation content
33 | - GitHub account with repository access
34 | - GitHub Pages enabled in repository settings
35 | - Node.js 20.0.0+ for Docusaurus development
36 |
37 | ## Deployment Methods
38 |
39 | ### Method 1: Automated with DocuMCP (Recommended)
40 |
41 | Use DocuMCP's intelligent deployment:
42 |
43 | ```bash
44 | # Complete workflow:
45 | "analyze my repository, recommend SSG, and deploy to GitHub Pages"
46 | ```
47 |
48 | This will:
49 |
50 | 1. Analyze your project structure
51 | 2. Set up Docusaurus for development
52 | 3. Configure Jekyll for GitHub Pages deployment
53 | 4. Create GitHub Actions workflow
54 | 5. Deploy to Pages
55 |
56 | ### Method 2: Current DocuMCP Setup
57 |
58 | DocuMCP currently uses the following deployment workflow:
59 |
60 | #### GitHub Actions Workflow
61 |
62 | ```yaml
63 | name: Deploy Jekyll to GitHub Pages
64 |
65 | on:
66 | push:
67 | branches: [main]
68 | workflow_dispatch:
69 |
70 | permissions:
71 | contents: read
72 | pages: write
73 | id-token: write
74 |
75 | jobs:
76 | build:
77 | runs-on: ubuntu-latest
78 | steps:
79 | - name: Checkout
80 | uses: actions/checkout@v4
81 | - name: Setup Ruby
82 | uses: ruby/setup-ruby@v1
83 | with:
84 | ruby-version: "3.1"
85 | bundler-cache: true
86 | - name: Build with Jekyll
87 | run: bundle exec jekyll build
88 | env:
89 | JEKYLL_ENV: production
90 | - name: Setup Pages
91 | uses: actions/configure-pages@v5
92 |
93 | - name: Upload artifact
94 | uses: actions/upload-pages-artifact@v4
95 | with:
96 | path: "./_site"
97 |
98 | deploy:
99 | environment:
100 | name: github-pages
101 | url: ${{ steps.deployment.outputs.page_url }}
102 | runs-on: ubuntu-latest
103 | needs: build
104 | permissions:
105 | contents: read
106 | pages: write
107 | id-token: write
108 | steps:
109 | - name: Deploy to GitHub Pages
110 | id: deployment
111 | uses: actions/deploy-pages@v4
112 | ```
113 |
114 | #### Development vs Production
115 |
116 | - **Development**: Use Docusaurus (`cd docs && npm start`)
117 | - **Production**: Jekyll builds and deploys to GitHub Pages
118 | - **Testing**: Use Docker (`docker-compose -f docker-compose.docs.yml up`); all three commands are consolidated below
119 |
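For convenience, the three environments' commands in one place (paths match this repository's layout):

```bash
# Development: live-reload Docusaurus server
cd docs && npm start

# Testing: containerized preview of the documentation site
docker-compose -f docker-compose.docs.yml up

# Production: built and deployed by the Jekyll workflow above
```
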
120 | ### Method 3: Manual Configuration
121 |
122 | If you prefer manual setup:
123 |
124 | #### Step 1: Choose Your SSG
125 |
126 | ```bash
127 | # Get recommendation first:
128 | "recommend static site generator for my project"
129 | ```
130 |
131 | #### Step 2: Generate Config
132 |
133 | ```bash
134 | # For example, with Hugo:
135 | "generate Hugo configuration for GitHub Pages deployment"
136 | ```
137 |
138 | #### Step 3: Deploy
139 |
140 | ```bash
141 | "set up GitHub Pages deployment workflow for Hugo"
142 | ```
143 |
144 | ## GitHub Actions Workflow
145 |
146 | DocuMCP generates optimized workflows for each SSG:
147 |
148 | ### Docusaurus Workflow
149 |
150 | ```yaml
151 | name: Deploy Docusaurus
152 |
153 | on:
154 | push:
155 | branches: [main]
156 | paths: ["docs/**", "docusaurus.config.js"]
157 |
158 | permissions:
159 | contents: read
160 | pages: write
161 | id-token: write
162 |
163 | jobs:
164 | deploy:
165 | environment:
166 | name: github-pages
167 | url: ${{ steps.deployment.outputs.page_url }}
168 | runs-on: ubuntu-latest
169 |
170 | steps:
171 | - name: Checkout
172 | uses: actions/checkout@v4
173 |
174 | - name: Setup Node.js
175 | uses: actions/setup-node@v4
176 | with:
177 | node-version: "20"
178 | cache: "npm"
179 |
180 | - name: Install dependencies
181 | run: npm ci
182 |
183 | - name: Build
184 | run: npm run build
185 |
186 | - name: Setup Pages
187 | uses: actions/configure-pages@v5
188 |
189 | - name: Upload artifact
190 | uses: actions/upload-pages-artifact@v4
191 | with:
192 | path: "./build"
193 |
194 | - name: Deploy to GitHub Pages
195 | id: deployment
196 | uses: actions/deploy-pages@v4
197 | ```
198 |
199 | ### Hugo Workflow
200 |
201 | ```yaml
202 | name: Deploy Hugo
203 |
204 | on:
205 | push:
206 | branches: [main]
207 | paths: ["content/**", "config.yml", "themes/**"]
208 |
209 | permissions:
210 | contents: read
211 | pages: write
212 | id-token: write
213 |
214 | jobs:
215 | deploy:
216 | runs-on: ubuntu-latest
217 |
218 | steps:
219 | - name: Checkout
220 | uses: actions/checkout@v4
221 | with:
222 | submodules: recursive
223 |
224 | - name: Setup Hugo
225 | uses: peaceiris/actions-hugo@v2
226 | with:
227 | hugo-version: "latest"
228 | extended: true
229 |
230 | - name: Build
231 | run: hugo --minify
232 |
233 | - name: Setup Pages
234 | uses: actions/configure-pages@v5
235 |
236 | - name: Upload artifact
237 | uses: actions/upload-pages-artifact@v4
238 | with:
239 | path: "./public"
240 |
241 | - name: Deploy to GitHub Pages
242 | id: deployment
243 | uses: actions/deploy-pages@v4
244 | ```
245 |
246 | ### MkDocs Workflow
247 |
248 | ```yaml
249 | name: Deploy MkDocs
250 |
251 | on:
252 | push:
253 | branches: [main]
254 | paths: ["docs/**", "mkdocs.yml"]
255 |
256 | permissions:
257 | contents: read
258 | pages: write
259 | id-token: write
260 |
261 | jobs:
262 | deploy:
263 | runs-on: ubuntu-latest
264 |
265 | steps:
266 | - name: Checkout
267 | uses: actions/checkout@v4
268 |
269 | - name: Setup Python
270 | uses: actions/setup-python@v4
271 | with:
272 | python-version: "3.x"
273 |
274 | - name: Install dependencies
275 | run: |
276 | pip install mkdocs mkdocs-material
277 |
278 | - name: Build
279 | run: mkdocs build
280 |
281 | - name: Setup Pages
282 | uses: actions/configure-pages@v5
283 |
284 | - name: Upload artifact
285 | uses: actions/upload-pages-artifact@v4
286 | with:
287 | path: "./site"
288 |
289 | - name: Deploy to GitHub Pages
290 | id: deployment
291 | uses: actions/deploy-pages@v4
292 | ```
293 |
294 | ## Repository Configuration
295 |
296 | ### GitHub Pages Settings
297 |
298 | 1. Navigate to repository **Settings**
299 | 2. Go to **Pages** section
300 | 3. Set **Source** to "GitHub Actions"
301 | 4. Save configuration (this can also be scripted; see the sketch below)
302 |
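The same setting can be applied from the command line (a sketch, assuming an authenticated `gh` CLI and GitHub's "Create a Pages site" REST endpoint):

```bash
# Enable Pages with GitHub Actions as the build source
gh api --method POST repos/OWNER/REPO/pages -f build_type=workflow
```
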
303 | ### Branch Protection
304 |
305 | Protect your main branch. Note that GitHub does not apply this file automatically; treat it as a reference for rules configured via the repository settings UI, the REST API, or a configuration-as-code app:
306 |
307 | ```yaml
308 | # .github/branch-protection.yml
309 | protection_rules:
310 | main:
311 | required_status_checks:
312 | strict: true
313 | contexts:
314 | - "Deploy Documentation"
315 | enforce_admins: false
316 | required_pull_request_reviews:
317 | required_approving_review_count: 1
318 | ```
319 |
320 | ## Custom Domain Setup
321 |
322 | ### Add Custom Domain
323 |
324 | 1. Create `CNAME` file in your docs directory:
325 |
326 | ```
327 | docs.yourdomain.com
328 | ```
329 |
330 | 2. Configure DNS records:
331 |
332 | ```
333 | CNAME docs yourusername.github.io
334 | ```
335 |
336 | 3. Update DocuMCP deployment:
337 |
338 | ```bash
339 | "deploy to GitHub Pages with custom domain docs.yourdomain.com"
340 | ```
341 |
342 | ### SSL Certificate
343 |
344 | GitHub automatically provides SSL certificates for custom domains.
345 |
346 | Verification (a command-line spot check follows this list):
347 |
348 | - Check `https://docs.yourdomain.com` loads correctly
349 | - Verify SSL certificate is valid
350 | - Test redirect from `http://` to `https://`
351 |
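A quick spot check from the command line (replace the domain with your own; assumes "Enforce HTTPS" is enabled in the Pages settings):

```bash
# Expect an HTTP 301 redirecting to the https:// URL
curl -sI http://docs.yourdomain.com

# Inspect certificate details (issuer and expiry) from the TLS handshake
curl -vI https://docs.yourdomain.com 2>&1 | grep -iE "subject:|expire"
```
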
352 | ## Environment Configuration
353 |
354 | ### Production Optimization
355 |
356 | DocuMCP automatically configures:
357 |
358 | **Build optimization:**
359 |
360 | ```yaml
361 | - name: Build with optimization
362 | run: |
363 | export NODE_ENV=production
364 | npm run build
365 | env:
366 | CI: true
367 | NODE_OPTIONS: --max-old-space-size=4096
368 | ```
369 |
370 | **Caching strategy:**
371 |
372 | ```yaml
373 | - name: Cache dependencies
374 | uses: actions/cache@v4
375 | with:
376 | path: ~/.npm
377 | key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
378 | restore-keys: |
379 | ${{ runner.os }}-node-
380 | ```
381 |
382 | ### Environment Variables
383 |
384 | Set up environment variables for production (a workflow sketch follows these steps):
385 |
386 | 1. Go to repository **Settings**
387 | 2. Navigate to **Secrets and variables** > **Actions**
388 | 3. Add production variables:
389 | - `HUGO_ENV=production`
390 | - `NODE_ENV=production`
391 | - Custom API keys (if needed)
392 |
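Repository variables are then available to workflows through the `vars` context; a minimal sketch using the names from the steps above:

```yaml
- name: Build with production settings
  run: npm run build
  env:
    NODE_ENV: ${{ vars.NODE_ENV }}
    HUGO_ENV: ${{ vars.HUGO_ENV }}
```
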
393 | ## Deployment Verification
394 |
395 | ### Automatic Verification
396 |
397 | DocuMCP includes verification:
398 |
399 | ```bash
400 | "verify my GitHub Pages deployment is working correctly"
401 | ```
402 |
403 | This checks:
404 |
405 | - ✅ Site is accessible
406 | - ✅ All pages load correctly
407 | - ✅ Navigation works
408 | - ✅ Search functionality (if enabled)
409 | - ✅ Mobile responsiveness
410 | - ✅ SSL certificate validity
411 |
412 | ### Manual Verification Checklist
413 |
414 | - [ ] Homepage loads at `https://username.github.io/repository`
415 | - [ ] All navigation links work
416 | - [ ] Search functions properly
417 | - [ ] Mobile layout is responsive
418 | - [ ] Images and assets load
419 | - [ ] Forms work (if applicable)
420 | - [ ] Analytics tracking (if configured)
421 |
422 | ## Troubleshooting Deployment Issues
423 |
424 | ### Common Problems
425 |
426 | **Build Fails:**
427 |
428 | ```text
429 | # Check workflow logs in GitHub Actions tab
430 | # Common issues:
431 | - Node.js version mismatch
432 | - Missing dependencies
433 | - Configuration errors
434 | ```
435 |
436 | **404 Errors:**
437 |
438 | ```text
439 | # Fix baseURL configuration
440 | # For Docusaurus:
441 | baseUrl: '/repository-name/',
442 |
443 | # For Hugo:
444 | baseURL: 'https://username.github.io/repository-name/'
445 | ```
446 |
447 | **Assets Not Loading:**
448 |
449 | ```bash
450 | # Check publicPath configuration
451 | # Ensure all asset paths are relative
452 | ```
453 |
454 | ### Debug Mode
455 |
456 | Enable debug mode in workflows:
457 |
458 | ```yaml
459 | - name: Debug build
460 | run: |
461 | npm run build -- --verbose
462 | env:
463 | DEBUG: true
464 | ACTIONS_STEP_DEBUG: true
465 | ```
466 |
467 | ## Performance Optimization
468 |
469 | ### Build Performance
470 |
471 | Optimize build times:
472 |
473 | ```yaml
474 | - name: Cache build assets
475 | uses: actions/cache@v4
476 | with:
477 | path: |
478 | .next/cache
479 | .docusaurus/cache
480 | public/static
481 | key: ${{ runner.os }}-build-${{ hashFiles('**/*.md', '**/*.js') }}
482 | ```
483 |
484 | ### Site Performance
485 |
486 | DocuMCP automatically optimizes:
487 |
488 | - **Image compression**: WebP format when possible
489 | - **CSS minification**: Remove unused styles
490 | - **JavaScript bundling**: Code splitting and tree shaking
491 | - **Asset preloading**: Critical resources loaded first
492 |
493 | ## Monitoring and Analytics
494 |
495 | ### GitHub Actions Monitoring
496 |
497 | Set up notifications for deployment failures:
498 |
499 | ```yaml
500 | - name: Notify on failure
501 | if: failure()
502 | uses: actions/github-script@v7
503 | with:
504 | script: |
505 | github.rest.issues.create({
506 | owner: context.repo.owner,
507 | repo: context.repo.repo,
508 | title: 'Documentation Deployment Failed',
509 | body: 'Deployment workflow failed. Check logs for details.',
510 | labels: ['deployment', 'bug']
511 | });
512 | ```
513 |
514 | ### Site Analytics
515 |
516 | Add analytics to track usage:
517 |
518 | **Google Analytics (Docusaurus):**
519 |
520 | ```javascript
521 | // docusaurus.config.js
522 | const config = {
523 | presets: [
524 | [
525 | "classic",
526 | {
527 | gtag: {
528 | trackingID: "G-XXXXXXXXXX",
529 | anonymizeIP: true,
530 | },
531 | },
532 | ],
533 | ],
534 | };
535 | ```
536 |
537 | ## Advanced Deployment Strategies
538 |
539 | ### Multi-Environment Deployment
540 |
541 | Deploy to staging and production using two separate workflow files:
542 |
543 | ```yaml
544 | # e.g. .github/workflows/staging.yml: deploy a preview on PRs
545 | on:
546 | pull_request:
547 | branches: [main]
548 |
549 | # e.g. .github/workflows/production.yml: deploy on merge to main
550 | on:
551 | push:
552 | branches: [main]
553 | ```
554 |
555 | ### Rollback Strategy
556 |
557 | Implement deployment rollback:
558 |
559 | ```yaml
560 | - name: Store deployment info
561 | run: |
562 | echo "DEPLOYMENT_SHA=${{ github.sha }}" >> $GITHUB_ENV
563 | echo "DEPLOYMENT_TIME=$(date)" >> $GITHUB_ENV
564 |
565 | - name: Create rollback script
566 | run: |
567 | echo "#!/bin/bash" > rollback.sh
568 | echo "git checkout ${{ env.DEPLOYMENT_SHA }}" >> rollback.sh
569 | chmod +x rollback.sh
570 | ```
571 |
572 | ## Security Considerations
573 |
574 | ### Permissions
575 |
576 | DocuMCP uses minimal permissions:
577 |
578 | ```yaml
579 | permissions:
580 | contents: read # Read repository content
581 | pages: write # Deploy to GitHub Pages
582 | id-token: write # OIDC authentication
583 | ```
584 |
585 | ### Secrets Management
586 |
587 | Never commit secrets to the repository; instead (see the example below):
588 |
589 | - Use GitHub Actions secrets
590 | - Environment variables for configuration
591 | - OIDC tokens for authentication
592 |
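For example, a build step can read a secret from the Actions secrets context at run time (the secret name here is illustrative):

```yaml
- name: Build with API key
  run: npm run build
  env:
    ANALYTICS_API_KEY: ${{ secrets.ANALYTICS_API_KEY }}
```
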
593 | ## Next Steps
594 |
595 | After successful deployment:
596 |
597 | 1. **[Monitor your site](site-monitoring.md)** for uptime and performance
598 | 2. **[Set up custom domain](custom-domains.md)** (optional)
599 | 3. **[Optimize for SEO](seo-optimization.md)**
600 | 4. **[Configure analytics](analytics-setup.md)**
601 |
602 | ## Summary
603 |
604 | You now know how to:
605 | - ✅ Deploy documentation using DocuMCP automation
606 | - ✅ Configure GitHub Actions workflows
607 | - ✅ Set up custom domains and SSL
608 | - ✅ Verify deployments are working
609 | - ✅ Troubleshoot common issues
610 | - ✅ Optimize build and site performance
611 | - ✅ Monitor deployments and analytics
612 |
613 | Your documentation is now live and automatically updated!
614 |
```
--------------------------------------------------------------------------------
/docs/adrs/010-mcp-resource-pattern-redesign.md:
--------------------------------------------------------------------------------
```markdown
1 | ---
2 | documcp:
3 | last_updated: "2025-11-20T00:46:21.944Z"
4 | last_validated: "2025-11-20T00:46:21.944Z"
5 | auto_updated: false
6 | update_frequency: monthly
7 | ---
8 |
9 | # ADR-010: MCP Resource Pattern Redesign
10 |
11 | **Status:** Accepted
12 | **Date:** 2025-10-09
13 | **Deciders:** Development Team
14 | **Context:** MCP Best Practices Review
15 |
16 | ---
17 |
18 | ## Context and Problem Statement
19 |
20 | During an MCP best practices review (2025-10-09), a critical architectural misalignment was identified: DocuMCP was using MCP resources as a **persistence layer** to store tool execution results, violating the fundamental MCP control pattern philosophy.
21 |
22 | **The Problem:**
23 |
24 | - Resources were storing tool outputs via `storeResourceFromToolResult()`
25 | - A `resourceStore` Map held dynamic tool results
26 | - Resource URIs were generated at runtime (e.g., `documcp://analysis/{timestamp}-{random}`)
27 | - This violated MCP's core principle that resources should **serve applications**, not store tool results
28 |
29 | **Why This Matters:**
30 | According to MCP best practices, the three primitives have distinct control patterns:
31 |
32 | - **Tools** = Model-controlled (Claude decides when to execute) → Serve the **model**
33 | - **Resources** = App-controlled (application decides when to fetch) → Serve the **app**
34 | - **Prompts** = User-controlled (user triggers via actions) → Serve **users**
35 |
36 | Using resources for tool result storage conflates model operations with app operations, creating architectural confusion and misusing the MCP protocol.
37 |
38 | ---
39 |
40 | ## Decision Drivers
41 |
42 | ### Technical Requirements
43 |
44 | - Align with MCP specification and best practices
45 | - Follow proper control pattern separation
46 | - Maintain backward compatibility where possible
47 | - Preserve existing tool functionality
48 |
49 | ### Architectural Principles
50 |
51 | - **Separation of Concerns:** Tools handle execution, resources provide app data
52 | - **Statelessness:** MCP servers should be stateless; persistence belongs elsewhere
53 | - **Clear Purpose:** Each primitive serves its intended audience
54 |
55 | ### Developer Experience
56 |
57 | - Simplify resource implementation
58 | - Make resource purpose obvious
59 | - Enable proper MCP Inspector testing
60 |
61 | ---
62 |
63 | ## Considered Options
64 |
65 | ### Option 1: Keep Current Pattern (Status Quo) ❌
66 |
67 | **Description:** Continue using resources to store tool results.
68 |
69 | **Pros:**
70 |
71 | - No code changes required
72 | - Existing URIs remain functional
73 | - No migration needed
74 |
75 | **Cons:**
76 |
77 | - ❌ Violates MCP best practices
78 | - ❌ Confuses model operations with app operations
79 | - ❌ Makes MCP Inspector testing unclear
80 | - ❌ Creates unnecessary complexity
81 | - ❌ Misrepresents resource purpose
82 |
83 | **Decision:** Rejected due to architectural misalignment
84 |
85 | ---
86 |
87 | ### Option 2: Remove All Resources ❌
88 |
89 | **Description:** Eliminate resources entirely, return all data via tools only.
90 |
91 | **Pros:**
92 |
93 | - Simplifies implementation
94 | - Eliminates resource confusion
95 | - Focuses on tools as primary interface
96 |
97 | **Cons:**
98 |
99 | - ❌ Removes legitimate use cases for app-controlled data
100 | - ❌ Loses template access for UI
101 | - ❌ Prevents SSG list for dropdowns
102 | - ❌ Underutilizes MCP capabilities
103 |
104 | **Decision:** Rejected - removes legitimate app-serving value along with the problem
105 |
106 | ---
107 |
108 | ### Option 3: Redesign Resources for App Needs ✅ (CHOSEN)
109 |
110 | **Description:** Remove tool result storage, create static resources that serve application UI needs.
111 |
112 | **Pros:**
113 |
114 | - ✅ Aligns with MCP best practices
115 | - ✅ Clear separation: tools execute, resources provide app data
116 | - ✅ Enables proper MCP Inspector testing
117 | - ✅ Provides legitimate value to applications
118 | - ✅ Follows control pattern philosophy
119 |
120 | **Cons:**
121 |
122 | - Requires code refactoring
123 | - Changes resource URIs (but tools remain compatible)
124 |
125 | **Decision:** **ACCEPTED** - Best aligns with MCP architecture
126 |
127 | ---
128 |
129 | ## Decision Outcome
130 |
131 | **Chosen Option:** Option 3 - Redesign Resources for App Needs
132 |
133 | ### Implementation Details
134 |
135 | #### 1. Remove Tool Result Storage
136 |
137 | **Before:**
138 |
139 | ```typescript
140 | const resourceStore = new Map<string, { content: string; mimeType: string }>();
141 |
142 | function storeResourceFromToolResult(
143 | toolName: string,
144 | args: any,
145 | result: any,
146 | id?: string,
147 | ): string {
148 | const uri = `documcp://analysis/${id}`;
149 | resourceStore.set(uri, {
150 | content: JSON.stringify(result),
151 | mimeType: "application/json",
152 | });
153 | return uri;
154 | }
155 |
156 | // In tool handler:
157 | const result = await analyzeRepository(args);
158 | const resourceUri = storeResourceFromToolResult(
159 | "analyze_repository",
160 | args,
161 | result,
162 | );
163 | (result as any).resourceUri = resourceUri;
164 | return result;
165 | ```
166 |
167 | **After:**
168 |
169 | ```typescript
170 | // No resource storage! Tools return results directly
171 | const result = await analyzeRepository(args);
172 | return wrapToolResult(result, "analyze_repository");
173 | ```
174 |
175 | #### 2. Create Static App-Serving Resources
176 |
177 | **New Resource Categories:**
178 |
179 | **A. SSG List Resource** (for UI dropdowns)
180 |
181 | ```typescript
182 | {
183 | uri: "documcp://ssgs/available",
184 | name: "Available Static Site Generators",
185 | description: "List of supported SSGs with capabilities for UI selection",
186 | mimeType: "application/json"
187 | }
188 | ```
189 |
190 | Returns:
191 |
192 | ```json
193 | {
194 | "ssgs": [
195 | {
196 | "id": "jekyll",
197 | "name": "Jekyll",
198 | "description": "Ruby-based SSG, great for GitHub Pages",
199 | "language": "ruby",
200 | "complexity": "low",
201 | "buildSpeed": "medium",
202 | "ecosystem": "mature",
203 | "bestFor": ["blogs", "documentation", "simple-sites"]
204 | }
205 | // ... 4 more SSGs
206 | ]
207 | }
208 | ```
209 |
210 | **B. Configuration Templates** (for SSG setup)
211 |
212 | ```typescript
213 | {
214 | uri: "documcp://templates/jekyll-config",
215 | name: "Jekyll Configuration Template",
216 | description: "Template for Jekyll _config.yml",
217 | mimeType: "text/yaml"
218 | }
219 | ```
220 |
221 | Returns actual YAML template for Jekyll configuration.
222 |
223 | **C. Workflow Resources** (for UI workflow display)
224 |
225 | ```typescript
226 | {
227 | uri: "documcp://workflows/all",
228 | name: "All Documentation Workflows",
229 | description: "Complete list of available documentation workflows",
230 | mimeType: "application/json"
231 | }
232 | ```
233 |
234 | #### 3. Resource Handler Implementation
235 |
236 | ```typescript
237 | server.setRequestHandler(ReadResourceRequestSchema, async (request) => {
238 | const { uri } = request.params;
239 |
240 | // Handle SSG list (for UI)
241 | if (uri === "documcp://ssgs/available") {
242 | return {
243 | contents: [{
244 | uri,
245 | mimeType: "application/json",
246 | text: JSON.stringify({ ssgs: [...] })
247 | }]
248 | };
249 | }
250 |
251 | // Handle templates (static content)
252 | if (uri.startsWith("documcp://templates/")) {
253 | const templateType = uri.split("/").pop();
254 | return {
255 | contents: [{
256 | uri,
257 | mimeType: getTemplateMimeType(templateType),
258 | text: getTemplateContent(templateType)
259 | }]
260 | };
261 | }
262 |
263 | throw new Error(`Resource not found: ${uri}`);
264 | });
265 | ```
266 |
267 | ### Resource Design Principles
268 |
269 | 1. **Static Content Only:** Resources return pre-defined, static data
270 | 2. **App-Controlled:** Applications fetch resources when needed for UI
271 | 3. **Predictable URIs:** Fixed URIs (no timestamps or random IDs)
272 | 4. **Clear Purpose:** Each resource serves a specific app UI need (all four principles are illustrated in the sketch below)
273 |
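A minimal sketch of a resource definition that satisfies all four principles (fields mirror the SSG list example above):

```typescript
// Static content, app-controlled fetch, fixed URI, one clear UI purpose
const ssgListResource = {
  uri: "documcp://ssgs/available", // predictable URI: no timestamps or random IDs
  name: "Available Static Site Generators",
  description: "Static SSG list that applications fetch to populate dropdowns",
  mimeType: "application/json",
};
```
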
274 | ---
275 |
276 | ## Consequences
277 |
278 | ### Positive Consequences ✅
279 |
280 | 1. **Architectural Alignment**
281 |
282 | - Resources now properly serve applications
283 | - Clear separation between tools and resources
284 | - Follows MCP control pattern philosophy
285 |
286 | 2. **Improved Developer Experience**
287 |
288 | - Resource purpose is obvious
289 | - MCP Inspector testing is clear
290 | - No confusion about resource lifecycle
291 |
292 | 3. **Better Testability**
293 |
294 | - Resources return predictable content
295 | - Can test resources independently
296 | - MCP Inspector works correctly
297 |
298 | 4. **Simplified Implementation**
299 |
300 | - Removed `resourceStore` Map
301 | - Removed `storeResourceFromToolResult()` function
302 | - Removed 50+ lines of resource storage code
303 | - Tools are simpler (no resource URI tracking)
304 |
305 | 5. **Legitimate App Value**
306 | - SSG list enables UI dropdowns
307 | - Templates provide boilerplate content
308 | - Workflows guide user actions
309 |
310 | ### Negative Consequences ⚠️
311 |
312 | 1. **Breaking Change for Resource URIs**
313 |
314 | - Old dynamic URIs (`documcp://analysis/{timestamp}`) no longer work
315 | - Applications relying on these URIs need updates
316 | - **Mitigation:** Tools return data directly; URIs were internal implementation detail
317 |
318 | 2. **No Tool Result Persistence**
319 |
320 | - Tool results are not stored between executions
321 | - Applications must handle result storage if needed
322 | - **Mitigation:** MCP servers should be stateless; persistence is app responsibility
323 |
324 | 3. **Migration Effort**
325 | - Required updating all tool handlers
326 | - Updated resource definitions
327 | - **Time Cost:** ~4 hours
328 |
329 | ---
330 |
331 | ## Implementation Results
332 |
333 | ### Code Changes
334 |
335 | **Files Modified:**
336 |
337 | - `src/index.ts` (main server file)
338 | - Removed `resourceStore` Map (10 lines)
339 | - Removed `storeResourceFromToolResult()` (50 lines)
340 | - Redesigned `RESOURCES` array (12 new resources)
341 | - Updated `ReadResourceRequestSchema` handler (150 lines)
342 | - Removed resource storage from all tools (30+ locations)
343 |
344 | **Lines of Code:**
345 |
346 | - **Removed:** ~120 lines (resource storage logic)
347 | - **Added:** ~200 lines (static resource handlers)
348 | - **Net Change:** +80 lines (but much clearer purpose)
349 |
350 | ### Test Results
351 |
352 | **Before Implementation:**
353 |
354 | - Tests: 122/122 passing ✅
355 | - TypeScript: Compiles ✅
356 |
357 | **After Implementation:**
358 |
359 | - Tests: 122/122 passing ✅
360 | - TypeScript: Compiles ✅
361 | - No broken tests
362 | - No regression issues
363 |
364 | ### Performance Impact
365 |
366 | **Before:**
367 |
368 | - Resource storage: O(1) Map insertion per tool
369 | - Memory: Growing Map of all tool results
370 |
371 | **After:**
372 |
373 | - Resource retrieval: O(1) static content lookup
374 | - Memory: Fixed size (no growth)
375 |
376 | **Improvement:** Reduced memory usage, no performance degradation
377 |
378 | ---
379 |
380 | ## Compliance with MCP Best Practices
381 |
382 | ### Before Redesign
383 |
384 | - **Resource Implementation:** 3/10 ❌
385 | - **Control Patterns:** 4/10 ❌
386 |
387 | ### After Redesign
388 |
389 | - **Resource Implementation:** 9/10 ✅
390 | - **Control Patterns:** 9/10 ✅
391 |
392 | ---
393 |
394 | ## Migration Guide
395 |
396 | ### For Client Applications
397 |
398 | **Old Pattern (No Longer Works):**
399 |
400 | ```javascript
401 | // Execute tool
402 | const result = await callTool("analyze_repository", { path: "./" });
403 |
404 | // WRONG: Try to fetch from resource URI
405 | const resourceUri = result.resourceUri;
406 | const resource = await readResource(resourceUri); // ❌ Will fail
407 | ```
408 |
409 | **New Pattern (Recommended):**
410 |
411 | ```javascript
412 | // Execute tool - result contains all data
413 | const result = await callTool("analyze_repository", { path: "./" });
414 |
415 | // Use result directly (no need for resources)
416 | console.log(result.data); // ✅ All data is here
417 |
418 | // Use resources for app UI needs
419 | const ssgList = await readResource("documcp://ssgs/available"); // ✅ For dropdowns
420 | const template = await readResource("documcp://templates/jekyll-config"); // ✅ For setup
421 | ```
422 |
423 | ### For Tool Developers
424 |
425 | **Old Pattern:**
426 |
427 | ```typescript
428 | const result = await analyzeRepository(args);
429 | const resourceUri = storeResourceFromToolResult(
430 | "analyze_repository",
431 | args,
432 | result,
433 | );
434 | (result as any).resourceUri = resourceUri;
435 | return result;
436 | ```
437 |
438 | **New Pattern:**
439 |
440 | ```typescript
441 | const result = await analyzeRepository(args);
442 | return wrapToolResult(result, "analyze_repository"); // Standardized wrapper
443 | ```
444 |
445 | ---
446 |
447 | ## References
448 |
449 | - **MCP Specification:** https://modelcontextprotocol.io/docs
450 | - **MCP Best Practices Review:** `MCP_BEST_PRACTICES_REVIEW.md`
451 | - **MCP Inspector Guide:** `docs/development/MCP_INSPECTOR_TESTING.md`
452 | - **Related ADRs:**
453 | - ADR-006: MCP Tools API Design
454 | - ADR-007: MCP Prompts and Resources Integration
455 |
456 | ---
457 |
458 | ## Notes
459 |
460 | ### Design Philosophy
461 |
462 | The resource redesign embodies a core MCP principle: **each primitive serves its audience**.
463 |
464 | - **Tools** answer the question: _"What can Claude do?"_
465 | - **Resources** answer the question: _"What data does my app need?"_
466 | - **Prompts** answer the question: _"What workflows can users trigger?"_
467 |
468 | Mixing these purposes creates architectural debt and violates separation of concerns.
469 |
470 | ### Future Enhancements
471 |
472 | **Potential Additional Resources:**
473 |
474 | - `documcp://themes/available` - UI theme list
475 | - `documcp://validators/rules` - Validation rule catalog
476 | - `documcp://examples/{category}` - Example content library
477 |
478 | These should all follow the same principle: **serve the application's UI needs**, not store execution results, as sketched below.
479 |
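A sketch of how one such resource could slot into the existing handler, following the same static pattern (the `documcp://themes/available` payload below is hypothetical):

```typescript
// Hypothetical future branch in the ReadResourceRequestSchema handler
if (uri === "documcp://themes/available") {
  return {
    contents: [
      {
        uri,
        mimeType: "application/json",
        text: JSON.stringify({
          themes: [
            { id: "classic", name: "Classic", darkMode: true },
            { id: "minimal", name: "Minimal", darkMode: false },
          ],
        }),
      },
    ],
  };
}
```
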
480 | ---
481 |
482 | **Last Updated:** 2025-10-09
483 | **Status:** Implemented and Verified ✅
484 |
```
--------------------------------------------------------------------------------
/tests/memory/knowledge-graph.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Basic unit tests for Knowledge Graph System
3 | * Tests basic instantiation and core functionality
4 | * Part of Issue #54 - Core Memory System Unit Tests
5 | */
6 |
7 | import { promises as fs } from "fs";
8 | import path from "path";
9 | import os from "os";
10 | import { MemoryManager } from "../../src/memory/manager.js";
11 | import {
12 | KnowledgeGraph,
13 | GraphNode,
14 | GraphEdge,
15 | } from "../../src/memory/knowledge-graph.js";
16 |
17 | describe("KnowledgeGraph", () => {
18 | let tempDir: string;
19 | let memoryManager: MemoryManager;
20 | let graph: KnowledgeGraph;
21 |
22 | beforeEach(async () => {
23 | // Create unique temp directory for each test
24 | tempDir = path.join(
25 | os.tmpdir(),
26 | `memory-graph-test-${Date.now()}-${Math.random()
27 | .toString(36)
28 |         .slice(2, 11)}`,
29 | );
30 | await fs.mkdir(tempDir, { recursive: true });
31 |
32 | // Create memory manager for knowledge graph
33 | memoryManager = new MemoryManager(tempDir);
34 | await memoryManager.initialize();
35 |
36 | graph = new KnowledgeGraph(memoryManager);
37 | await graph.initialize();
38 | });
39 |
40 | afterEach(async () => {
41 | // Cleanup temp directory
42 | try {
43 | await fs.rm(tempDir, { recursive: true, force: true });
44 | } catch (error) {
45 | // Ignore cleanup errors
46 | }
47 | });
48 |
49 | describe("Basic Graph Operations", () => {
50 | test("should create knowledge graph instance", () => {
51 | expect(graph).toBeDefined();
52 | expect(graph).toBeInstanceOf(KnowledgeGraph);
53 | });
54 |
55 | test("should add nodes to the graph", () => {
56 | const projectNode: Omit<GraphNode, "lastUpdated"> = {
57 | id: "project:test-project",
58 | type: "project",
59 | label: "Test Project",
60 | properties: {
61 | language: "typescript",
62 | framework: "react",
63 | },
64 | weight: 1.0,
65 | };
66 |
67 | const addedNode = graph.addNode(projectNode);
68 | expect(addedNode).toBeDefined();
69 | expect(addedNode.id).toBe("project:test-project");
70 | expect(addedNode.type).toBe("project");
71 | expect(addedNode.lastUpdated).toBeDefined();
72 | });
73 |
74 | test("should add edges to the graph", () => {
75 | // First add nodes
76 | const projectNode = graph.addNode({
77 | id: "project:web-app",
78 | type: "project",
79 | label: "Web App",
80 | properties: { language: "typescript" },
81 | weight: 1.0,
82 | });
83 |
84 | const techNode = graph.addNode({
85 | id: "tech:react",
86 | type: "technology",
87 | label: "React",
88 | properties: { category: "framework" },
89 | weight: 1.0,
90 | });
91 |
92 | // Add edge
93 | const edge: Omit<GraphEdge, "id" | "lastUpdated"> = {
94 | source: projectNode.id,
95 | target: techNode.id,
96 | type: "uses",
97 | weight: 1.0,
98 | confidence: 0.9,
99 | properties: { importance: "high" },
100 | };
101 |
102 | const addedEdge = graph.addEdge(edge);
103 | expect(addedEdge).toBeDefined();
104 | expect(addedEdge.source).toBe(projectNode.id);
105 | expect(addedEdge.target).toBe(techNode.id);
106 | expect(addedEdge.id).toBeDefined();
107 | });
108 |
109 | test("should get all nodes", async () => {
110 | // Add some nodes
111 | graph.addNode({
112 | id: "project:test1",
113 | type: "project",
114 | label: "Test 1",
115 | properties: {},
116 | weight: 1.0,
117 | });
118 |
119 | graph.addNode({
120 | id: "tech:vue",
121 | type: "technology",
122 | label: "Vue",
123 | properties: {},
124 | weight: 1.0,
125 | });
126 |
127 | const nodes = await graph.getAllNodes();
128 | expect(Array.isArray(nodes)).toBe(true);
129 | expect(nodes.length).toBe(2);
130 | });
131 |
132 | test("should get all edges", async () => {
133 | // Add nodes and edges
134 | const node1 = graph.addNode({
135 | id: "project:test2",
136 | type: "project",
137 | label: "Test 2",
138 | properties: {},
139 | weight: 1.0,
140 | });
141 |
142 | const node2 = graph.addNode({
143 | id: "tech:angular",
144 | type: "technology",
145 | label: "Angular",
146 | properties: {},
147 | weight: 1.0,
148 | });
149 |
150 | graph.addEdge({
151 | source: node1.id,
152 | target: node2.id,
153 | type: "uses",
154 | weight: 1.0,
155 | confidence: 0.8,
156 | properties: {},
157 | });
158 |
159 | const edges = await graph.getAllEdges();
160 | expect(Array.isArray(edges)).toBe(true);
161 | expect(edges.length).toBe(1);
162 | });
163 | });
164 |
165 | describe("Graph Queries", () => {
166 | test("should query nodes by type", () => {
167 | // Add multiple nodes of different types
168 | graph.addNode({
169 | id: "project:project-a",
170 | type: "project",
171 | label: "Project A",
172 | properties: {},
173 | weight: 1.0,
174 | });
175 |
176 | graph.addNode({
177 | id: "project:project-b",
178 | type: "project",
179 | label: "Project B",
180 | properties: {},
181 | weight: 1.0,
182 | });
183 |
184 | graph.addNode({
185 | id: "tech:vue",
186 | type: "technology",
187 | label: "Vue",
188 | properties: { category: "framework" },
189 | weight: 1.0,
190 | });
191 |
192 | const results = graph.query({
193 | nodeTypes: ["project"],
194 | });
195 |
196 | expect(results).toBeDefined();
197 | expect(Array.isArray(results.nodes)).toBe(true);
198 | expect(results.nodes.length).toBe(2);
199 | expect(results.nodes.every((node) => node.type === "project")).toBe(true);
200 | });
201 |
202 | test("should find connections for a node", async () => {
203 | // Add nodes and create connections
204 | const projectNode = graph.addNode({
205 | id: "project:connected-test",
206 | type: "project",
207 | label: "Connected Test",
208 | properties: {},
209 | weight: 1.0,
210 | });
211 |
212 | const techNode = graph.addNode({
213 | id: "tech:express",
214 | type: "technology",
215 | label: "Express",
216 | properties: {},
217 | weight: 1.0,
218 | });
219 |
220 | graph.addEdge({
221 | source: projectNode.id,
222 | target: techNode.id,
223 | type: "uses",
224 | weight: 1.0,
225 | confidence: 0.9,
226 | properties: {},
227 | });
228 |
229 | const connections = await graph.getConnections(projectNode.id);
230 | expect(Array.isArray(connections)).toBe(true);
231 | expect(connections.length).toBe(1);
232 | expect(connections[0]).toBe(techNode.id);
233 | });
234 |
235 | test("should find paths between nodes", () => {
236 | // Add nodes and create a path
237 | const projectNode = graph.addNode({
238 | id: "project:path-test",
239 | type: "project",
240 | label: "Path Test Project",
241 | properties: {},
242 | weight: 1.0,
243 | });
244 |
245 | const techNode = graph.addNode({
246 | id: "tech:nodejs",
247 | type: "technology",
248 | label: "Node.js",
249 | properties: {},
250 | weight: 1.0,
251 | });
252 |
253 | graph.addEdge({
254 | source: projectNode.id,
255 | target: techNode.id,
256 | type: "uses",
257 | weight: 1.0,
258 | confidence: 0.9,
259 | properties: {},
260 | });
261 |
262 | const path = graph.findPath(projectNode.id, techNode.id);
263 | expect(path).toBeDefined();
264 | expect(path?.nodes.length).toBe(2);
265 | expect(path?.edges.length).toBe(1);
266 | });
267 | });
268 |
269 | describe("Graph Analysis", () => {
270 | test("should build from memory entries", async () => {
271 | // Add some test memory entries first
272 | await memoryManager.remember(
273 | "analysis",
274 | {
275 | language: { primary: "python" },
276 | framework: { name: "django" },
277 | },
278 | {
279 | projectId: "analysis-project",
280 | },
281 | );
282 |
283 | await memoryManager.remember(
284 | "recommendation",
285 | {
286 | recommended: "mkdocs",
287 | confidence: 0.9,
288 | },
289 | {
290 | projectId: "analysis-project",
291 | },
292 | );
293 |
294 | // Build graph from memories
295 | await graph.buildFromMemories();
296 |
297 | const nodes = await graph.getAllNodes();
298 | // The buildFromMemories method might be implemented differently
299 | // Just verify it doesn't throw and returns an array
300 | expect(Array.isArray(nodes)).toBe(true);
301 |
302 | // The graph might start empty, which is okay for this basic test
303 | if (nodes.length > 0) {
304 | // Optionally check node types if any were created
305 | const nodeTypes = [...new Set(nodes.map((n) => n.type))];
306 | expect(nodeTypes.length).toBeGreaterThan(0);
307 | }
308 | });
309 |
310 | test("should generate graph-based recommendations", async () => {
311 | // Add some memory data first
312 | await memoryManager.remember(
313 | "analysis",
314 | {
315 | language: { primary: "javascript" },
316 | framework: { name: "react" },
317 | },
318 | {
319 | projectId: "rec-test-project",
320 | },
321 | );
322 |
323 | await graph.buildFromMemories();
324 |
325 | const projectFeatures = {
326 | language: "javascript",
327 | framework: "react",
328 | };
329 |
330 | const recommendations = await graph.getGraphBasedRecommendation(
331 | projectFeatures,
332 | ["docusaurus", "gatsby"],
333 | );
334 |
335 | expect(Array.isArray(recommendations)).toBe(true);
336 | // Even if no recommendations found, should return empty array
337 | });
338 |
339 | test("should provide graph statistics", async () => {
340 | // Add some nodes
341 | graph.addNode({
342 | id: "project:stats-test",
343 | type: "project",
344 | label: "Stats Test",
345 | properties: {},
346 | weight: 1.0,
347 | });
348 |
349 | graph.addNode({
350 | id: "tech:webpack",
351 | type: "technology",
352 | label: "Webpack",
353 | properties: {},
354 | weight: 1.0,
355 | });
356 |
357 | const stats = await graph.getStatistics();
358 | expect(stats).toBeDefined();
359 | expect(typeof stats.nodeCount).toBe("number");
360 | expect(typeof stats.edgeCount).toBe("number");
361 | expect(typeof stats.nodesByType).toBe("object");
362 | expect(typeof stats.averageConnectivity).toBe("number");
363 | expect(Array.isArray(stats.mostConnectedNodes)).toBe(true);
364 | });
365 | });
366 |
367 | describe("Error Handling", () => {
368 | test("should handle removing non-existent nodes", async () => {
369 | const removed = await graph.removeNode("non-existent-node");
370 | expect(removed).toBe(false);
371 | });
372 |
373 | test("should handle concurrent graph operations", () => {
374 | // Create multiple nodes concurrently
375 | const nodes = Array.from({ length: 10 }, (_, i) =>
376 | graph.addNode({
377 | id: `project:concurrent-${i}`,
378 | type: "project",
379 | label: `Concurrent Project ${i}`,
380 | properties: { index: i },
381 | weight: 1.0,
382 | }),
383 | );
384 |
385 | expect(nodes).toHaveLength(10);
386 | expect(nodes.every((node) => typeof node.id === "string")).toBe(true);
387 | });
388 |
389 | test("should handle invalid query parameters", () => {
390 | const results = graph.query({
391 | nodeTypes: ["non-existent-type"],
392 | });
393 |
394 | expect(results).toBeDefined();
395 | expect(Array.isArray(results.nodes)).toBe(true);
396 | expect(results.nodes.length).toBe(0);
397 | });
398 |
399 | test("should handle empty graph operations", async () => {
400 | // Test operations on empty graph
401 | const path = graph.findPath("non-existent-1", "non-existent-2");
402 | expect(path).toBeNull();
403 |
404 | const connections = await graph.getConnections("non-existent-node");
405 | expect(Array.isArray(connections)).toBe(true);
406 | expect(connections.length).toBe(0);
407 | });
408 | });
409 |
410 | describe("Persistence and Memory Integration", () => {
411 | test("should save and load from memory", async () => {
412 | // Add some data to the graph
413 | graph.addNode({
414 | id: "project:persistence-test",
415 | type: "project",
416 | label: "Persistence Test",
417 | properties: {},
418 | weight: 1.0,
419 | });
420 |
421 | // Save to memory
422 | await graph.saveToMemory();
423 |
424 | // Create new graph and load
425 | const newGraph = new KnowledgeGraph(memoryManager);
426 | await newGraph.loadFromMemory();
427 |
428 | const nodes = await newGraph.getAllNodes();
429 | expect(nodes.length).toBeGreaterThanOrEqual(0);
430 | });
431 |
432 | test("should handle empty graph statistics", async () => {
433 | const stats = await graph.getStatistics();
434 | expect(stats).toBeDefined();
435 | expect(typeof stats.nodeCount).toBe("number");
436 | expect(typeof stats.edgeCount).toBe("number");
437 | expect(stats.nodeCount).toBe(0); // Empty graph initially
438 | expect(stats.edgeCount).toBe(0);
439 | });
440 | });
441 | });
442 |
```
--------------------------------------------------------------------------------
/tests/memory/kg-storage.test.ts:
--------------------------------------------------------------------------------
```typescript
1 | /**
2 | * Tests for Knowledge Graph Storage
3 | * Phase 1: Core Knowledge Graph Integration
4 | */
5 |
6 | import { describe, it, expect, beforeEach, afterEach } from "@jest/globals";
7 | import { promises as fs } from "fs";
8 | import { join } from "path";
9 | import { KGStorage } from "../../src/memory/kg-storage.js";
10 | import { GraphNode, GraphEdge } from "../../src/memory/knowledge-graph.js";
11 | import { tmpdir } from "os";
12 |
13 | describe("KGStorage", () => {
14 | let storage: KGStorage;
15 | let testDir: string;
16 |
17 | beforeEach(async () => {
18 | // Create temporary test directory
19 | testDir = join(tmpdir(), `kg-storage-test-${Date.now()}`);
20 | await fs.mkdir(testDir, { recursive: true });
21 |
22 | storage = new KGStorage({
23 | storageDir: testDir,
24 | backupOnWrite: true,
25 | validateOnRead: true,
26 | });
27 |
28 | await storage.initialize();
29 | });
30 |
31 | afterEach(async () => {
32 | // Clean up test directory
33 | try {
34 | await fs.rm(testDir, { recursive: true, force: true });
35 | } catch (error) {
36 | console.warn("Failed to clean up test directory:", error);
37 | }
38 | });
39 |
40 | describe("Initialization", () => {
41 | it("should create storage directory", async () => {
42 | const stats = await fs.stat(testDir);
43 | expect(stats.isDirectory()).toBe(true);
44 | });
45 |
46 | it("should create entity and relationship files", async () => {
47 | const entityFile = join(testDir, "knowledge-graph-entities.jsonl");
48 | const relationshipFile = join(
49 | testDir,
50 | "knowledge-graph-relationships.jsonl",
51 | );
52 |
53 |       await expect(fs.access(entityFile)).resolves.toBeUndefined();
54 |       await expect(fs.access(relationshipFile)).resolves.toBeUndefined();
55 |
56 |       // fs.access resolves (with undefined) only when the file exists,
57 |       // so both assertions above prove the files were created
58 | });
59 |
60 | it("should write file markers", async () => {
61 | const entityFile = join(testDir, "knowledge-graph-entities.jsonl");
62 | const content = await fs.readFile(entityFile, "utf-8");
63 |
64 | expect(content).toContain("# DOCUMCP_KNOWLEDGE_GRAPH_ENTITIES");
65 | });
66 |
67 | it("should reject non-DocuMCP files", async () => {
68 | // Create a non-DocuMCP file
69 | const fakeFile = join(testDir, "knowledge-graph-entities.jsonl");
70 | await fs.writeFile(fakeFile, "not a documcp file\n", "utf-8");
71 |
72 | const newStorage = new KGStorage({ storageDir: testDir });
73 |
74 | await expect(newStorage.initialize()).rejects.toThrow(
75 | "is not a DocuMCP knowledge graph file",
76 | );
77 | });
78 | });
79 |
80 | describe("Entity Storage", () => {
81 | it("should save and load entities", async () => {
82 | const entities: GraphNode[] = [
83 | {
84 | id: "project:test",
85 | type: "project",
86 | label: "Test Project",
87 | properties: { name: "Test" },
88 | weight: 1.0,
89 | lastUpdated: new Date().toISOString(),
90 | },
91 | {
92 | id: "tech:typescript",
93 | type: "technology",
94 | label: "TypeScript",
95 | properties: { name: "TypeScript" },
96 | weight: 1.0,
97 | lastUpdated: new Date().toISOString(),
98 | },
99 | ];
100 |
101 | await storage.saveEntities(entities);
102 | const loaded = await storage.loadEntities();
103 |
104 | expect(loaded).toHaveLength(2);
105 | expect(loaded[0].id).toBe("project:test");
106 | expect(loaded[1].id).toBe("tech:typescript");
107 | });
108 |
109 | it("should handle empty entity list", async () => {
110 | await storage.saveEntities([]);
111 | const loaded = await storage.loadEntities();
112 |
113 | expect(loaded).toHaveLength(0);
114 | });
115 |
116 | it("should preserve entity properties", async () => {
117 | const entity: GraphNode = {
118 | id: "project:complex",
119 | type: "project",
120 | label: "Complex Project",
121 | properties: {
122 | name: "Complex",
123 | technologies: ["typescript", "react"],
124 | metadata: { nested: { value: 123 } },
125 | },
126 | weight: 0.85,
127 | lastUpdated: new Date().toISOString(),
128 | };
129 |
130 | await storage.saveEntities([entity]);
131 | const loaded = await storage.loadEntities();
132 |
133 | expect(loaded[0].properties.technologies).toEqual([
134 | "typescript",
135 | "react",
136 | ]);
137 | expect(loaded[0].properties.metadata.nested.value).toBe(123);
138 | });
139 | });
140 |
141 | describe("Relationship Storage", () => {
142 | it("should save and load relationships", async () => {
143 | const relationships: GraphEdge[] = [
144 | {
145 | id: "project:test-uses-tech:typescript",
146 | source: "project:test",
147 | target: "tech:typescript",
148 | type: "uses",
149 | weight: 1.0,
150 | confidence: 0.9,
151 | properties: {},
152 | lastUpdated: new Date().toISOString(),
153 | },
154 | ];
155 |
156 | await storage.saveRelationships(relationships);
157 | const loaded = await storage.loadRelationships();
158 |
159 | expect(loaded).toHaveLength(1);
160 | expect(loaded[0].source).toBe("project:test");
161 | expect(loaded[0].target).toBe("tech:typescript");
162 | });
163 |
164 | it("should handle empty relationship list", async () => {
165 | await storage.saveRelationships([]);
166 | const loaded = await storage.loadRelationships();
167 |
168 | expect(loaded).toHaveLength(0);
169 | });
170 |
171 | it("should preserve relationship properties", async () => {
172 | const relationship: GraphEdge = {
173 | id: "test-edge",
174 | source: "node1",
175 | target: "node2",
176 | type: "similar_to",
177 | weight: 0.75,
178 | confidence: 0.8,
179 | properties: {
180 | similarityScore: 0.75,
181 | sharedTechnologies: ["typescript"],
182 | },
183 | lastUpdated: new Date().toISOString(),
184 | };
185 |
186 | await storage.saveRelationships([relationship]);
187 | const loaded = await storage.loadRelationships();
188 |
189 | expect(loaded[0].properties.similarityScore).toBe(0.75);
190 | expect(loaded[0].properties.sharedTechnologies).toEqual(["typescript"]);
191 | });
192 | });
193 |
194 | describe("Complete Graph Storage", () => {
195 | it("should save and load complete graph", async () => {
196 | const entities: GraphNode[] = [
197 | {
198 | id: "project:test",
199 | type: "project",
200 | label: "Test",
201 | properties: {},
202 | weight: 1.0,
203 | lastUpdated: new Date().toISOString(),
204 | },
205 | ];
206 |
207 | const relationships: GraphEdge[] = [
208 | {
209 | id: "test-edge",
210 | source: "project:test",
211 | target: "tech:ts",
212 | type: "uses",
213 | weight: 1.0,
214 | confidence: 1.0,
215 | properties: {},
216 | lastUpdated: new Date().toISOString(),
217 | },
218 | ];
219 |
220 | await storage.saveGraph(entities, relationships);
221 | const loaded = await storage.loadGraph();
222 |
223 | expect(loaded.entities).toHaveLength(1);
224 | expect(loaded.relationships).toHaveLength(1);
225 | });
226 | });
227 |
228 | describe("Backup System", () => {
229 | it("should create backups on write", async () => {
230 | const entities: GraphNode[] = [
231 | {
232 | id: "test",
233 | type: "project",
234 | label: "Test",
235 | properties: {},
236 | weight: 1.0,
237 | lastUpdated: new Date().toISOString(),
238 | },
239 | ];
240 |
241 | await storage.saveEntities(entities);
242 | await storage.saveEntities(entities); // Second save should create backup
243 |
244 | const backupDir = join(testDir, "backups");
245 | const files = await fs.readdir(backupDir);
246 |
247 | const backupFiles = files.filter((f) => f.startsWith("entities-"));
248 | expect(backupFiles.length).toBeGreaterThan(0);
249 | });
250 |
251 | it("should restore from backup", async () => {
252 | const entities1: GraphNode[] = [
253 | {
254 | id: "version1",
255 | type: "project",
256 | label: "V1",
257 | properties: {},
258 | weight: 1.0,
259 | lastUpdated: new Date().toISOString(),
260 | },
261 | ];
262 |
263 | const entities2: GraphNode[] = [
264 | {
265 | id: "version2",
266 | type: "project",
267 | label: "V2",
268 | properties: {},
269 | weight: 1.0,
270 | lastUpdated: new Date().toISOString(),
271 | },
272 | ];
273 |
274 | // Save first version
275 | await storage.saveEntities(entities1);
276 |
277 | // Small delay to ensure different timestamps
278 | await new Promise((resolve) => setTimeout(resolve, 10));
279 |
280 | // Save second version (creates backup of first)
281 | await storage.saveEntities(entities2);
282 |
283 | // Verify we have second version
284 | let loaded = await storage.loadEntities();
285 | expect(loaded).toHaveLength(1);
286 | expect(loaded[0].id).toBe("version2");
287 |
288 | // Restore from backup
289 | await storage.restoreFromBackup("entities");
290 |
291 | // Verify we have first version back
292 | loaded = await storage.loadEntities();
293 | expect(loaded).toHaveLength(1);
294 | expect(loaded[0].id).toBe("version1");
295 | });
296 | });
297 |
298 | describe("Statistics", () => {
299 | it("should return accurate statistics", async () => {
300 | const entities: GraphNode[] = [
301 | {
302 | id: "e1",
303 | type: "project",
304 | label: "E1",
305 | properties: {},
306 | weight: 1.0,
307 | lastUpdated: new Date().toISOString(),
308 | },
309 | {
310 | id: "e2",
311 | type: "technology",
312 | label: "E2",
313 | properties: {},
314 | weight: 1.0,
315 | lastUpdated: new Date().toISOString(),
316 | },
317 | ];
318 |
319 | const relationships: GraphEdge[] = [
320 | {
321 | id: "r1",
322 | source: "e1",
323 | target: "e2",
324 | type: "uses",
325 | weight: 1.0,
326 | confidence: 1.0,
327 | properties: {},
328 | lastUpdated: new Date().toISOString(),
329 | },
330 | ];
331 |
332 | await storage.saveGraph(entities, relationships);
333 | const stats = await storage.getStatistics();
334 |
335 | expect(stats.entityCount).toBe(2);
336 | expect(stats.relationshipCount).toBe(1);
337 | expect(stats.schemaVersion).toBe("1.0.0");
338 | expect(stats.fileSize.entities).toBeGreaterThan(0);
339 | });
340 | });
341 |
342 | describe("Integrity Verification", () => {
343 | it("should detect orphaned relationships", async () => {
344 | const entities: GraphNode[] = [
345 | {
346 | id: "e1",
347 | type: "project",
348 | label: "E1",
349 | properties: {},
350 | weight: 1.0,
351 | lastUpdated: new Date().toISOString(),
352 | },
353 | ];
354 |
355 | const relationships: GraphEdge[] = [
356 | {
357 | id: "r1",
358 | source: "e1",
359 | target: "missing", // References non-existent entity
360 | type: "uses",
361 | weight: 1.0,
362 | confidence: 1.0,
363 | properties: {},
364 | lastUpdated: new Date().toISOString(),
365 | },
366 | ];
367 |
368 | await storage.saveGraph(entities, relationships);
369 | const result = await storage.verifyIntegrity();
370 |
371 | expect(result.valid).toBe(true); // No errors, just warnings
372 | expect(result.warnings.length).toBeGreaterThan(0);
373 | expect(result.warnings[0]).toContain("missing");
374 | });
375 |
376 | it("should detect duplicate entities", async () => {
377 | const entities: GraphNode[] = [
378 | {
379 | id: "duplicate",
380 | type: "project",
381 | label: "E1",
382 | properties: {},
383 | weight: 1.0,
384 | lastUpdated: new Date().toISOString(),
385 | },
386 | {
387 | id: "duplicate",
388 | type: "project",
389 | label: "E2",
390 | properties: {},
391 | weight: 1.0,
392 | lastUpdated: new Date().toISOString(),
393 | },
394 | ];
395 |
396 | await storage.saveEntities(entities);
397 | const result = await storage.verifyIntegrity();
398 |
399 | expect(result.valid).toBe(false);
400 | expect(result.errors.length).toBeGreaterThan(0);
401 | expect(result.errors[0]).toContain("Duplicate entity ID");
402 | });
403 | });
404 |
405 | describe("Export", () => {
406 | it("should export graph as JSON", async () => {
407 | const entities: GraphNode[] = [
408 | {
409 | id: "test",
410 | type: "project",
411 | label: "Test",
412 | properties: {},
413 | weight: 1.0,
414 | lastUpdated: new Date().toISOString(),
415 | },
416 | ];
417 |
418 | await storage.saveEntities(entities);
419 | const json = await storage.exportAsJSON();
420 | const parsed = JSON.parse(json);
421 |
422 | expect(parsed.metadata).toBeDefined();
423 | expect(parsed.metadata.version).toBe("1.0.0");
424 | expect(parsed.entities).toHaveLength(1);
425 | expect(parsed.relationships).toHaveLength(0);
426 | });
427 | });
428 | });
429 |
```